From 79556df293b2efbb3ccebb6db02120d62e348b44 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Jul 2018 10:57:50 +0100 Subject: [PATCH 0001/1692] drm/i915/gtt: Enable full-ppgtt by default everywhere We should we have all the kinks worked out and full-ppgtt now works reliably on gen7 (Ivybridge, Valleyview/Baytrail and Haswell). If we can let userspace have full control over their own ppgtt, it makes softpinning far more effective, in turn making GPU dispatch far more efficient by virtue of better mm segregation. On the other hand, switching over to a different GTT for every client does incur noticeable overhead, but only for very lightweight tasks. Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Mika Kuoppala Cc: Matthew Auld Reviewed-by: Joonas Lahtinen Cc: Jason Ekstrand Cc: Kenneth Graunke Acked-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20180717095751.1034-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3d75f2bb5623..dd09d4d8b0ed 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -179,13 +179,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, return 0; } - if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { - if (has_full_48bit_ppgtt) - return 3; + if (has_full_48bit_ppgtt) + return 3; - if (has_full_ppgtt) - return 2; - } + if (has_full_ppgtt) + return 2; return 1; } -- GitLab From 5f9c4f95bed24c8a8ad9258ee120a910876f6eed Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Jul 2018 10:57:51 +0100 Subject: [PATCH 0002/1692] drm/i915/gtt: Full ppgtt everywhere, no excuses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We believe we have all the kinks worked out, even for the early Valleyview devices, for whom we currently disable all ppgtt. References: 62942ed7279d ("drm/i915/vlv: disable PPGTT on early revs v3") Signed-off-by: Chris Wilson Cc: Ville Syrjälä Cc: Joonas Lahtinen Reviewed-by: Joonas Lahtinen Acked-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180717095751.1034-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index dd09d4d8b0ed..f02f7848305d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -173,12 +173,6 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, return 0; } - /* Early VLV doesn't have this */ - if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) { - DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); - return 0; - } - if (has_full_48bit_ppgtt) return 3; -- GitLab From 516a49cc19467e298d08a404f73a6e311f4548d1 Mon Sep 17 00:00:00 2001 From: Azhar Shaikh Date: Fri, 6 Jul 2018 11:37:30 -0700 Subject: [PATCH 0003/1692] drm/i915: Fix assert_plane() warning on bootup with external display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On KBL, WHL RVPs, booting up with an external display connected, triggers below warning, when the BiOS brings up the external display too. This warning is not seen during hotplug. 
[ 3.615226] ------------[ cut here ]------------ [ 3.619829] plane 1A assertion failure (expected on, current off) [ 3.632039] WARNING: CPU: 2 PID: 354 at drivers/gpu/drm/i915/intel_display.c:1294 assert_plane+0x71/0xbb [ 3.633920] iwlwifi 0000:00:14.3: loaded firmware version 38.c0e03d94.0 op_mode iwlmvm [ 3.647157] Modules linked in: iwlwifi cfg80211 btusb btrtl btbcm btintel bluetooth ecdh_generic [ 3.647163] CPU: 2 PID: 354 Comm: frecon Not tainted 4.17.0-rc7-50176-g655af12d39c2 #3 [ 3.647165] Hardware name: Intel Corporation CoffeeLake Client Platform/WhiskeyLake U DDR4 ERB, BIOS CNLSFWR1.R00.X140.B00.1804040304 04/04/2018 [ 3.684509] RIP: 0010:assert_plane+0x71/0xbb [ 3.764451] Call Trace: [ 3.766888] intel_atomic_commit_tail+0xa97/0xb77 [ 3.771569] intel_atomic_commit+0x26a/0x279 [ 3.771572] drm_atomic_helper_set_config+0x5c/0x76 [ 3.780670] __drm_mode_set_config_internal+0x66/0x109 [ 3.780672] drm_mode_setcrtc+0x4c9/0x5cc [ 3.780674] ? drm_mode_getcrtc+0x162/0x162 [ 3.789774] ? drm_mode_getcrtc+0x162/0x162 [ 3.798108] drm_ioctl_kernel+0x8d/0xe4 [ 3.801926] drm_ioctl+0x27d/0x368 [ 3.805311] ? drm_mode_getcrtc+0x162/0x162 [ 3.805314] ? selinux_file_ioctl+0x14e/0x199 [ 3.805317] vfs_ioctl+0x21/0x2f [ 3.813812] do_vfs_ioctl+0x491/0x4b4 [ 3.813813] ? security_file_ioctl+0x37/0x4b [ 3.813816] ksys_ioctl+0x55/0x75 [ 3.820672] __x64_sys_ioctl+0x1a/0x1e [ 3.820674] do_syscall_64+0x51/0x5f [ 3.820678] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 3.828221] RIP: 0033:0x7b5e04953967 [ 3.835504] RSP: 002b:00007fff2eafb6f8 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [ 3.835505] RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00007b5e04953967 [ 3.835505] RDX: 00007fff2eafb730 RSI: 00000000c06864a2 RDI: 000000000000000f [ 3.835506] RBP: 00007fff2eafb720 R08: 0000000000000000 R09: 0000000000000000 [ 3.835507] R10: 0000000000000070 R11: 0000000000000246 R12: 000000000000000f [ 3.879988] R13: 000056bc9dd7d210 R14: 00007fff2eafb730 R15: 00000000c06864a2 [ 3.887081] Code: 48 c7 c7 06 71 a5 be 84 c0 48 c7 c2 06 fd a3 be 48 89 f9 48 0f 44 ca 84 db 48 0f 45 d7 48 c7 c7 df d3 a4 be 31 c0 e8 af a0 c0 ff <0f> 0b eb 2b 48 c7 c7 06 fd a3 be 84 c0 48 c7 c2 06 71 a5 be 48 [ 3.905845] WARNING: CPU: 2 PID: 354 at drivers/gpu/drm/i915/intel_display.c:1294 assert_plane+0x71/0xbb [ 3.920964] ---[ end trace dac692f4ac46391a ]--- The warning is seen when mode_setcrtc() is called for pipeB during bootup and before we get a mode_setcrtc() for pipeA, while doing update_crtcs() in intel_atomic_commit_tail(). Now since, plane1A is still active after commit, update_crtcs() is done for pipeA and eventually update_plane() for plane1A. intel_plane_state->ctl for plane1A is not updated since set_modecrtc() is called for pipeB. So intel_plane_state->ctl for plane 1A will be 0x0. So doing an update_plane() for plane1A, will result in clearing PLANE_CTL_ENABLE bit, and hence the warning. To fix this warning, force all active planes to recompute their states in probe. Changes in v8: - Actually add Reviewed-by: Ville Syrjälä Changes in v7: - Move call to intel_initial_commit() after sanitize_watermarks() Otherwise the plane update will still consult potentially bogus watermarks we read out from the hardware. (Ville) - Carry Reviewed-by: Ville Syrjälä from v6 Changes in v6: - Handle EDEADLK for drm_atomic_get_crtc_state() and drm_atomic_add_affected_planes() - Remove optimization of calling intel_initial_commit() only when there is more than one active pipe in probe. - Avoid using intel_ types. 
Changes in v5: - Drop drm_modeset_lock_all_ctx() since locks will be taken later. Changes in v4: - Handle locking in intel_initial_commit() - Move the for loop inside intel_initial_commit() so that drm_atomic_commit() is called only once - Call intel_initial_commit() only for more than one active crtc on boot. - Save the return value of intel_initial_commit() and print a message in case of an error Changes in v3: - Add comments Changes in v2: - Force all planes to recompute their states.(Ville Syrjälä) - Update the commit message Signed-off-by: Azhar Shaikh Reviewed-by: Ville Syrjälä Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/1530902250-44583-1-git-send-email-azhar.shaikh@intel.com --- drivers/gpu/drm/i915/intel_display.c | 61 +++++++++++++++++++++++++++- 1 file changed, 59 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8bd9080fce34..8719c1a9d1ce 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15065,12 +15065,61 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); } +static int intel_initial_commit(struct drm_device *dev) +{ + struct drm_atomic_state *state = NULL; + struct drm_modeset_acquire_ctx ctx; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int ret = 0; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + drm_modeset_acquire_init(&ctx, 0); + +retry: + state->acquire_ctx = &ctx; + + drm_for_each_crtc(crtc, dev) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto out; + } + + if (crtc_state->active) { + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + goto out; + } + } + + ret = drm_atomic_commit(state); + +out: + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; + } + + drm_atomic_state_put(state); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; +} + int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; enum pipe pipe; struct intel_crtc *crtc; + int ret; dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); @@ -15145,8 +15194,6 @@ int intel_modeset_init(struct drm_device *dev) INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : ""); for_each_pipe(dev_priv, pipe) { - int ret; - ret = intel_crtc_init(dev_priv, pipe); if (ret) { drm_mode_config_cleanup(dev); @@ -15202,6 +15249,16 @@ int intel_modeset_init(struct drm_device *dev) if (!HAS_GMCH_DISPLAY(dev_priv)) sanitize_watermarks(dev); + /* + * Force all active planes to recompute their states. So that on + * mode_setcrtc after probe, all the intel_plane_state variables + * are already calculated and there is no assert_plane warnings + * during bootup. + */ + ret = intel_initial_commit(dev); + if (ret) + DRM_DEBUG_KMS("Initial commit in probe failed.\n"); + return 0; } -- GitLab From f7a738fca03c8dae6a1b448393989cc9f612198d Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Wed, 11 Jul 2018 14:59:02 -0700 Subject: [PATCH 0004/1692] drm/i915/icl: compute the TBT PLL registers Use the hardcoded tables provided by our spec. v2: - SSC stays disabled. - Use intel_port_is_tc(). 
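Note on the mechanics (the snippet is taken verbatim from the icl_calc_tbt_pll() hunk below): unlike the combo-PHY DP/HDMI paths there is no per-clock calculation for TBT at all. The new helper simply copies one of the two fixed parameter sets, keyed on the reference clock, and the requested port clock argument is not consulted:

	*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
		      icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;

That is the entire "computation": the TBT PLL always runs from the hardcoded spec tables.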
Cc: Anusha Srivatsa Reviewed-by: Rodrigo Vivi Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180711215909.23945-2-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/intel_dpll_mgr.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 058696b7d6c3..e046c4f668e0 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -2452,6 +2452,16 @@ static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = { .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, }; +static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = { + .dco_integer = 0x151, .dco_fraction = 0x4000, + .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, +}; + +static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = { + .dco_integer = 0x1A5, .dco_fraction = 0x7000, + .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, +}; + static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock, struct skl_wrpll_params *pll_params) { @@ -2494,6 +2504,14 @@ static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock, return true; } +static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock, + struct skl_wrpll_params *pll_params) +{ + *pll_params = dev_priv->cdclk.hw.ref == 24000 ? + icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values; + return true; +} + static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder, int clock, struct intel_dpll_hw_state *pll_state) @@ -2503,7 +2521,9 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct skl_wrpll_params pll_params = { 0 }; bool ret; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + if (intel_port_is_tc(dev_priv, encoder->port)) + ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params); else ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params); -- GitLab From 35e900818e177d9ae34988d15461792582937924 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Jul 2018 10:51:44 +0100 Subject: [PATCH 0005/1692] drm/i915: Suppress assertion for i915_ggtt_disable_guc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Another step in the drv_module_reload fault-injection saga, is that we try to disable the guc twice. Probably. It's a little unclear exactly what is going on in the unload sequence that catches us out, so for the time being suppress the assertion to get the test re-enabled. 
Testcase: igt/drv_module_reload/basic-reload-inject Signed-off-by: Chris Wilson Cc: Michał Winiarski Cc: Michal Wajdeczko Acked-by: Michał Winiarski Link: https://patchwork.freedesktop.org/patch/msgid/20180720095144.5885-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f02f7848305d..81a2c340c091 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3654,6 +3654,10 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915) void i915_ggtt_disable_guc(struct drm_i915_private *i915) { + /* XXX Temporary pardon for error unload */ + if (i915->ggtt.invalidate == gen6_ggtt_invalidate) + return; + /* We should only be called after i915_ggtt_enable_guc() */ GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); -- GitLab From 900ccf30f9e112b508a61b228bf014e3bea14bc4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Jul 2018 11:19:10 +0100 Subject: [PATCH 0006/1692] drm/i915: Only force GGTT coherency w/a on required chipsets Not all chipsets have an internal buffer delaying the visibility of writes via the GGTT being visible by other physical paths, but we use a very heavy workaround for all. We only need to apply that workarounds to the chipsets we know suffer from the delay and the resulting coherency issue. Similarly, the same inconsistent coherency fouls up our ABI promise that a write into a mmap_gtt is immediately visible to others. Since the HW has made that a lie, let userspace know when that contract is broken. (Not that userspace would want to use mmap_gtt on those chipsets for other performance reasons...) Testcase: igt/drv_selftest/live_coherency Testcase: igt/gem_mmap_gtt/coherency Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100587 Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Tomasz Lis Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180720101910.11153-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 3 +++ drivers/gpu/drm/i915/i915_gem.c | 5 +++++ drivers/gpu/drm/i915/i915_pci.c | 10 ++++++++++ drivers/gpu/drm/i915/intel_device_info.h | 1 + include/uapi/drm/i915_drm.h | 22 ++++++++++++++++++++++ 5 files changed, 41 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 343e79a44abd..23e9a86cbc2a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -441,6 +441,9 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, case I915_PARAM_CS_TIMESTAMP_FREQUENCY: value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz; break; + case I915_PARAM_MMAP_GTT_COHERENT: + value = INTEL_INFO(dev_priv)->has_coherent_ggtt; + break; default: DRM_DEBUG("Unknown parameter %d\n", param->param); return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fcc73a6ab503..8b52cb768a67 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -802,6 +802,11 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) * that was!). 
*/ + wmb(); + + if (INTEL_INFO(dev_priv)->has_coherent_ggtt) + return; + i915_gem_chipset_flush(dev_priv); intel_runtime_pm_get(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 6a4d1388ad2d..e443fe44da3a 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -74,6 +74,7 @@ .unfenced_needs_alignment = 1, \ .ring_mask = RENDER_RING, \ .has_snoop = true, \ + .has_coherent_ggtt = false, \ GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS @@ -110,6 +111,7 @@ static const struct intel_device_info intel_i865g_info = { .has_gmch_display = 1, \ .ring_mask = RENDER_RING, \ .has_snoop = true, \ + .has_coherent_ggtt = true, \ GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS @@ -117,6 +119,7 @@ static const struct intel_device_info intel_i865g_info = { static const struct intel_device_info intel_i915g_info = { GEN3_FEATURES, PLATFORM(INTEL_I915G), + .has_coherent_ggtt = false, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, @@ -178,6 +181,7 @@ static const struct intel_device_info intel_pineview_info = { .has_gmch_display = 1, \ .ring_mask = RENDER_RING, \ .has_snoop = true, \ + .has_coherent_ggtt = true, \ GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS @@ -220,6 +224,7 @@ static const struct intel_device_info intel_gm45_info = { .has_hotplug = 1, \ .ring_mask = RENDER_RING | BSD_RING, \ .has_snoop = true, \ + .has_coherent_ggtt = true, \ /* ilk does support rc6, but we do not implement [power] contexts */ \ .has_rc6 = 0, \ GEN_DEFAULT_PIPEOFFSETS, \ @@ -243,6 +248,7 @@ static const struct intel_device_info intel_ironlake_m_info = { .has_hotplug = 1, \ .has_fbc = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ @@ -287,6 +293,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { .has_hotplug = 1, \ .has_fbc = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ @@ -347,6 +354,7 @@ static const struct intel_device_info intel_valleyview_info = { .has_aliasing_ppgtt = 1, .has_full_ppgtt = 1, .has_snoop = true, + .has_coherent_ggtt = false, .ring_mask = RENDER_RING | BSD_RING | BLT_RING, .display_mmio_offset = VLV_DISPLAY_BASE, GEN_DEFAULT_PAGE_SIZES, @@ -441,6 +449,7 @@ static const struct intel_device_info intel_cherryview_info = { .has_full_ppgtt = 1, .has_reset_engine = 1, .has_snoop = true, + .has_coherent_ggtt = false, .display_mmio_offset = VLV_DISPLAY_BASE, GEN_DEFAULT_PAGE_SIZES, GEN_CHV_PIPEOFFSETS, @@ -517,6 +526,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { .has_full_48bit_ppgtt = 1, \ .has_reset_engine = 1, \ .has_snoop = true, \ + .has_coherent_ggtt = false, \ .has_ipc = 1, \ GEN9_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_PIPEOFFSETS, \ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 633f9fbf72ea..07e8364d1a8c 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -106,6 +106,7 @@ enum intel_platform { func(has_resource_streamer); \ func(has_runtime_pm); \ func(has_snoop); \ + func(has_coherent_ggtt); \ func(unfenced_needs_alignment); \ func(cursor_needs_physical); \ func(hws_needs_physical); \ diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 7f5634ce8e88..a4446f452040 100644 
--- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -529,6 +529,28 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51 +/* + * Once upon a time we supposed that writes through the GGTT would be + * immediately in physical memory (once flushed out of the CPU path). However, + * on a few different processors and chipsets, this is not necessarily the case + * as the writes appear to be buffered internally. Thus a read of the backing + * storage (physical memory) via a different path (with different physical tags + * to the indirect write via the GGTT) will see stale values from before + * the GGTT write. Inside the kernel, we can for the most part keep track of + * the different read/write domains in use (e.g. set-domain), but the assumption + * of coherency is baked into the ABI, hence reporting its true state in this + * parameter. + * + * Reports true when writes via mmap_gtt are immediately visible following an + * lfence to flush the WCB. + * + * Reports false when writes via mmap_gtt are indeterminately delayed in an in + * internal buffer and are _not_ immediately visible to third parties accessing + * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC + * communications channel when reporting false is strongly disadvised. + */ +#define I915_PARAM_MMAP_GTT_COHERENT 52 + typedef struct drm_i915_getparam { __s32 param; /* -- GitLab From 6bd31b3798c83b611737097d98008663a2f5d065 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 19 Jul 2018 16:42:17 -0700 Subject: [PATCH 0007/1692] drm/i915: Remove unused "ret" variable. Just a small clean-up with no functional change, only removing a variable that is never actually used. Cc: Dhinakaran Pandiyan Signed-off-by: Rodrigo Vivi Reviewed-by: Reviewed-by: Nathan Ciobanu Link: https://patchwork.freedesktop.org/patch/msgid/20180719234217.7855-1-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_dp_mst.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 789a403e9f99..d88d0f5abdce 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -263,7 +263,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = intel_dig_port->base.port; - int ret; DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); @@ -274,9 +273,9 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, 1)) DRM_ERROR("Timed out waiting for ACT sent\n"); - ret = drm_dp_check_act_status(&intel_dp->mst_mgr); + drm_dp_check_act_status(&intel_dp->mst_mgr); - ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); + drm_dp_update_payload_part2(&intel_dp->mst_mgr); if (pipe_config->has_audio) intel_audio_codec_enable(encoder, pipe_config, conn_state); } -- GitLab From 7a72c78bdd0a1ea1d879610542679cc680398220 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 19 Jul 2018 17:31:55 -0700 Subject: [PATCH 0008/1692] drm/i915: Fix psr sink status report. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit First of all don't try to read dpcd if PSR is not even supported. But also, if read failed return -EIO instead of reporting via a backchannel. v2: fix dev_priv: At this level m->private is the connector. (CI/DK) don't convert dpcd read errors to EIO. 
(DK) Fixes: 5b7b30864d1d ("drm/i915/psr: Split sink status into a separate debugfs node") Cc: Chris Wilson Cc: Dhinakaran Pandiyan Cc: José Roberto de Souza Signed-off-by: Rodrigo Vivi Reviewed-by: Dhinakaran Pandiyan Link: https://patchwork.freedesktop.org/patch/msgid/20180720003155.16290-1-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b3aefd623557..59dc0610ea44 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2606,13 +2606,22 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) "sink internal error", }; struct drm_connector *connector = m->private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_dp *intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base); + int ret; + + if (!CAN_PSR(dev_priv)) { + seq_puts(m, "PSR Unsupported\n"); + return -ENODEV; + } if (connector->status != connector_status_connected) return -ENODEV; - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) == 1) { + ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); + + if (ret == 1) { const char *str = "unknown"; val &= DP_PSR_SINK_STATE_MASK; @@ -2620,7 +2629,7 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) str = sink_status[val]; seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); } else { - DRM_ERROR("dpcd read (at %u) failed\n", DP_PSR_STATUS); + return ret; } return 0; -- GitLab From 6f15a7de86c8cf2dc09fc9e6d07047efa40ef809 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Fri, 20 Jul 2018 14:42:42 -0700 Subject: [PATCH 0009/1692] drm/i915/dsc: Add missing _MMIO() from PPS registers This patch fixes the commit - <2efbb2f099fb> ("i915/dp/dsc: Add DSC PPS register definitions"), which did not have _MMIO() for DSCA and DSCC. v2: Fix typos. 
(manasi) v3: Change the commit message (Rodrigo) Cc: Rodrigi Vivi Cc: Manasi Navare Signed-off-by: Anusha Srivatsa Reviewed-by: Manasi Navare Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/1532122962-9068-1-git-send-email-anusha.srivatsa@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 76 ++++++++++++++++----------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8af945d8a995..73946055aa15 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10349,8 +10349,8 @@ enum skl_power_gate { #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) /* Icelake Display Stream Compression Registers */ -#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200 -#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00 +#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) +#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 @@ -10370,8 +10370,8 @@ enum skl_power_gate { #define DSC_VER_MIN_SHIFT 4 #define DSC_VER_MAJ (0x1 << 0) -#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204 -#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04 +#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204) +#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04) #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 #define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 @@ -10384,8 +10384,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) #define DSC_BPP(bpp) ((bpp) << 0) -#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208 -#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08 +#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208) +#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08) #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 #define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 @@ -10399,8 +10399,8 @@ enum skl_power_gate { #define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) #define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) -#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C -#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C +#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C) +#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C) #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C #define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C @@ -10414,8 +10414,8 @@ enum skl_power_gate { #define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) #define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) -#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210 -#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10 +#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210) +#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10) #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 #define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 @@ -10429,8 +10429,8 @@ enum skl_power_gate { #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) -#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214 -#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14 +#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214) +#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14) #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 #define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 
#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 @@ -10441,11 +10441,11 @@ enum skl_power_gate { #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) -#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) +#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) -#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218 -#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18 +#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218) +#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18) #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 #define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 @@ -10456,13 +10456,13 @@ enum skl_power_gate { #define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) -#define DSC_FLATNESS_MAX_QP(max_qp) (qp << 24) -#define DSC_FLATNESS_MIN_QP(min_qp) (qp << 16) +#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) +#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) #define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) #define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) -#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C -#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C +#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C) +#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C) #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C #define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C @@ -10476,8 +10476,8 @@ enum skl_power_gate { #define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) #define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) -#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220 -#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20 +#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220) +#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20) #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 #define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 @@ -10491,8 +10491,8 @@ enum skl_power_gate { #define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) #define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) -#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224 -#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24 +#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224) +#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24) #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 #define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 @@ -10506,8 +10506,8 @@ enum skl_power_gate { #define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) #define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) -#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228 -#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28 +#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228) +#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28) #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 #define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 @@ -10523,8 +10523,8 @@ enum skl_power_gate { #define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) #define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) -#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C -#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C +#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C) 
+#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C) #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C #define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C @@ -10536,8 +10536,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) -#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260 -#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60 +#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260) +#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60) #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 #define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 @@ -10549,8 +10549,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) -#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264 -#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64 +#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264) +#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64) #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 #define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 @@ -10562,8 +10562,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) -#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268 -#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68 +#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268) +#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68) #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 #define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 @@ -10575,8 +10575,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) -#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C -#define DSCC_PICTURE_PARAMETER_SET_15 0x6BA6C +#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C) +#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C) #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC #define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC @@ -10588,8 +10588,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) -#define DSCA_PICTURE_PARAMETER_SET_16 0x6B270 -#define DSCC_PICTURE_PARAMETER_SET_16 0x6BA70 +#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270) +#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70) #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 #define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 @@ -10601,7 +10601,7 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) #define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) -#define DSC_SLICE_CHUNK_SIZE(slice_chunk_aize) (slice_chunk_size << 0) +#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) /* Icelake Rate Control Buffer Threshold Registers */ #define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) -- GitLab From 4eaf317a60fbea0555b936035002ca9bd9b9105d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Winiarski?= Date: Thu, 12 Jul 2018 17:53:30 +0200 Subject: [PATCH 0010/1692] drm/i915/kvmgt: Fix compilation error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gvt_pin_guest_page extracted some of the gvt_dma_map_page functionality: commit 79e542f5af79 ("drm/i915/kvmgt: Support setting dma map 
for huge pages") And yet, part of it was reintroduced in: commit 39b4cbadb9a9 ("drm/i915/kvmgt: Check the pfn got from vfio_pin_pages") Causing kvmgt part to no longer build. Let's remove it. Reported-by: Tomasz Lis Signed-off-by: Michał Winiarski Cc: Changbin Du Cc: Zhenyu Wang Acked-by: Zhenyu Wang Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180712155330.32055-1-michal.winiarski@intel.com --- drivers/gpu/drm/i915/gvt/kvmgt.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 718ab307a500..4d2f53ae9f0f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -185,12 +185,6 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, if (ret) return ret; - if (!pfn_valid(pfn)) { - gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn); - vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1); - return -EINVAL; - } - /* Setup DMA mapping. */ *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL); ret = dma_mapping_error(dev, *dma_addr); -- GitLab From bb5ffe6fd58cd37f1cade411c4a00745ed0fbbd1 Mon Sep 17 00:00:00 2001 From: Nathan Ciobanu Date: Fri, 20 Jul 2018 14:44:12 -0700 Subject: [PATCH 0011/1692] drm/i915/dp: Limit link training clock recovery loop Limit the link training clock recovery loop to 10 attempts at LANEx_CR_DONE per DP 1.4 spec section 3.5.1.2.2 and 80 attempts for pre-DP 1.4 (4 voltage levels x 4 preemphasis levels x x 5 identical voltages tries). Some faulty USB-C MST hubs can cause us to get stuck in this loop indefinitely requesting something like: voltage swing: 0, pre-emphasis level: 2 voltage swing: 1, pre-emphasis level: 2 voltage swing: 0, pre-emphasis level: 3 over and over so max_vswing would never be reached, drm_dp_clock_recovery_ok() would never return true and voltage_tries would always get reset to 1. The driver sends those values to the hub but the hub keeps requesting new values every time. 
Changes in v2: - updated commit message (DK, Manasi) - defined DP_DP14_MAX_CR_TRIES (Marc) - made the loop iterate for max 10 times (Rodrigo, Marc) Changes in v3: - changed error message to use DP_DP14_MAX_CR_TRIES Changes in v4: - Updated the title to reflect the change - Updated the commit message - Added 80 attempts for pre-DP 1.4 devices Changes in v5: - Removed DP_DP14_MAX_CR_TRIES from drm v6: Updated comment to match kernel style (Rodrigo) Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Cc: Marc Herbert Cc: Manasi Navare Signed-off-by: Nathan Ciobanu Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180720214413.29506-1-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_dp_link_training.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 4da6e33c7fa1..299cad5632ed 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -129,7 +129,7 @@ static bool intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) { uint8_t voltage; - int voltage_tries, max_vswing_tries; + int voltage_tries, max_vswing_tries, cr_tries, max_cr_tries; uint8_t link_config[2]; uint8_t link_bw, rate_select; @@ -170,9 +170,20 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) return false; } + /* + * DP 1.4 spec clock recovery retries defined but + * for devices pre-DP 1.4 we set the retry limit + * to 4 (voltage levels) x 4 (preemphasis levels) x + * x 5 (same voltage retries) = 80 (max iterations) + */ + if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) + max_cr_tries = 10; + else + max_cr_tries = 80; + voltage_tries = 1; max_vswing_tries = 0; - for (;;) { + for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { uint8_t link_status[DP_LINK_STATUS_SIZE]; drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); @@ -216,6 +227,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) ++max_vswing_tries; } + DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries); + return false; } /* -- GitLab From 102506d52922345cf17af362b94b869cffefbbe1 Mon Sep 17 00:00:00 2001 From: Nathan Ciobanu Date: Fri, 20 Jul 2018 14:44:13 -0700 Subject: [PATCH 0012/1692] drm/i915/dp: Refactor max_vswing_tries variable Changes the type and renames the max_vswing_tries variable which was declared as an integer but used as a boolean making it easy to be confused with a counter. 
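The shape of the cleanup, using the names from the hunk below: an int that only ever holds 0 or 1,

	int max_vswing_tries = 0;
	...
	if (max_vswing_tries == 1) { ... }
	...
	if (intel_dp_link_max_vswing_reached(intel_dp))
		++max_vswing_tries;	/* reads like a counter, acts like a flag */

becomes an explicit flag:

	bool max_vswing_reached = false;
	...
	if (max_vswing_reached) { ... }
	...
	if (intel_dp_link_max_vswing_reached(intel_dp))
		max_vswing_reached = true;

No functional change is intended; only the intent of the variable is made obvious.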
Changes in v2: - updated the title and commit message - left the loop exit point in place v3: fix typo in title v4: renamed max_vswing to max_vswing_reached (Ville) Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Cc: Marc Herbert Signed-off-by: Nathan Ciobanu Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180720214413.29506-2-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_dp_link_training.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 299cad5632ed..07e128c7443c 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -129,7 +129,8 @@ static bool intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) { uint8_t voltage; - int voltage_tries, max_vswing_tries, cr_tries, max_cr_tries; + int voltage_tries, cr_tries, max_cr_tries; + bool max_vswing_reached = false; uint8_t link_config[2]; uint8_t link_bw, rate_select; @@ -182,7 +183,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) max_cr_tries = 80; voltage_tries = 1; - max_vswing_tries = 0; for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { uint8_t link_status[DP_LINK_STATUS_SIZE]; @@ -203,7 +203,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) return false; } - if (max_vswing_tries == 1) { + if (max_vswing_reached) { DRM_DEBUG_KMS("Max Voltage Swing reached\n"); return false; } @@ -224,7 +224,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) voltage_tries = 1; if (intel_dp_link_max_vswing_reached(intel_dp)) - ++max_vswing_tries; + max_vswing_reached = true; } DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries); -- GitLab From 6a2f59e45afc6277cb3e9c9dec466935bb8a8295 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 21 Jul 2018 13:50:37 +0100 Subject: [PATCH 0013/1692] drm/i915: Pull unpin map into vma release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A reasonably common operation is to pin the map of the vma alongside the vma itself for the lifetime of the vma, and so release both pins at the same time as destroying the vma. It is common enough to pull into the release function, making that central function more attractive to a couple of other callsites. The continual ulterior motive is to sweep over errors on module load aborting... 
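What this looks like at a typical call site (taken from the guc_shared_data_destroy() hunk below; the other converted callers follow the same pattern):

	/* before */
	i915_gem_object_unpin_map(guc->shared_data->obj);
	i915_vma_unpin_and_release(&guc->shared_data);

	/* after */
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);

Callers that never pinned a map simply pass 0 for the new flags argument.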
Testcase: igt/drv_module_reload/basic-reload-inject Signed-off-by: Chris Wilson Cc: Michał Winiarski Cc: Michal Wajdeczko Reviewed-by: Michał Winiarski Link: https://patchwork.freedesktop.org/patch/msgid/20180721125037.20127-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_perf.c | 10 ++++------ drivers/gpu/drm/i915/i915_vma.c | 5 ++++- drivers/gpu/drm/i915/i915_vma.h | 3 ++- drivers/gpu/drm/i915/intel_engine_cs.c | 18 +++--------------- drivers/gpu/drm/i915/intel_guc.c | 5 ++--- drivers/gpu/drm/i915/intel_guc_ads.c | 2 +- drivers/gpu/drm/i915/intel_guc_ct.c | 7 ++----- drivers/gpu/drm/i915/intel_guc_log.c | 2 +- drivers/gpu/drm/i915/intel_guc_submission.c | 10 ++++------ drivers/gpu/drm/i915/intel_lrc.c | 2 +- 10 files changed, 24 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 6bf10952c724..0376338d1f8d 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1338,14 +1338,12 @@ free_oa_buffer(struct drm_i915_private *i915) { mutex_lock(&i915->drm.struct_mutex); - i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj); - i915_vma_unpin(i915->perf.oa.oa_buffer.vma); - i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj); - - i915->perf.oa.oa_buffer.vma = NULL; - i915->perf.oa.oa_buffer.vaddr = NULL; + i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma, + I915_VMA_RELEASE_MAP); mutex_unlock(&i915->drm.struct_mutex); + + i915->perf.oa.oa_buffer.vaddr = NULL; } static void i915_oa_stream_destroy(struct i915_perf_stream *stream) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..274fd2a7bcb6 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -406,7 +406,7 @@ void i915_vma_unpin_iomap(struct i915_vma *vma) i915_vma_unpin(vma); } -void i915_vma_unpin_and_release(struct i915_vma **p_vma) +void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags) { struct i915_vma *vma; struct drm_i915_gem_object *obj; @@ -421,6 +421,9 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma) i915_vma_unpin(vma); i915_vma_close(vma); + if (flags & I915_VMA_RELEASE_MAP) + i915_gem_object_unpin_map(obj); + __i915_gem_object_release_unless_active(obj); } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index f06d66377107..af5296b015f5 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -138,7 +138,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *view); -void i915_vma_unpin_and_release(struct i915_vma **p_vma); +void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags); +#define I915_VMA_RELEASE_MAP BIT(0) static inline bool i915_vma_is_active(struct i915_vma *vma) { diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2d1952849d69..734a789688da 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -527,7 +527,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) { - i915_vma_unpin_and_release(&engine->scratch); + i915_vma_unpin_and_release(&engine->scratch, 0); } static void cleanup_phys_status_page(struct intel_engine_cs *engine) @@ -543,20 +543,8 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine) static void cleanup_status_page(struct intel_engine_cs 
*engine) { - struct i915_vma *vma; - struct drm_i915_gem_object *obj; - - vma = fetch_and_zero(&engine->status_page.vma); - if (!vma) - return; - - obj = vma->obj; - - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_gem_object_unpin_map(obj); - __i915_gem_object_release_unless_active(obj); + i915_vma_unpin_and_release(&engine->status_page.vma, + I915_VMA_RELEASE_MAP); } static int init_status_page(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 560c7406ae40..846d693ecb53 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -170,7 +170,7 @@ static int guc_shared_data_create(struct intel_guc *guc) vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma); + i915_vma_unpin_and_release(&vma, 0); return PTR_ERR(vaddr); } @@ -182,8 +182,7 @@ static int guc_shared_data_create(struct intel_guc *guc) static void guc_shared_data_destroy(struct intel_guc *guc) { - i915_gem_object_unpin_map(guc->shared_data->obj); - i915_vma_unpin_and_release(&guc->shared_data); + i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); } int intel_guc_init(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c index dcaa3fb71765..f0db62887f50 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/intel_guc_ads.c @@ -148,5 +148,5 @@ int intel_guc_ads_create(struct intel_guc *guc) void intel_guc_ads_destroy(struct intel_guc *guc) { - i915_vma_unpin_and_release(&guc->ads_vma); + i915_vma_unpin_and_release(&guc->ads_vma, 0); } diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index 371b6005954a..a52883e9146f 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -204,7 +204,7 @@ static int ctch_init(struct intel_guc *guc, return 0; err_vma: - i915_vma_unpin_and_release(&ctch->vma); + i915_vma_unpin_and_release(&ctch->vma, 0); err_out: CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", ctch->owner, err); @@ -214,10 +214,7 @@ static int ctch_init(struct intel_guc *guc, static void ctch_fini(struct intel_guc *guc, struct intel_guc_ct_channel *ctch) { - GEM_BUG_ON(!ctch->vma); - - i915_gem_object_unpin_map(ctch->vma->obj); - i915_vma_unpin_and_release(&ctch->vma); + i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP); } static int ctch_open(struct intel_guc *guc, diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 6da61a71d28f..d3ebdbc0182e 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -498,7 +498,7 @@ int intel_guc_log_create(struct intel_guc_log *log) void intel_guc_log_destroy(struct intel_guc_log *log) { - i915_vma_unpin_and_release(&log->vma); + i915_vma_unpin_and_release(&log->vma, 0); } int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 4aa5e6463e7b..195adbd0ebf7 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -317,7 +317,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc) vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma); + i915_vma_unpin_and_release(&vma, 0); return PTR_ERR(vaddr); } @@ -331,8 +331,7 @@ static int 
guc_stage_desc_pool_create(struct intel_guc *guc) static void guc_stage_desc_pool_destroy(struct intel_guc *guc) { ida_destroy(&guc->stage_ids); - i915_gem_object_unpin_map(guc->stage_desc_pool->obj); - i915_vma_unpin_and_release(&guc->stage_desc_pool); + i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP); } /* @@ -1008,7 +1007,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv, err_vaddr: i915_gem_object_unpin_map(client->vma->obj); err_vma: - i915_vma_unpin_and_release(&client->vma); + i915_vma_unpin_and_release(&client->vma, 0); err_id: ida_simple_remove(&guc->stage_ids, client->stage_id); err_client: @@ -1020,8 +1019,7 @@ static void guc_client_free(struct intel_guc_client *client) { unreserve_doorbell(client); guc_stage_desc_fini(client->guc, client); - i915_gem_object_unpin_map(client->vma->obj); - i915_vma_unpin_and_release(&client->vma); + i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); ida_simple_remove(&client->guc->stage_ids, client->stage_id); kfree(client); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 174479232e94..c52ef2817c96 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1657,7 +1657,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) { - i915_vma_unpin_and_release(&engine->wa_ctx.vma); + i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); } typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); -- GitLab From a5b22b5ed88bfb848d40d3c593f5506bdb75c882 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Jul 2018 12:11:02 +0100 Subject: [PATCH 0014/1692] drm/i915: Show stack (by WARN) for hitting forcewake errors On Sandybridge, we need a workaround to wait for the CPU thread to wake up before we are sure that we have enabled the GT power well. However, we do see the errors being reported and failed reads returning spurious results. To try and capture more details as it fails, promote the error into a WARN so we grab the stacktrace, and to try and reduce the frequency of error increase the timeout from 500us to 5ms. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180720111102.11549-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index b892ca8396e8..284be151f645 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -283,14 +283,24 @@ fw_domains_reset(struct drm_i915_private *i915, fw_domain_reset(i915, d); } +static inline u32 gt_thread_status(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG); + val &= GEN6_GT_THREAD_STATUS_CORE_MASK; + + return val; +} + static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) { - /* w/a for a sporadic read returning 0 by waiting for the GT + /* + * w/a for a sporadic read returning 0 by waiting for the GT * thread to wake up. 
*/ - if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & - GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500)) - DRM_ERROR("GT thread status wait timed out\n"); + WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000), + "GT thread status wait timed out\n"); } static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv, -- GitLab From a38bb309c2ce25a562819949a19fefa38ae8ab96 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Fri, 13 Jul 2018 12:43:13 -0700 Subject: [PATCH 0015/1692] drm/i915/icl: Add remaining registers and bitfields for MG PHY DDI This patch adds the remaining register definitions and bit fields required for MG PHy DDI buffer initializations and voltage swing programming for MG PHy DDI ports. While at it this patch also fixes the naming for previously defined MG PHY registers in original commit id (c92f47b5ec977a "drm/i915/icl: Add register defs for voltage swing sequences for MG PHY DDI"). Since the MG PHY registers are first defined in ICL platform, there is no need for _ICL prefix. v4 (from Paulo): add two white spaces to CRI_CALCINIT too. v3: * Fix register names, add spaces for MASK defines, correct the order of #defines (Paulo) v2: * Change the MG_TX_DRVCTL registers names to match the spec (Anusha) Cc: James Ausmus Reviewed-by: Paulo Zanoni Signed-off-by: Manasi Navare Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/1531510993-6606-1-git-send-email-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 270 +++++++++++++++++++------------- 1 file changed, 157 insertions(+), 113 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 73946055aa15..477e694b8cc4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1932,121 +1932,165 @@ enum i915_power_well_id { #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) -#define _ICL_MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \ +#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \ _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) -#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C -#define ICL_PORT_MG_TX1_LINK_PARAMS(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ - _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ - _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1) - -#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC -#define ICL_PORT_MG_TX2_LINK_PARAMS(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ - _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ - _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1) -#define CRI_USE_FS32 (1 << 5) - -#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C 
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C -#define ICL_PORT_MG_TX1_PISO_READLOAD(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ - _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ - _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1) - -#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC -#define ICL_PORT_MG_TX2_PISO_READLOAD(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ - _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ - _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1) -#define CRI_CALCINIT (1 << 1) - -#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 -#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 -#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 -#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 -#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 -#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 -#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 -#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 -#define ICL_PORT_MG_TX1_SWINGCTRL(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1, \ - _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2, \ - _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1) - -#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 -#define ICL_PORT_MG_TX2_SWINGCTRL(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1, \ - _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2, \ - _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1) -#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) -#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) - -#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1 0x168144 -#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1 0x168544 -#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2 0x169144 -#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT2 0x169544 -#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT3 0x16A144 -#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT3 0x16A544 -#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT4 0x16B144 -#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT4 0x16B544 -#define ICL_PORT_MG_TX1_DRVCTRL(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1, \ - _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2, \ - _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1) - -#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 -#define 
ICL_PORT_MG_TX2_DRVCTRL(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1, \ - _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2, \ - _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1) -#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) -#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) -#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) -#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) -#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) +#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C +#define MG_TX1_LINK_PARAMS(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX1LN1_PORT1) + +#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC +#define MG_TX2_LINK_PARAMS(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX2LN1_PORT1) +#define CRI_USE_FS32 (1 << 5) + +#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C +#define MG_TX1_PISO_READLOAD(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX1LN1_PORT1) + +#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC +#define MG_TX2_PISO_READLOAD(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX2LN1_PORT1) +#define CRI_CALCINIT (1 << 1) + +#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 +#define MG_TX1_SWINGCTRL(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ + MG_TX_SWINGCTRL_TX1LN0_PORT2, \ + MG_TX_SWINGCTRL_TX1LN1_PORT1) + +#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 +#define 
MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 +#define MG_TX2_SWINGCTRL(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ + MG_TX_SWINGCTRL_TX2LN0_PORT2, \ + MG_TX_SWINGCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) +#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) + +#define MG_TX_DRVCTRL_TX1LN0_TXPORT1 0x168144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT1 0x168544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT2 0x169144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT2 0x169544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT3 0x16A144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 +#define MG_TX1_DRVCTRL(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ + MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ + MG_TX_DRVCTRL_TX1LN1_TXPORT1) + +#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 +#define MG_TX2_DRVCTRL(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \ + MG_TX_DRVCTRL_TX2LN0_PORT2, \ + MG_TX_DRVCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) +#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) +#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) +#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) +#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) +#define CRI_LOADGEN_SEL(x) ((x) << 12) +#define CRI_LOADGEN_SEL_MASK (0x3 << 12) + +#define MG_CLKHUB_LN0_PORT1 0x16839C +#define MG_CLKHUB_LN1_PORT1 0x16879C +#define MG_CLKHUB_LN0_PORT2 0x16939C +#define MG_CLKHUB_LN1_PORT2 0x16979C +#define MG_CLKHUB_LN0_PORT3 0x16A39C +#define MG_CLKHUB_LN1_PORT3 0x16A79C +#define MG_CLKHUB_LN0_PORT4 0x16B39C +#define MG_CLKHUB_LN1_PORT4 0x16B79C +#define MG_CLKHUB(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \ + MG_CLKHUB_LN0_PORT2, \ + MG_CLKHUB_LN1_PORT1) +#define CFG_LOW_RATE_LKREN_EN (1 << 11) + +#define MG_TX_DCC_TX1LN0_PORT1 0x168110 +#define MG_TX_DCC_TX1LN1_PORT1 0x168510 +#define MG_TX_DCC_TX1LN0_PORT2 0x169110 +#define MG_TX_DCC_TX1LN1_PORT2 0x169510 +#define MG_TX_DCC_TX1LN0_PORT3 0x16A110 +#define MG_TX_DCC_TX1LN1_PORT3 0x16A510 +#define MG_TX_DCC_TX1LN0_PORT4 0x16B110 +#define MG_TX_DCC_TX1LN1_PORT4 0x16B510 +#define MG_TX1_DCC(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \ + MG_TX_DCC_TX1LN0_PORT2, \ + MG_TX_DCC_TX1LN1_PORT1) +#define MG_TX_DCC_TX2LN0_PORT1 0x168090 +#define MG_TX_DCC_TX2LN1_PORT1 0x168490 +#define MG_TX_DCC_TX2LN0_PORT2 0x169090 +#define MG_TX_DCC_TX2LN1_PORT2 0x169490 +#define MG_TX_DCC_TX2LN0_PORT3 0x16A090 +#define MG_TX_DCC_TX2LN1_PORT3 0x16A490 +#define MG_TX_DCC_TX2LN0_PORT4 0x16B090 +#define MG_TX_DCC_TX2LN1_PORT4 0x16B490 +#define MG_TX2_DCC(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \ + MG_TX_DCC_TX2LN0_PORT2, \ + MG_TX_DCC_TX2LN1_PORT1) +#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) +#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25) +#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24) /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. 
-- GitLab From 07685c827b2a020c88c25a6961c435050bbbe7b3 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Thu, 28 Jun 2018 15:35:44 -0700 Subject: [PATCH 0016/1692] drm/i915/icl: Implement voltage swing programming sequence for MG PHY DDI This sequence is used to setup voltage swing before enabling MG PHY DDI as well as for changing the voltage during DisplayPort Link training. For ICL, there are two types of DDIs. This sequence needs to be used for MG PHY DDI which is ports C-F. v6 (From Manasi): * Add programming for MG_CLKHUB and MG_TX_DCC as per the spec updates v5 (from Paulo): * Checkpatch. v4 (from Paulo): * Fix bogus error message * Fix copy+paste bugs (missing s/TX1/TX2/ after copy+paste) * Use the new mask names * Stay under 80 columns * Add some blank lines v3: * Clear the regs before writing (Paulo) v2: * Rename to MG PHY in the function def (Jani Nikula) * Rebase on top of new revision of other patches in series Cc: Rodrigo Vivi Cc: Jani Nikula Reviewed-by: Paulo Zanoni Signed-off-by: Manasi Navare Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/1530225344-20373-2-git-send-email-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/intel_ddi.c | 135 +++++++++++++++++++++++++++++-- 1 file changed, 129 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 39d66f8493fa..01c07a000464 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2464,7 +2464,128 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); } -static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level, +static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, + int link_clock, + u32 level) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + const struct icl_mg_phy_ddi_buf_trans *ddi_translations; + u32 n_entries, val; + int ln; + + n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); + ddi_translations = icl_mg_phy_ddi_translations; + /* The table does not have values for level 3 and level 9. */ + if (level >= n_entries || level == 3 || level == 9) { + DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", + level, n_entries - 2); + level = n_entries - 2; + } + + /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
*/ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_LINK_PARAMS(port, ln)); + val &= ~CRI_USE_FS32; + I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val); + + val = I915_READ(MG_TX2_LINK_PARAMS(port, ln)); + val &= ~CRI_USE_FS32; + I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val); + } + + /* Program MG_TX_SWINGCTRL with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_SWINGCTRL(port, ln)); + val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; + val |= CRI_TXDEEMPH_OVERRIDE_17_12( + ddi_translations[level].cri_txdeemph_override_17_12); + I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val); + + val = I915_READ(MG_TX2_SWINGCTRL(port, ln)); + val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; + val |= CRI_TXDEEMPH_OVERRIDE_17_12( + ddi_translations[level].cri_txdeemph_override_17_12); + I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val); + } + + /* Program MG_TX_DRVCTRL with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_DRVCTRL(port, ln)); + val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK); + val |= CRI_TXDEEMPH_OVERRIDE_5_0( + ddi_translations[level].cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_11_6( + ddi_translations[level].cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_EN; + I915_WRITE(MG_TX1_DRVCTRL(port, ln), val); + + val = I915_READ(MG_TX2_DRVCTRL(port, ln)); + val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK); + val |= CRI_TXDEEMPH_OVERRIDE_5_0( + ddi_translations[level].cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_11_6( + ddi_translations[level].cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_EN; + I915_WRITE(MG_TX2_DRVCTRL(port, ln), val); + + /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ + } + + /* + * Program MG_CLKHUB with value from frequency table + * In case of Legacy mode on MG PHY, both TX1 and TX2 enabled so use the + * values from table for which TX1 and TX2 enabled. 
+ */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_CLKHUB(port, ln)); + if (link_clock < 300000) + val |= CFG_LOW_RATE_LKREN_EN; + else + val &= ~CFG_LOW_RATE_LKREN_EN; + I915_WRITE(MG_CLKHUB(port, ln), val); + } + + /* Program the MG_TX_DCC based on the link frequency */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_DCC(port, ln)); + val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; + if (link_clock <= 500000) { + val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; + } else { + val |= CFG_AMI_CK_DIV_OVERRIDE_EN | + CFG_AMI_CK_DIV_OVERRIDE_VAL(1); + } + I915_WRITE(MG_TX1_DCC(port, ln), val); + + val = I915_READ(MG_TX2_DCC(port, ln)); + val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; + if (link_clock <= 500000) { + val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; + } else { + val |= CFG_AMI_CK_DIV_OVERRIDE_EN | + CFG_AMI_CK_DIV_OVERRIDE_VAL(1); + } + I915_WRITE(MG_TX2_DCC(port, ln), val); + } + + /* Program MG_TX_PISO_READLOAD with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_PISO_READLOAD(port, ln)); + val |= CRI_CALCINIT; + I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val); + + val = I915_READ(MG_TX2_PISO_READLOAD(port, ln)); + val |= CRI_CALCINIT; + I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val); + } +} + +static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, + int link_clock, + u32 level, enum intel_output_type type) { enum port port = encoder->port; @@ -2472,8 +2593,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level, if (port == PORT_A || port == PORT_B) icl_combo_phy_ddi_vswing_sequence(encoder, level, type); else - /* Not Implemented Yet */ - WARN_ON(1); + icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); } static uint32_t translate_signal_level(int signal_levels) @@ -2508,7 +2628,8 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp) int level = intel_ddi_dp_level(intel_dp); if (IS_ICELAKE(dev_priv)) - icl_ddi_vswing_sequence(encoder, level, encoder->type); + icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, + level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_vswing_sequence(encoder, level, encoder->type); else @@ -2689,7 +2810,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); if (IS_ICELAKE(dev_priv)) - icl_ddi_vswing_sequence(encoder, level, encoder->type); + icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, + level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_vswing_sequence(encoder, level, encoder->type); else if (IS_GEN9_LP(dev_priv)) @@ -2724,7 +2846,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); if (IS_ICELAKE(dev_priv)) - icl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI); + icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, + level, INTEL_OUTPUT_HDMI); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI); else if (IS_GEN9_LP(dev_priv)) -- GitLab From 3970c65c2b47c450f917bc8a29c5849563a95dfe Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Jul 2018 15:53:35 +0100 Subject: [PATCH 0017/1692] drm/i915: Skip repeated calls to i915_gem_set_wedged() If we already wedged, i915_gem_set_wedged() becomes a complicated no-op. 
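The change relies on the usual run-once idiom: test_and_set_bit() both sets the wedged flag and reports whether it was already set, so only the first caller carries on with the expensive teardown and every later caller takes the early exit. A rough user-space analogue of that shape, using C11 atomics instead of the kernel bitops (names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>

#define WEDGED_BIT	(1UL << 0)

static atomic_ulong error_flags;

static void set_wedged(void)
{
	/* atomic_fetch_or() returns the old value, like test_and_set_bit() */
	if (atomic_fetch_or(&error_flags, WEDGED_BIT) & WEDGED_BIT) {
		printf("already wedged, nothing left to do\n");
		return;
	}

	printf("wedging: stop submission, complete requests, wake waiters\n");
}

int main(void)
{
	set_wedged();	/* first call does the heavy lifting */
	set_wedged();	/* repeat calls bail out immediately */
	return 0;
}
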
References: https://bugs.freedesktop.org/show_bug.cgi?id=107343 Signed-off-by: Chris Wilson Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180723145335.24579-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8b52cb768a67..a4031fab57b0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3312,8 +3312,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) intel_engine_dump(engine, &p, "%s\n", engine->name); } - set_bit(I915_WEDGED, &i915->gpu_error.flags); - smp_mb__after_atomic(); + if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags)) + goto out; /* * First, stop submission to hw, but do not yet complete requests by @@ -3372,6 +3372,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) i915_gem_reset_finish_engine(engine); } +out: GEM_TRACE("end\n"); wake_up_all(&i915->gpu_error.reset_queue); -- GitLab From 4de737a26e6a251c86ff1e96667aebc5eecf9456 Mon Sep 17 00:00:00 2001 From: Nathan Ciobanu Date: Tue, 24 Jul 2018 15:33:32 -0700 Subject: [PATCH 0018/1692] drm/i915/dp: Improve clock recovery loop limit comment Clarifies the clock recovery loop limit comment that 80 max_cr_tries for pre-DP1.4 devices was chosen as a very tolerant upper bound. Assumptions made: - DP1.4 syncs should be smarter so they won't need more than 10 tries - pre-DP1.4 syncs should be compliant enough to not need that many tries (80) but we should tolerate any that may trigger this corner case Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Cc: Marc Herbert Suggested-by: Marc Herbert Signed-off-by: Nathan Ciobanu Reviewed-by: Marc Herbert Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/1532471612-30001-1-git-send-email-nathan.d.ciobanu@linux.intel.com --- drivers/gpu/drm/i915/intel_dp_link_training.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 07e128c7443c..a9f40985a621 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -172,10 +172,12 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) } /* - * DP 1.4 spec clock recovery retries defined but - * for devices pre-DP 1.4 we set the retry limit - * to 4 (voltage levels) x 4 (preemphasis levels) x - * x 5 (same voltage retries) = 80 (max iterations) + * The DP 1.4 spec defines the max clock recovery retries value + * as 10 but for pre-DP 1.4 devices we set a very tolerant + * retry limit of 80 (4 voltage levels x 4 preemphasis levels x + * x 5 identical voltage retries). Since the previous specs didn't + * define a limit and created the possibility of an infinite loop + * we want to prevent any sync from triggering that corner case. */ if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) max_cr_tries = 10; -- GitLab From b9fcddab4afbac67660410009828eae83b6f3a36 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Wed, 25 Jul 2018 12:59:27 -0700 Subject: [PATCH 0019/1692] drm/i915/icl: implement icl_digital_port_connected() Do like the other functions and check for the status bits. The "Hot Plug Detection" page from our documentation says we can't just use the ISR bits on the CPU side (North Display, which has the TC and TBT modes), so use the correct register: DFLEXDPSP, TC Live State field. v2: Rebase. 
v3: - Simplify true/false assignment (Rodrigo). - Reorganize is_gen if ladder (Rodrigo). - Don't use the ISR for TC/TBT CPU bits. v4: - Improve commit message wording (Lucas). v5: - COMMIT_LOG_LONG_LINE (Checkpatch). Cc: Animesh Manna Cc: Rodrigo Vivi Cc: Lucas De Marchi Reviewed-by: Lucas De Marchi (v3). Signed-off-by: Rodrigo Vivi Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180725195927.12059-1-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 8 +++++ drivers/gpu/drm/i915/intel_dp.c | 55 ++++++++++++++++++++++++++++++++- 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 477e694b8cc4..93de6f724e77 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7254,6 +7254,7 @@ enum { #define GEN11_TC3_HOTPLUG (1 << 18) #define GEN11_TC2_HOTPLUG (1 << 17) #define GEN11_TC1_HOTPLUG (1 << 16) +#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16)) #define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \ GEN11_TC3_HOTPLUG | \ GEN11_TC2_HOTPLUG | \ @@ -7262,6 +7263,7 @@ enum { #define GEN11_TBT3_HOTPLUG (1 << 2) #define GEN11_TBT2_HOTPLUG (1 << 1) #define GEN11_TBT1_HOTPLUG (1 << 0) +#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port)) #define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \ GEN11_TBT3_HOTPLUG | \ GEN11_TBT2_HOTPLUG | \ @@ -7634,6 +7636,8 @@ enum { #define SDE_GMBUS_ICP (1 << 23) #define SDE_DDIB_HOTPLUG_ICP (1 << 17) #define SDE_DDIA_HOTPLUG_ICP (1 << 16) +#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24)) +#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16)) #define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \ SDE_DDIA_HOTPLUG_ICP) #define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \ @@ -10698,4 +10702,8 @@ enum skl_power_gate { _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) +#define PORT_TX_DFLEXDPSP _MMIO(0x1638A0) +#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) +#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cd0f649b57a5..998d698788f9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4586,6 +4586,57 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder) return I915_READ(GEN8_DE_PORT_ISR) & bit; } +static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port) +{ + enum port port = intel_dig_port->base.port; + + return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); +} + +static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port) +{ + enum port port = intel_dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + bool is_legacy, is_typec, is_tbt; + u32 dpsp; + + is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port); + + /* + * The spec says we shouldn't be using the ISR bits for detecting + * between TC and TBT. We should use DFLEXDPSP. 
+ */ + dpsp = I915_READ(PORT_TX_DFLEXDPSP); + is_typec = dpsp & TC_LIVE_STATE_TC(tc_port); + is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port); + + WARN_ON(is_legacy + is_typec + is_tbt > 1); + + return is_legacy || is_typec || is_tbt; +} + +static bool icl_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + + switch (encoder->hpd_pin) { + case HPD_PORT_A: + case HPD_PORT_B: + return icl_combo_port_connected(dev_priv, dig_port); + case HPD_PORT_C: + case HPD_PORT_D: + case HPD_PORT_E: + case HPD_PORT_F: + return icl_tc_port_connected(dev_priv, dig_port); + default: + MISSING_CASE(encoder->hpd_pin); + return false; + } +} + /* * intel_digital_port_connected - is the specified port connected? * @encoder: intel_encoder @@ -4613,8 +4664,10 @@ bool intel_digital_port_connected(struct intel_encoder *encoder) return bdw_digital_port_connected(encoder); else if (IS_GEN9_LP(dev_priv)) return bxt_digital_port_connected(encoder); - else + else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) return spt_digital_port_connected(encoder); + else + return icl_digital_port_connected(encoder); } static struct edid * -- GitLab From 6075546f57f8fb56a95070adf9602687a01fd49b Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 24 Jul 2018 17:28:10 -0700 Subject: [PATCH 0020/1692] drm/i915/icl: store the port type for TC ports The type is detected based on the live status bits. Once detected, it's not supposed to be changed, so we have some sanity checks for that. v2: Rebase. Cc: Animesh Manna Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180725002813.6938-3-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/intel_display.h | 7 +++++ drivers/gpu/drm/i915/intel_dp.c | 40 ++++++++++++++++++++++++++-- drivers/gpu/drm/i915/intel_drv.h | 1 + 3 files changed, 46 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 9292001cdd14..0a79a46d5805 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -137,6 +137,13 @@ enum tc_port { I915_MAX_TC_PORTS }; +enum tc_port_type { + TC_PORT_UNKNOWN = 0, + TC_PORT_TYPEC, + TC_PORT_TBT, + TC_PORT_LEGACY, +}; + enum dpio_channel { DPIO_CH0, DPIO_CH1 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 998d698788f9..90c5ba6b222b 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4594,6 +4594,38 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); } +static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port, + bool is_legacy, bool is_typec, bool is_tbt) +{ + enum port port = intel_dig_port->base.port; + enum tc_port_type old_type = intel_dig_port->tc_type; + const char *type_str; + + WARN_ON(is_legacy + is_typec + is_tbt != 1); + + if (is_legacy) { + intel_dig_port->tc_type = TC_PORT_LEGACY; + type_str = "legacy"; + } else if (is_typec) { + intel_dig_port->tc_type = TC_PORT_TYPEC; + type_str = "typec"; + } else if (is_tbt) { + intel_dig_port->tc_type = TC_PORT_TBT; + type_str = "tbt"; + } else { + return; + } + + /* Types are not supposed to be changed at runtime. 
*/ + WARN_ON(old_type != TC_PORT_UNKNOWN && + old_type != intel_dig_port->tc_type); + + if (old_type != intel_dig_port->tc_type) + DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), + type_str); +} + static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, struct intel_digital_port *intel_dig_port) { @@ -4612,9 +4644,13 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, is_typec = dpsp & TC_LIVE_STATE_TC(tc_port); is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port); - WARN_ON(is_legacy + is_typec + is_tbt > 1); + if (!is_legacy && !is_typec && !is_tbt) + return false; + + icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec, + is_tbt); - return is_legacy || is_typec || is_tbt; + return true; } static bool icl_digital_port_connected(struct intel_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c275f91244a6..5e225d8ba09a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1164,6 +1164,7 @@ struct intel_digital_port { bool release_cl2_override; uint8_t max_lanes; enum intel_display_power_domain ddi_io_power_domain; + enum tc_port_type tc_type; void (*write_infoframe)(struct drm_encoder *encoder, const struct intel_crtc_state *crtc_state, -- GitLab From db7295c2c4efb56efab7f6fe0fc3c646d200630a Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Tue, 24 Jul 2018 17:28:11 -0700 Subject: [PATCH 0021/1692] drm/i915/icl: Update FIA supported lane count for hpd. In ICL, Flexible IO Adapter (FIA) muxes data and clocks of USB 3.1, tbt and display controller. In DP alt mode FIA configure the number of lanes and will be used apart from DPCD read to calculate max available lanes for DP enablement. v2 (from Paulo): Simple rebase. Reviewed-by: Anusha Srivatsa (v1). Reviewed-by: Rodrigo Vivi Signed-off-by: Animesh Manna [Paulo: significant rewrite of the patch.] 
Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180725002813.6938-4-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 3 +++ drivers/gpu/drm/i915/intel_dp.c | 33 ++++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 93de6f724e77..72acecaad5c1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10705,5 +10705,8 @@ enum skl_power_gate { #define PORT_TX_DFLEXDPSP _MMIO(0x1638A0) #define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) #define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) +#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) +#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) +#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 90c5ba6b222b..bb59e71d6f9c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -176,14 +176,45 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) return intel_dp->common_rates[intel_dp->num_common_rates - 1]; } +static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 lane_info; + + if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC) + return 4; + + lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & + DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); + + switch (lane_info) { + default: + MISSING_CASE(lane_info); + case 1: + case 2: + case 4: + case 8: + return 1; + case 3: + case 12: + return 2; + case 15: + return 4; + } +} + /* Theoretical max between source and sink */ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); int source_max = intel_dig_port->max_lanes; int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp); - return min(source_max, sink_max); + return min3(source_max, sink_max, fia_max); } int intel_dp_max_lane_count(struct intel_dp *intel_dp) -- GitLab From 340a44bef2342b0ff7334017e9e821645fa8ae43 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 24 Jul 2018 17:28:12 -0700 Subject: [PATCH 0022/1692] drm/i915/icl: program MG_DP_MODE Programming this register is part of the Enable Sequence for DisplayPort on ICL. Do as the spec says. v2: Simple rebase. 
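Both the previous patch's lane-count helper and the MG_DP_MODE programming below key off the same 4-bit lane assignment field read from PORT_TX_DFLEXDPSP, one bit per lane the FIA has muxed over to DP. A standalone sketch of the lane-count side of that decode, mirroring the switch in the patch (the helper name is made up for illustration):

#include <stdio.h>

static int fia_dp_lane_count(unsigned int lane_info)
{
	switch (lane_info & 0xf) {
	case 0x1: case 0x2: case 0x4: case 0x8:
		return 1;	/* a single lane assigned to DP */
	case 0x3: case 0xc:
		return 2;	/* lower or upper lane pair */
	case 0xf:
		return 4;	/* all four lanes */
	default:
		return 1;	/* unexpected pattern: fall back to one lane */
	}
}

int main(void)
{
	printf("0x3 -> x%d, 0xf -> x%d\n",
	       fia_dp_lane_count(0x3), fia_dp_lane_count(0xf));
	return 0;
}
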
Cc: Animesh Manna Reviewed-by: Maarten Lankhorst (v1) Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180725002813.6938-5-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 15 ++++++++ drivers/gpu/drm/i915/intel_ddi.c | 2 + drivers/gpu/drm/i915/intel_dp.c | 66 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 1 + 4 files changed, 84 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 72acecaad5c1..cf1d2bbb0613 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2092,6 +2092,21 @@ enum i915_power_well_id { #define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25) #define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24) +#define MG_DP_MODE_LN0_ACU_PORT1 0x1683A0 +#define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0 +#define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0 +#define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0 +#define MG_DP_MODE_LN0_ACU_PORT3 0x16A3A0 +#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 +#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 +#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 +#define MG_DP_MODE(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \ + MG_DP_MODE_LN0_ACU_PORT2, \ + MG_DP_MODE_LN1_ACU_PORT1) +#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) +#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) + /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. */ diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 01c07a000464..399c438bd210 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2809,6 +2809,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + icl_program_mg_dp_mode(intel_dp); + if (IS_ICELAKE(dev_priv)) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, encoder->type); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index bb59e71d6f9c..28de73be4507 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -229,6 +229,72 @@ intel_dp_link_required(int pixel_clock, int bpp) return DIV_ROUND_UP(pixel_clock * bpp, 8); } +void icl_program_mg_dp_mode(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + enum port port = intel_dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + u32 ln0, ln1, lane_info; + + if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) + return; + + ln0 = I915_READ(MG_DP_MODE(port, 0)); + ln1 = I915_READ(MG_DP_MODE(port, 1)); + + switch (intel_dig_port->tc_type) { + case TC_PORT_TYPEC: + ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + + lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & + DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); + + switch (lane_info) { + case 0x1: + case 0x4: + break; + case 0x2: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; + break; + case 0x3: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + case 0x8: + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; + break; + case 0xC: + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + case 0xF: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | 
+ MG_DP_MODE_CFG_DP_X2_MODE; + break; + default: + MISSING_CASE(lane_info); + } + break; + + case TC_PORT_LEGACY: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + break; + + default: + MISSING_CASE(intel_dig_port->tc_type); + return; + } + + I915_WRITE(MG_DP_MODE(port, 0), ln0); + I915_WRITE(MG_DP_MODE(port, 1), ln1); +} + int intel_dp_max_data_rate(int max_link_clock, int max_lanes) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5e225d8ba09a..4e5b00052b5b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1714,6 +1714,7 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); +void icl_program_mg_dp_mode(struct intel_dp *intel_dp); void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, -- GitLab From bc334d914eeee02eddefd7be533acafd9a042ade Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 24 Jul 2018 17:28:13 -0700 Subject: [PATCH 0023/1692] drm/i915/icl: toggle PHY clock gating around link training The Gen11 TypeC PHY DDI Buffer chapter, PHY Clock Gating Programming section says that PHY clock gating should be disabled before starting voltage swing programming, then enabled after any link training is complete. v2: Simple rebase. Cc: Animesh Manna Cc: Manasi Navare Reviewed-by: Maarten Lankhorst (v1) Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180725002813.6938-6-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 20 ++++++++++ drivers/gpu/drm/i915/intel_ddi.c | 3 ++ drivers/gpu/drm/i915/intel_dp.c | 66 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 2 + 4 files changed, 91 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cf1d2bbb0613..5530c470f30d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2106,6 +2106,26 @@ enum i915_power_well_id { MG_DP_MODE_LN1_ACU_PORT1) #define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) #define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) +#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5) +#define MG_DP_MODE_CFG_TRPWR_GATING (1 << 4) +#define MG_DP_MODE_CFG_CLNPWR_GATING (1 << 3) +#define MG_DP_MODE_CFG_DIGPWR_GATING (1 << 2) +#define MG_DP_MODE_CFG_GAONPWR_GATING (1 << 1) + +#define MG_MISC_SUS0_PORT1 0x168814 +#define MG_MISC_SUS0_PORT2 0x169814 +#define MG_MISC_SUS0_PORT3 0x16A814 +#define MG_MISC_SUS0_PORT4 0x16B814 +#define MG_MISC_SUS0(tc_port) \ + _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2)) +#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK (3 << 14) +#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x) ((x) << 14) +#define MG_MISC_SUS0_CFG_TR2PWR_GATING (1 << 12) +#define MG_MISC_SUS0_CFG_CL2PWR_GATING (1 << 11) +#define MG_MISC_SUS0_CFG_GAONPWR_GATING (1 << 10) +#define MG_MISC_SUS0_CFG_TRPWR_GATING (1 << 7) +#define MG_MISC_SUS0_CFG_CL1PWR_GATING (1 << 6) +#define MG_MISC_SUS0_CFG_DGPWR_GATING (1 << 5) /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. 
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 399c438bd210..0adc043529f2 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2810,6 +2810,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(intel_dp); + icl_disable_phy_clock_gating(dig_port); if (IS_ICELAKE(dev_priv)) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, @@ -2828,6 +2829,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) intel_dp_stop_link_train(intel_dp); + icl_enable_phy_clock_gating(dig_port); + intel_ddi_enable_pipe_clock(crtc_state); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 28de73be4507..cc33d7c6ba19 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -295,6 +295,72 @@ void icl_program_mg_dp_mode(struct intel_dp *intel_dp) I915_WRITE(MG_DP_MODE(port, 1), ln1); } +void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; + u32 val; + int i; + + if (tc_port == PORT_TC_NONE) + return; + + for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { + val = I915_READ(mg_regs[i]); + val |= MG_DP_MODE_CFG_TR2PWR_GATING | + MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | + MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING; + I915_WRITE(mg_regs[i], val); + } + + val = I915_READ(MG_MISC_SUS0(tc_port)); + val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | + MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING; + I915_WRITE(MG_MISC_SUS0(tc_port), val); +} + +void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; + u32 val; + int i; + + if (tc_port == PORT_TC_NONE) + return; + + for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { + val = I915_READ(mg_regs[i]); + val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | + MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | + MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING); + I915_WRITE(mg_regs[i], val); + } + + val = I915_READ(MG_MISC_SUS0(tc_port)); + val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | + MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING); + I915_WRITE(MG_MISC_SUS0(tc_port), val); +} + int intel_dp_max_data_rate(int max_link_clock, int max_lanes) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4e5b00052b5b..99a5f5be5b82 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1715,6 +1715,8 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, unsigned int 
frontbuffer_bits); void icl_program_mg_dp_mode(struct intel_dp *intel_dp); +void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port); +void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port); void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, -- GitLab From 45ef40aab72e21eb81147a6e8a2bca863f0234fd Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Wed, 18 Jul 2018 10:19:42 -0700 Subject: [PATCH 0024/1692] drm/i915/mst: Do not retrain new links The short pulse handler checks if channel equalization is okay and goes onto retrain a link if there are active MST links. This retraining path is not meant for new MST connections, but due to a bug elsewhere, if active_mst_links is < 0 the boolean check for active_mst_links passes and we proceed to retrain a new link. This results in a sequence of failed link training attempts, most likely due to the hardware not setup for link training at that point i.e., missing the DDI pre_enable sequence. [ 80.301272] [drm:intel_dp_check_mst_status] channel EQ not ok, retraining [ 80.301312] [drm:intel_ddi_prepare_link_retrain] *ERROR* Timeout waiting for DDI BUF C idle bit The above error gives us a hint something went wrong before link training started. Check for a positive value of active_mst_links and throw in a warning for invalid active_mst_links as debug aid. Cc: Nathan Ciobanu Cc: Rodrigo Vivi Signed-off-by: Dhinakaran Pandiyan Tested-by: Nathan Ciobanu Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180718171943.3246-1-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_dp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cc33d7c6ba19..ac59590b281b 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4253,12 +4253,14 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) int ret = 0; int retry; bool handled; + + WARN_ON_ONCE(intel_dp->active_mst_links < 0); bret = intel_dp_get_sink_irq_esi(intel_dp, esi); go_again: if (bret == true) { /* check link status - esi[10] = 0x200c */ - if (intel_dp->active_mst_links && + if (intel_dp->active_mst_links > 0 && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); intel_dp_start_link_train(intel_dp); -- GitLab From 65172699a8bd9956705f71fb8b66b1068a1bb5cd Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Wed, 18 Jul 2018 10:19:43 -0700 Subject: [PATCH 0025/1692] drm/i915/mst: Continue state updates even if AUX writes fail. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are too late in the enabling sequence to back out cleanly, not updating state tracking variables, like intel_dp->active_mst_links in this instance, results in incorrect behaviour further along. 
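A stripped-down sketch of the bookkeeping problem, not the driver code: the disable path always decrements the active-link counter, so returning early before the increment whenever an AUX write fails leaves the counter negative, and the "any active MST links?" check from the previous patch then misfires.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int active_mst_links;	/* illustrative stand-in */

static void pre_enable(bool vcpi_ok)
{
	if (!vcpi_ok)
		fprintf(stderr, "failed to allocate vcpi\n");
	active_mst_links++;	/* update state tracking even on failure */
}

static void post_disable(void)
{
	active_mst_links--;
}

int main(void)
{
	pre_enable(false);	/* AUX write failed, state still tracked */
	post_disable();
	assert(active_mst_links == 0);	/* would be -1 with an early return */
	return 0;
}
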
v2: Fixed int v/s bool comparison Cc: Ville Syrjälä Cc: Rodrigo Vivi Cc: Nathan Ciobanu Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107281 Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Nathan Ciobanu Tested-by: Nathan Ciobanu Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180718171943.3246-2-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_dp_mst.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 18c65f8e4fe8..352e5216cc65 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -241,11 +241,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, connector->port, pipe_config->pbn, pipe_config->dp_m_n.tu); - if (ret == false) { + if (!ret) DRM_ERROR("failed to allocate vcpi\n"); - return; - } - intel_dp->active_mst_links++; temp = I915_READ(DP_TP_STATUS(port)); -- GitLab From 406bc5633c6b1c7e7a86230db312ee34e785a8f1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 26 Jul 2018 11:47:59 +0100 Subject: [PATCH 0026/1692] drm/i915: Avoid computing tile_row_size() for untiled objects i915_gem_tile_height() asserts that the object is tiled, but inside the error printer for the selftest we computed the row size regardless of tiling, tripping over the assert. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180726104759.8684-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index c69cbd5aed52..d9eca1b02aee 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -282,7 +282,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, view.partial.offset, view.partial.size, vma->size >> PAGE_SHIFT, - tile_row_pages(obj), + tile->tiling ? tile_row_pages(obj) : 0, vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, offset >> PAGE_SHIFT, (unsigned int)offset_in_page(offset), -- GitLab From d899aceb60912c9f1d7e1d7a3b388f116baf1b44 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 25 Jul 2018 16:54:47 +0100 Subject: [PATCH 0027/1692] drm/i915: Mark up object tiling-and-stride getters as const For that little bit of defense against a tired programmer. 
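What the const qualifier buys, in a toy example (the struct and mask here are illustrative, not the i915 definitions): a stray write inside a getter becomes a compile-time error instead of silent state corruption.

struct obj {
	unsigned int tiling_and_stride;
};

#define TILING_MASK	0x3u

static unsigned int get_tiling(const struct obj *obj)
{
	/* obj->tiling_and_stride = 0;	<-- now rejected by the compiler */
	return obj->tiling_and_stride & TILING_MASK;
}

int main(void)
{
	struct obj o = { .tiling_and_stride = 0x41 };

	return get_tiling(&o) == 0x1 ? 0 : 1;
}
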
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180725155447.11909-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_object.h | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a4031fab57b0..0946e1932907 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1911,7 +1911,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, return 0; } -static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) +static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) { return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; } @@ -1970,7 +1970,7 @@ int i915_gem_mmap_gtt_version(void) } static inline struct i915_ggtt_view -compute_partial_view(struct drm_i915_gem_object *obj, +compute_partial_view(const struct drm_i915_gem_object *obj, pgoff_t page_offset, unsigned int chunk) { diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 83e5e01fa9ea..a6dd7c46de0d 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -421,19 +421,19 @@ i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) } static inline unsigned int -i915_gem_object_get_tiling(struct drm_i915_gem_object *obj) +i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj) { return obj->tiling_and_stride & TILING_MASK; } static inline bool -i915_gem_object_is_tiled(struct drm_i915_gem_object *obj) +i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj) { return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; } static inline unsigned int -i915_gem_object_get_stride(struct drm_i915_gem_object *obj) +i915_gem_object_get_stride(const struct drm_i915_gem_object *obj) { return obj->tiling_and_stride & STRIDE_MASK; } @@ -446,13 +446,13 @@ i915_gem_tile_height(unsigned int tiling) } static inline unsigned int -i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj) +i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj) { return i915_gem_tile_height(i915_gem_object_get_tiling(obj)); } static inline unsigned int -i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj) +i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj) { return (i915_gem_object_get_stride(obj) * i915_gem_object_get_tile_height(obj)); -- GitLab From 52dda80d62dff39979fb407d67b7c9fc02381589 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 26 Jul 2018 09:50:31 +0100 Subject: [PATCH 0028/1692] drm/i915: Protect guc_fini_wq() against module load abort MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prevent [ 397.873143] general protection fault: 0000 [#1] PREEMPT SMP PTI [ 397.873154] CPU: 4 PID: 4799 Comm: drv_module_relo Tainted: G U 4.18.0-rc6-CI-CI_DRM_4534+ #1 [ 397.873162] Hardware name: Micro-Star International Co., Ltd. 
MS-7B54/Z370M MORTAR (MS-7B54), BIOS 1.10 12/28/2017 [ 397.873175] RIP: 0010:__lock_acquire+0xf6/0x1b50 [ 397.873179] Code: 85 c0 4c 8b 9d 40 ff ff ff 8b 8d 38 ff ff ff 44 8b 8d 30 ff ff ff 4c 8b 85 28 ff ff ff 44 8b 95 24 ff ff ff 0f 84 54 03 00 00 ff 80 38 01 00 00 8b 15 45 8c 59 02 45 8b bc 24 70 08 00 00 85 [ 397.873240] RSP: 0018:ffffc90000497b40 EFLAGS: 00010002 [ 397.873246] RAX: 6b6b6b6b6b6b6b6b RBX: 0000000000000001 RCX: 0000000000000000 [ 397.873252] RDX: 0000000000000046 RSI: 0000000000000000 RDI: 0000000000000000 [ 397.873258] RBP: ffffc90000497c20 R08: ffffffff810a25e9 R09: 0000000000000000 [ 397.873264] R10: 0000000000000000 R11: ffff880255c63c28 R12: ffff8801093b2840 [ 397.873270] R13: 0000000000000001 R14: 0000000000000001 R15: 0000000000000246 [ 397.873277] FS: 00007faf88d71980(0000) GS:ffff880266300000(0000) knlGS:0000000000000000 [ 397.873284] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 397.873289] CR2: 000055d866c9ca10 CR3: 000000025472e006 CR4: 00000000003606e0 [ 397.873295] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 397.873301] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 397.873308] Call Trace: [ 397.873318] ? lock_acquire+0xa6/0x210 [ 397.873323] lock_acquire+0xa6/0x210 [ 397.873331] ? drain_workqueue+0x19/0x180 [ 397.873339] __mutex_lock+0x89/0x980 [ 397.873346] ? drain_workqueue+0x19/0x180 [ 397.873352] ? _raw_spin_unlock_irqrestore+0x4c/0x60 [ 397.873359] ? trace_hardirqs_on_caller+0xe0/0x1b0 [ 397.873365] ? drain_workqueue+0x19/0x180 [ 397.873373] ? debug_object_active_state+0x127/0x150 [ 397.873381] ? drain_workqueue+0x19/0x180 [ 397.873387] drain_workqueue+0x19/0x180 [ 397.873395] destroy_workqueue+0x12/0x1f0 [ 397.873476] intel_guc_fini_misc+0x36/0x90 [i915] [ 397.873540] i915_gem_fini+0x91/0x100 [i915] [ 397.873588] i915_driver_unload+0xd2/0x110 [i915] [ 397.873638] i915_pci_remove+0x19/0x30 [i915] [ 397.873646] pci_device_remove+0x36/0xb0 [ 397.873653] device_release_driver_internal+0x185/0x250 [ 397.873660] driver_detach+0x35/0x70 [ 397.873668] bus_remove_driver+0x53/0xd0 [ 397.873675] pci_unregister_driver+0x25/0xa0 [ 397.873683] __se_sys_delete_module+0x162/0x210 [ 397.873691] ? do_syscall_64+0xd/0x190 [ 397.873697] do_syscall_64+0x55/0x190 [ 397.873704] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 397.873710] RIP: 0033:0x7faf884231b7 [ 397.873714] Code: 73 01 c3 48 8b 0d d1 8c 2c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 b8 b0 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d a1 8c 2c 00 f7 d8 64 89 01 48 [ 397.873775] RSP: 002b:00007ffda4e98cf8 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0 [ 397.873784] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007faf884231b7 [ 397.873790] RDX: 0000000000000000 RSI: 0000000000000800 RDI: 000055fbb18f1bd8 [ 397.873796] RBP: 000055fbb18f1b70 R08: 000055fbb18f1bdc R09: 00007ffda4e98d38 [ 397.873802] R10: 00007ffda4e97cf4 R11: 0000000000000206 R12: 000055fbb0d32470 [ 397.873808] R13: 00007ffda4e992e0 R14: 0000000000000000 R15: 0000000000000000 v2: It's use-after-free; not a NULL pointer. 
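The teardown below leans on the fetch_and_zero() helper so it only destroys workqueues that were actually created and clears each pointer as it takes ownership, which makes the routine safe against a partially aborted init. A user-space analogue of that shape (the macro is a simplified stand-in for the kernel helper, and free() stands in for destroy_workqueue()):

#include <stdlib.h>

#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __val = *(ptr);	\
	*(ptr) = NULL;				\
	__val;					\
})

struct guc_like {
	void *preempt_wq;	/* may be NULL if init was aborted early */
	void *log_flush_wq;
};

static void fini_wq(struct guc_like *guc)
{
	void *wq;

	wq = fetch_and_zero(&guc->preempt_wq);
	if (wq)
		free(wq);

	wq = fetch_and_zero(&guc->log_flush_wq);
	if (wq)
		free(wq);
}

int main(void)
{
	struct guc_like guc = { .log_flush_wq = malloc(16) };

	fini_wq(&guc);	/* partial init: only tears down what exists */
	fini_wq(&guc);	/* calling again is harmless, both are NULL now */
	return 0;
}
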
Testcase: igt/drv_module_reload/basic-reload-inject Signed-off-by: Chris Wilson Cc: Michał Winiarski Cc: Michal Wajdeczko Reviewed-by: Michał Winiarski Link: https://patchwork.freedesktop.org/patch/msgid/20180726085033.4044-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_guc.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 846d693ecb53..3082d7670f05 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -128,13 +128,15 @@ static int guc_init_wq(struct intel_guc *guc) static void guc_fini_wq(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct workqueue_struct *wq; - if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) && - USES_GUC_SUBMISSION(dev_priv)) - destroy_workqueue(guc->preempt_wq); + wq = fetch_and_zero(&guc->preempt_wq); + if (wq) + destroy_workqueue(wq); - destroy_workqueue(guc->log.relay.flush_wq); + wq = fetch_and_zero(&guc->log.relay.flush_wq); + if (wq) + destroy_workqueue(wq); } int intel_guc_init_misc(struct intel_guc *guc) -- GitLab From 7ed43df720c007d60bee6d81da07bcdc7e4a55ae Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 26 Jul 2018 09:50:32 +0100 Subject: [PATCH 0029/1692] drm/i915: Restore sane defaults for KMS on GEM error load MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we fail during GEM initialisation, we scrub the HW state by performing a device level GPU resuet. However, we want to leave the system in a usable state (with functioning KMS but no GEM) so after scrubbing the HW state, we need to restore some sane defaults and re-enable the low-level common parts of the GPU (such as the GMCH). v2: Restore GTT entries. Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180726085033.4044-2-chris@chris-wilson.co.uk Reviewed-by: Michał Winiarski --- drivers/gpu/drm/i915/i915_gem.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0946e1932907..3ca6a3364085 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -5598,6 +5598,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv) i915_gem_cleanup_userptr(dev_priv); if (ret == -EIO) { + mutex_lock(&dev_priv->drm.struct_mutex); + /* * Allow engine initialisation to fail by marking the GPU as * wedged. But we only want to do this where the GPU is angry, @@ -5608,7 +5610,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv) "Failed to initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(dev_priv); } - ret = 0; + + /* Minimal basic recovery for KMS */ + ret = i915_ggtt_enable_hw(dev_priv); + i915_gem_restore_gtt_mappings(dev_priv); + i915_gem_restore_fences(dev_priv); + intel_init_clock_gating(dev_priv); + + mutex_unlock(&dev_priv->drm.struct_mutex); } i915_gem_drain_freed_objects(dev_priv); -- GitLab From ec5b65a97c60f482bd23d513ce8c398797d40156 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 26 Jul 2018 09:50:33 +0100 Subject: [PATCH 0030/1692] drm/i915: Don't disable the GPU for older gen on wedging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we issue a device level GPU reset on the older gen, it will disable key components of the GMCH and the display engine. 
The purpose of wedging is to simply prevent further GEM usage without disabling KMS, so we need to be careful when we do issue the reset on wedging. Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180726085033.4044-3-chris@chris-wilson.co.uk Reviewed-by: Michał Winiarski --- drivers/gpu/drm/i915/i915_gem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3ca6a3364085..460f256114f7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3329,7 +3329,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) i915->caps.scheduler = 0; /* Even if the GPU reset fails, it should still stop the engines */ - intel_gpu_reset(i915, ALL_ENGINES); + if (INTEL_GEN(i915) >= 5) + intel_gpu_reset(i915, ALL_ENGINES); /* * Make sure no one is running the old callback before we proceed with -- GitLab From ab84a110490d38d40780113a1cdfce03b1cdec13 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Jul 2018 20:47:45 +0100 Subject: [PATCH 0031/1692] drm/i915/selftests: Use a full emulation of a user ppgtt context To test eviction from a ppgtt, we just want a ppgtt i.e. something other than the Global GTT which is shared and used by the kernel for HW features like fencing and scanout. However, we also need it to pass !i915_is_ggtt() and the simplest way is to emulate a full user context rather than the internal kernel context that is used for the GGTT. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180719194746.19111-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 65d66cdedd26..b2d6d15f025a 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -1144,19 +1144,27 @@ static int igt_reset_evict_ppgtt(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx; + struct drm_file *file; int err; + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + mutex_lock(&i915->drm.struct_mutex); - ctx = kernel_context(i915); + ctx = live_context(i915, file); mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } err = 0; if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */ err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm); - kernel_context_close(ctx); +out: + mock_file_free(i915, file); return err; } -- GitLab From 6dc17d69f83ec315157b76fbf47d4379e1266cef Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Jul 2018 20:47:46 +0100 Subject: [PATCH 0032/1692] drm/i915/selftests: Exercise resetting in the middle of a wait-on-fence On older HW, gen2/3, fence registers are used for detiling GPU commands and as such changing those registers requires serialisation with the requests on the GPU. Anything running on the GPU is subject to a hang, and so we must be able to recover cleanly in the middle of a stuck wait on a fence register. We can simulate using the fence on the GPU simply by marking the fence as active on the request for this vma, the interface being common to all gen, thus broadening the test. 
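Condensed from the hunks that follow, the sequence the selftest now exercises is: tile the object, pin its GGTT vma, grab a fence register for it, and mark the vma active on a request with EXEC_OBJECT_NEEDS_FENCE. An illustrative-only sketch of that shape (error paths trimmed to the essentials):

	/* Sketch of the fence-on-GPU emulation used by the test below. */
	static int use_fence_on_gpu(struct drm_i915_gem_object *obj,
				    struct i915_vma *vma,
				    struct i915_request *rq)
	{
		int err;

		/* X-tiled mappings need a fence register for detiling */
		err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
		if (err)
			return err;

		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
		if (err)
			return err;

		err = i915_vma_pin_fence(vma);
		if (err == 0) {
			/* tie the fence to the request so a reset must cope with it */
			err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_NEEDS_FENCE);
			i915_vma_unpin_fence(vma);
		}

		i915_vma_unpin(vma);
		return err;
	}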
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180719194746.19111-2-chris@chris-wilson.co.uk --- .../gpu/drm/i915/selftests/intel_hangcheck.c | 85 +++++++++++++++++-- 1 file changed, 77 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index b2d6d15f025a..db378226ac10 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -1018,8 +1018,41 @@ static int evict_vma(void *data) return err; } +static int evict_fence(void *data) +{ + struct evict_vma *arg = data; + struct drm_i915_private *i915 = arg->vma->vm->i915; + int err; + + complete(&arg->completion); + + mutex_lock(&i915->drm.struct_mutex); + + /* Mark the fence register as dirty to force the mmio update. */ + err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512); + if (err) { + pr_err("Invalid Y-tiling settings; err:%d\n", err); + goto out_unlock; + } + + err = i915_vma_pin_fence(arg->vma); + if (err) { + pr_err("Unable to pin Y-tiled fence; err:%d\n", err); + goto out_unlock; + } + + i915_vma_unpin_fence(arg->vma); + +out_unlock: + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + static int __igt_reset_evict_vma(struct drm_i915_private *i915, - struct i915_address_space *vm) + struct i915_address_space *vm, + int (*fn)(void *), + unsigned int flags) { struct drm_i915_gem_object *obj; struct task_struct *tsk = NULL; @@ -1040,12 +1073,20 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, if (err) goto unlock; - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = i915_gem_object_create_internal(i915, SZ_1M); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto fini; } + if (flags & EXEC_OBJECT_NEEDS_FENCE) { + err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512); + if (err) { + pr_err("Invalid X-tiling settings; err:%d\n", err); + goto out_obj; + } + } + arg.vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(arg.vma)) { err = PTR_ERR(arg.vma); @@ -1059,11 +1100,28 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, } err = i915_vma_pin(arg.vma, 0, 0, - i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER); - if (err) + i915_vma_is_ggtt(arg.vma) ? 
+ PIN_GLOBAL | PIN_MAPPABLE : + PIN_USER); + if (err) { + i915_request_add(rq); goto out_obj; + } + + if (flags & EXEC_OBJECT_NEEDS_FENCE) { + err = i915_vma_pin_fence(arg.vma); + if (err) { + pr_err("Unable to pin X-tiled fence; err:%d\n", err); + i915_vma_unpin(arg.vma); + i915_request_add(rq); + goto out_obj; + } + } - err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE); + err = i915_vma_move_to_active(arg.vma, rq, flags); + + if (flags & EXEC_OBJECT_NEEDS_FENCE) + i915_vma_unpin_fence(arg.vma); i915_vma_unpin(arg.vma); i915_request_get(rq); @@ -1086,7 +1144,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, init_completion(&arg.completion); - tsk = kthread_run(evict_vma, &arg, "igt/evict_vma"); + tsk = kthread_run(fn, &arg, "igt/evict_vma"); if (IS_ERR(tsk)) { err = PTR_ERR(tsk); tsk = NULL; @@ -1137,7 +1195,8 @@ static int igt_reset_evict_ggtt(void *arg) { struct drm_i915_private *i915 = arg; - return __igt_reset_evict_vma(i915, &i915->ggtt.vm); + return __igt_reset_evict_vma(i915, &i915->ggtt.vm, + evict_vma, EXEC_OBJECT_WRITE); } static int igt_reset_evict_ppgtt(void *arg) @@ -1161,13 +1220,22 @@ static int igt_reset_evict_ppgtt(void *arg) err = 0; if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm); + err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm, + evict_vma, EXEC_OBJECT_WRITE); out: mock_file_free(i915, file); return err; } +static int igt_reset_evict_fence(void *arg) +{ + struct drm_i915_private *i915 = arg; + + return __igt_reset_evict_vma(i915, &i915->ggtt.vm, + evict_fence, EXEC_OBJECT_NEEDS_FENCE); +} + static int wait_for_others(struct drm_i915_private *i915, struct intel_engine_cs *exclude) { @@ -1417,6 +1485,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_wait), SUBTEST(igt_reset_evict_ggtt), SUBTEST(igt_reset_evict_ppgtt), + SUBTEST(igt_reset_evict_fence), SUBTEST(igt_handle_error), }; bool saved_hangcheck; -- GitLab From 7a859c655d8f9e83d95ad8e4722c0da6b29590d6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 27 Jul 2018 10:18:55 +0100 Subject: [PATCH 0033/1692] drm/i915: Eliminate use of PAGE_SIZE as a virtual alignment Using PAGE_SIZE for virtual offset alignment is superfluous as it is equal to the minimum gtt alignment and so equivalent to 0. It is also the wrong value to use as we stopped using physical page constructs for the virtual GTT, i.e. it would be preferrable to use I915_GTT_PAGE_SIZE and in these cases merely imply I915_GTT_MIN_ALIGNMENT. 
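Put differently, the alignment argument of i915_vma_pin() is only a lower bound on top of the minimum GTT alignment, so the two calls below (illustrative) request exactly the same placement:

	/* passing the minimum alignment explicitly ... */
	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);

	/* ... is equivalent to asking for no extra alignment at all */
	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);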
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180727091855.1879-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_engine_cs.c | 4 ++-- drivers/gpu/drm/i915/intel_guc.c | 2 +- drivers/gpu/drm/i915/intel_lrc.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 5 ++--- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 734a789688da..67c4fc5d737c 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -513,7 +513,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, goto err_unref; } - ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH); + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (ret) goto err_unref; @@ -586,7 +586,7 @@ static int init_status_page(struct intel_engine_cs *engine) flags |= PIN_MAPPABLE; else flags |= PIN_HIGH; - ret = i915_vma_pin(vma, 0, 4096, flags); + ret = i915_vma_pin(vma, 0, 0, flags); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 3082d7670f05..97460982985c 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -659,7 +659,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) if (IS_ERR(vma)) goto err; - ret = i915_vma_pin(vma, 0, PAGE_SIZE, + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias); if (ret) { vma = ERR_PTR(ret); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index c52ef2817c96..986f84920290 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1643,7 +1643,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) goto err; } - err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) goto err; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 33faad3197fe..27b24000412f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1035,7 +1035,7 @@ int intel_ring_pin(struct intel_ring *ring, return ret; } - ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags); + ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) return ret; @@ -1220,8 +1220,7 @@ static int __context_pin(struct intel_context *ce) return err; } - err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT, - PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) return err; -- GitLab From c00db496bbd4586448bd46e19d79f424fd6faf6a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 27 Jul 2018 10:29:47 +0100 Subject: [PATCH 0034/1692] drm/i915: Remove superfluous GEN8_LR_CONTEXT_ALIGN As GEN8_LR_CONTEXT_ALIGN is I915_GTT_MIN_ALIGNMENT is it functionally equivalent to 0, and we will not be able to reduce the min-alignment for the GTT, so passing 0 is and will remain equivalent. 
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180727092947.1953-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_lrc.c | 2 +- drivers/gpu/drm/i915/intel_lrc.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 986f84920290..66c7252526f3 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1306,7 +1306,7 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) if (ctx->ggtt_offset_bias) flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias; - return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags); + return i915_vma_pin(vma, 0, 0, flags); } static struct intel_context * diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 4dfb78e3ec7e..f5a5502ecf70 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -27,8 +27,6 @@ #include "intel_ringbuffer.h" #include "i915_gem_context.h" -#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT - /* Execlists regs */ #define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230) #define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234) -- GitLab From 9936ef55f254bb95fca9258539819a92d824497d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Bartmi=C5=84ski?= Date: Fri, 27 Jul 2018 16:11:43 +0200 Subject: [PATCH 0035/1692] drm/i915/guc: Avoid wasting memory on incorrect GuC pin bias MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It would appear that the calculated GuC pin bias was larger than it should be, as the GuC address space does NOT contain the "HW contexts RSVD" part of the WOPCM. Thus, the GuC pin bias is simply the GuC WOPCM size. v5: Clarify the diagram to better represent the GuC address space. Since we now don't use guc.base for the pin bias there's no need to validate it. It also has already been verified in WOPCM init. 
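To put a number on the waste (hypothetical WOPCM layout, purely for illustration): with a 2 MiB WOPCM whose GuC region starts at 512 KiB and spans 1 MiB, the old and new formulas give

	/* Hypothetical figures, not taken from any particular platform. */
	const u32 wopcm_size     = 2 * 1024 * 1024;	/* total WOPCM       */
	const u32 guc_wopcm_base = 512 * 1024;		/* GuC region offset */
	const u32 guc_wopcm_size = 1 * 1024 * 1024;	/* GuC region size   */

	u32 old_bias = wopcm_size - guc_wopcm_base;	/* 1.5 MiB excluded  */
	u32 new_bias = guc_wopcm_size;			/* 1.0 MiB excluded  */
	/* i.e. 512 KiB of GGTT below GUC_GGTT_TOP was needlessly left unused */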
Bspec: 1180 Signed-off-by: Jakub Bartmiński Cc: Chris Wilson Cc: Michał Winiarski Cc: Michal Wajdeczko Reviewed-by: Michał Winiarski Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180727141148.30874-1-jakub.bartminski@intel.com --- drivers/gpu/drm/i915/intel_guc.c | 49 +++++++++++++------------------- 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 97460982985c..deb6a2053eaf 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -585,50 +585,41 @@ int intel_guc_resume(struct intel_guc *guc) * * :: * - * +==============> +====================+ <== GUC_GGTT_TOP - * ^ | | - * | | | - * | | DRAM | - * | | Memory | - * | | | - * GuC | | - * Address +========> +====================+ <== WOPCM Top - * Space ^ | HW contexts RSVD | - * | | | WOPCM | - * | | +==> +--------------------+ <== GuC WOPCM Top - * | GuC ^ | | - * | GGTT | | | - * | Pin GuC | GuC | - * | Bias WOPCM | WOPCM | - * | | Size | | - * | | | | | - * v v v | | - * +=====+=====+==> +====================+ <== GuC WOPCM Base - * | Non-GuC WOPCM | - * | (HuC/Reserved) | - * +====================+ <== WOPCM Base + * +===========> +====================+ <== FFFF_FFFF + * ^ | Reserved | + * | +====================+ <== GUC_GGTT_TOP + * | | | + * | | DRAM | + * GuC | | + * Address +===> +====================+ <== GuC ggtt_pin_bias + * Space ^ | | + * | | | | + * | GuC | GuC | + * | WOPCM | WOPCM | + * | Size | | + * | | | | + * v v | | + * +=======+===> +====================+ <== 0000_0000 * - * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to WOPCM + * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped - * to DRAM. The value of the GuC ggtt_pin_bias is determined by WOPCM size and - * actual GuC WOPCM size. + * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size. */ /** * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. * @guc: intel_guc structure. * - * This function will calculate and initialize the ggtt_pin_bias value based on - * overall WOPCM size and GuC WOPCM size. + * This function will calculate and initialize the ggtt_pin_bias value + * based on the GuC WOPCM size. */ static void guc_init_ggtt_pin_bias(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_i915(guc); GEM_BUG_ON(!i915->wopcm.size); - GEM_BUG_ON(i915->wopcm.size < i915->wopcm.guc.base); - guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base; + guc->ggtt_pin_bias = i915->wopcm.guc.size; } /** -- GitLab From b6445e17799d931c8c94e5d7acc59c9558f52df5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Bartmi=C5=84ski?= Date: Fri, 27 Jul 2018 16:11:44 +0200 Subject: [PATCH 0036/1692] drm/i915/guc: Do not partition WOPCM if GuC is not used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There seems to be no reason for doing extra work on WOPCM partitioning in the case GuC is not used, as the partitioning will not be used by the intel_wopcm_init_hw function anyway. 
Signed-off-by: Jakub Bartmiński Cc: Chris Wilson Cc: Michał Winiarski Cc: Michal Wajdeczko Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180727141148.30874-2-jakub.bartminski@intel.com --- drivers/gpu/drm/i915/intel_wopcm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 74bf76f3fddc..09cc62b0d7ca 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -163,6 +163,9 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) u32 guc_wopcm_rsvd; int err; + if (!USES_GUC(dev_priv)) + return 0; + GEM_BUG_ON(!wopcm->size); if (guc_fw_size >= wopcm->size) { -- GitLab From dd18cedfa36fbbc19903aed12d6d94c06f5e6dea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Bartmi=C5=84ski?= Date: Fri, 27 Jul 2018 16:11:45 +0200 Subject: [PATCH 0037/1692] drm/i915/guc: Move the pin bias value from GuC to GGTT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removing the pin bias from GuC allows us to not check for GuC every time we pin a context, which fixes the assertion error on unresolved GuC platform default in mock contexts selftest. It also seems that we were using uninitialized WOPCM variables when setting the GuC pin bias. The pin bias has to be set after the WOPCM, but before the call to i915_gem_contexts_init where the first contexts are pinned. v2: This also makes it so that there's no need to set GuC variables from within the WOPCM init function or to move the WOPCM init, while keeping the correct initialization order. Also for mock tests the pin bias is left at 0 and we make sure that the pin bias with GuC will not be smaller than without GuC. v3: Avoid unused i915 in intel_guc_ggtt_offset if debug is disabled. v4: Squash with WOPCM init reordering. Moved the i915_ggtt_pin_bias helper to this patch, and made some functions use it instead of directly dereferencing i915->ggtt. v5: Since we now don't use wopcm.guc.base for the pin bias there's no need to validate it. It also has already been verified in WOPCM init. v6: Deleted the now unnecessarily introduced includes from previous versions. Dropped naming changes from dev_priv to i915 for better patch readability. v7: Changed some comments to make more sense in the context they're in. v8: Moved and renamed the function which now returns the wopcm.guc.size to intel_guc.c:intel_guc_reserved_gtt_size to avoid any possible confusion with the pin_bias in ggtt, which should be used for pinning. Fixed patch not applying or the most recent upstream. 
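Condensing the hunks below into the shape of the new scheme: the bias is computed once when the GGTT is set up, and every GGTT pin that must stay clear of the GuC WOPCM shadow derives it from the vma rather than from guc state:

	/* computed once at GGTT init (see i915_gem_init_ggtt() below) */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_guc_reserved_gtt_size(&dev_priv->guc));

	/* consumed at pin time, e.g. in intel_guc_allocate_vma() below */
	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);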
Fixes: f7dc0157e4b5 ("drm/i915/uc: Fetch GuC/HuC firmwares from guc/huc specific init") Testcase: igt/drv_selftest/mock_contexts #GuC Signed-off-by: Jakub Bartmiński Cc: Chris Wilson Cc: Michał Winiarski Cc: Michal Wajdeczko Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180727141148.30874-3-jakub.bartminski@intel.com --- drivers/gpu/drm/i915/i915_gem_context.c | 10 +----- drivers/gpu/drm/i915/i915_gem_gtt.c | 9 ++++++ drivers/gpu/drm/i915/i915_gem_gtt.h | 2 ++ drivers/gpu/drm/i915/i915_vma.h | 5 +++ drivers/gpu/drm/i915/intel_guc.c | 42 ++++++++++++------------- drivers/gpu/drm/i915/intel_guc.h | 12 +++---- drivers/gpu/drm/i915/intel_huc.c | 2 +- drivers/gpu/drm/i915/intel_uc_fw.c | 2 +- 8 files changed, 44 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b10770cfccd2..32f96b8cd9c4 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -329,15 +329,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->desc_template = default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); - /* - * GuC requires the ring to be placed in Non-WOPCM memory. If GuC is not - * present or not in use we still need a small bias as ring wraparound - * at offset 0 sometimes hangs. No idea why. - */ - if (USES_GUC(dev_priv)) - ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias; - else - ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; + ctx->ggtt_offset_bias = dev_priv->ggtt.pin_bias; return ctx; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1b476423bfab..87219870d559 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2929,6 +2929,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) struct drm_mm_node *entry; int ret; + /* + * GuC requires all resources that we're sharing with it to be placed in + * non-WOPCM memory. If GuC is not present or not in use we still need a + * small bias as ring wraparound at offset 0 sometimes hangs. No idea + * why. 
+ */ + ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, + intel_guc_reserved_gtt_size(&dev_priv->guc)); + ret = intel_vgt_balloon(dev_priv); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 2a116a91420b..ce945bf78a89 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -401,6 +401,8 @@ struct i915_ggtt { int mtrr; + u32 pin_bias; + struct drm_mm_node error_capture; }; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index af5296b015f5..f1ba40bbe6f9 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -208,6 +208,11 @@ static inline u32 i915_ggtt_offset(const struct i915_vma *vma) return lower_32_bits(vma->node.start); } +static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma) +{ + return i915_vm_to_ggtt(vma->vm)->pin_bias; +} + static inline struct i915_vma *i915_vma_get(struct i915_vma *vma) { i915_gem_object_get(vma->obj); diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index deb6a2053eaf..230aea69385d 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -27,8 +27,6 @@ #include "intel_guc_submission.h" #include "i915_drv.h" -static void guc_init_ggtt_pin_bias(struct intel_guc *guc); - static void gen8_guc_raise_irq(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -144,8 +142,6 @@ int intel_guc_init_misc(struct intel_guc *guc) struct drm_i915_private *i915 = guc_to_i915(guc); int ret; - guc_init_ggtt_pin_bias(guc); - ret = guc_init_wq(guc); if (ret) return ret; @@ -606,22 +602,6 @@ int intel_guc_resume(struct intel_guc *guc) * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size. */ -/** - * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. - * @guc: intel_guc structure. - * - * This function will calculate and initialize the ggtt_pin_bias value - * based on the GuC WOPCM size. - */ -static void guc_init_ggtt_pin_bias(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - - GEM_BUG_ON(!i915->wopcm.size); - - guc->ggtt_pin_bias = i915->wopcm.guc.size; -} - /** * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage * @guc: the guc @@ -640,6 +620,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_gem_object *obj; struct i915_vma *vma; + u64 flags; int ret; obj = i915_gem_object_create(dev_priv, size); @@ -650,8 +631,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) if (IS_ERR(vma)) goto err; - ret = i915_vma_pin(vma, 0, 0, - PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias); + flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + ret = i915_vma_pin(vma, 0, 0, flags); if (ret) { vma = ERR_PTR(ret); goto err; @@ -663,3 +644,20 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) i915_gem_object_put(obj); return vma; } + +/** + * intel_guc_reserved_gtt_size() + * @guc: intel_guc structure + * + * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using + * GuC we can't have any objects pinned in that region. This function returns + * the size of the shadowed region. + * + * Returns: + * 0 if GuC is not present or not in use. + * Otherwise, the GuC WOPCM size. 
+ */ +u32 intel_guc_reserved_gtt_size(struct intel_guc *guc) +{ + return guc_to_i915(guc)->wopcm.guc.size; +} diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 4121928a495e..ad42faf48c46 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -49,9 +49,6 @@ struct intel_guc { struct intel_guc_log log; struct intel_guc_ct ct; - /* Offset where Non-WOPCM memory starts. */ - u32 ggtt_pin_bias; - /* Log snapshot if GuC errors during load */ struct drm_i915_gem_object *load_err_log; @@ -130,10 +127,10 @@ static inline void intel_guc_to_host_event_handler(struct intel_guc *guc) * @vma: i915 graphics virtual memory area. * * GuC does not allow any gfx GGTT address that falls into range - * [0, GuC ggtt_pin_bias), which is reserved for Boot ROM, SRAM and WOPCM. - * Currently, in order to exclude [0, GuC ggtt_pin_bias) address space from + * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM. + * Currently, in order to exclude [0, ggtt.pin_bias) address space from * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma() - * and pinned with PIN_OFFSET_BIAS along with the value of GuC ggtt_pin_bias. + * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias. * * Return: GGTT offset of the @vma. */ @@ -142,7 +139,7 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, { u32 offset = i915_ggtt_offset(vma); - GEM_BUG_ON(offset < guc->ggtt_pin_bias); + GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma)); GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP)); return offset; @@ -168,6 +165,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); +u32 intel_guc_reserved_gtt_size(struct intel_guc *guc); static inline int intel_guc_sanitize(struct intel_guc *guc) { diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index ffcad5fad6a7..37ef540dd280 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -63,7 +63,7 @@ int intel_huc_auth(struct intel_huc *huc) return -ENOEXEC; vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0, - PIN_OFFSET_BIAS | guc->ggtt_pin_bias); + PIN_OFFSET_BIAS | i915->ggtt.pin_bias); if (IS_ERR(vma)) { ret = PTR_ERR(vma); DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret); diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c index 6e8e0b546743..fd496416087c 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/intel_uc_fw.c @@ -222,7 +222,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, goto fail; } - ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->guc.ggtt_pin_bias; + ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->ggtt.pin_bias; vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0, PIN_OFFSET_BIAS | ggtt_pin_bias); if (IS_ERR(vma)) { -- GitLab From 496bcce3c9bf50ccf74b3050669600631cbf8138 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Bartmi=C5=84ski?= Date: Fri, 27 Jul 2018 16:11:46 +0200 Subject: [PATCH 0038/1692] drm/i915: Remove unnecessary ggtt_offset_bias from i915_gem_context MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since ggtt_offset_bias is now stored in ggtt.pin_bias, it is duplicated inside i915_gem_context, and can instead be accessed directly from ggtt. 
v3: Added a helper function to retrieve the ggtt.pin_bias from the vma. v4: Moved the helper function to the previous patch in the series. Dropped the bias from intel_ring_pin. This introduces a slight functional change since we are always pinning the ring a bit higher if GuC is present even though we don't really need to. v8: Fixed patch not applying on the most recent upstream. Signed-off-by: Jakub Bartmiński Cc: Chris Wilson Cc: Michał Winiarski Cc: Michal Wajdeczko Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180727141148.30874-4-jakub.bartminski@intel.com --- drivers/gpu/drm/i915/i915_gem_context.c | 2 -- drivers/gpu/drm/i915/i915_gem_context.h | 3 --- drivers/gpu/drm/i915/intel_lrc.c | 5 ++--- drivers/gpu/drm/i915/intel_ringbuffer.c | 16 ++++++---------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +--- 5 files changed, 9 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 32f96b8cd9c4..f15a039772db 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -329,8 +329,6 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->desc_template = default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); - ctx->ggtt_offset_bias = dev_priv->ggtt.pin_bias; - return ctx; err_pid: diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index b116e4942c10..851dad6decd7 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -147,9 +147,6 @@ struct i915_gem_context { struct i915_sched_attr sched; - /** ggtt_offset_bias: placement restriction for context objects */ - u32 ggtt_offset_bias; - /** engine: per-engine logical HW state */ struct intel_context { struct i915_gem_context *gem_context; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 66c7252526f3..7879791b263b 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1303,8 +1303,7 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) } flags = PIN_GLOBAL | PIN_HIGH; - if (ctx->ggtt_offset_bias) - flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias; + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); return i915_vma_pin(vma, 0, 0, flags); } @@ -1332,7 +1331,7 @@ __execlists_context_pin(struct intel_engine_cs *engine, goto unpin_vma; } - ret = intel_ring_pin(ce->ring, ctx->i915, ctx->ggtt_offset_bias); + ret = intel_ring_pin(ce->ring, ctx->i915); if (ret) goto unpin_map; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 27b24000412f..b293e1dedb8e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1003,11 +1003,7 @@ i915_emit_bb_start(struct i915_request *rq, return 0; } - - -int intel_ring_pin(struct intel_ring *ring, - struct drm_i915_private *i915, - unsigned int offset_bias) +int intel_ring_pin(struct intel_ring *ring, struct drm_i915_private *i915) { enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; struct i915_vma *vma = ring->vma; @@ -1017,10 +1013,11 @@ int intel_ring_pin(struct intel_ring *ring, GEM_BUG_ON(ring->vaddr); - flags = PIN_GLOBAL; - if (offset_bias) - flags |= PIN_OFFSET_BIAS | offset_bias; + + /* Ring wraparound at offset 0 sometimes hangs. No idea why. 
*/ + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + if (vma->obj->stolen) flags |= PIN_MAPPABLE; else @@ -1408,8 +1405,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) goto err; } - /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ - err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE); + err = intel_ring_pin(ring, engine->i915); if (err) goto err_ring; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index f5ffa6d31e82..399ec58d1f9d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -797,9 +797,7 @@ struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, struct i915_timeline *timeline, int size); -int intel_ring_pin(struct intel_ring *ring, - struct drm_i915_private *i915, - unsigned int offset_bias); +int intel_ring_pin(struct intel_ring *ring, struct drm_i915_private *i915); void intel_ring_reset(struct intel_ring *ring, u32 tail); unsigned int intel_ring_update_space(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring); -- GitLab From 905febf592f7280084ee853b05d7bd59e26c4ca0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Bartmi=C5=84ski?= Date: Fri, 27 Jul 2018 16:11:47 +0200 Subject: [PATCH 0039/1692] drm/i915: Add a fault injection point to WOPCM init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a fault injection point in the WOPCM initialization path. v4: Move the injection inside the WOPCM init function. Signed-off-by: Jakub Bartmiński Cc: Chris Wilson Cc: Michał Winiarski Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180727141148.30874-5-jakub.bartminski@intel.com --- drivers/gpu/drm/i915/intel_wopcm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 09cc62b0d7ca..92cb82dd0c07 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -168,6 +168,9 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) GEM_BUG_ON(!wopcm->size); + if (i915_inject_load_failure()) + return -E2BIG; + if (guc_fw_size >= wopcm->size) { DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.", guc_fw_size / 1024); -- GitLab From 5503cb0decdc03c2f9dad53560bd5963aeb8fc8a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 27 Jul 2018 16:55:01 +0100 Subject: [PATCH 0040/1692] drm/i915: Drop unneed i915 parameter from intel_ring_pin() As we now have a ring->vma available, we can just lookup our i915 pointer from inside the vm, and so not require the unsightly parameter. 
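The lookup referred to is just the back-pointer chain already carried by the ring's vma, so the caller no longer needs to pass dev_priv around (illustrative):

	struct i915_vma *vma = ring->vma;
	struct drm_i915_private *i915 = vma->vm->i915;	/* vma -> vm -> i915 */

	enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;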
Signed-off-by: Chris Wilson Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20180727155501.18963-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_lrc.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 7 ++++--- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7879791b263b..fad689efb67a 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1331,7 +1331,7 @@ __execlists_context_pin(struct intel_engine_cs *engine, goto unpin_vma; } - ret = intel_ring_pin(ce->ring, ctx->i915); + ret = intel_ring_pin(ce->ring); if (ret) goto unpin_map; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b293e1dedb8e..d1e03b7fbffa 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1003,10 +1003,11 @@ i915_emit_bb_start(struct i915_request *rq, return 0; } -int intel_ring_pin(struct intel_ring *ring, struct drm_i915_private *i915) +int intel_ring_pin(struct intel_ring *ring) { - enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; struct i915_vma *vma = ring->vma; + enum i915_map_type map = + HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC; unsigned int flags; void *addr; int ret; @@ -1405,7 +1406,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) goto err; } - err = intel_ring_pin(ring, engine->i915); + err = intel_ring_pin(ring); if (err) goto err_ring; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 399ec58d1f9d..57f3787ed6ec 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -797,7 +797,7 @@ struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, struct i915_timeline *timeline, int size); -int intel_ring_pin(struct intel_ring *ring, struct drm_i915_private *i915); +int intel_ring_pin(struct intel_ring *ring); void intel_ring_reset(struct intel_ring *ring, u32 tail); unsigned int intel_ring_update_space(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring); -- GitLab From c50dfe79ec3ea28efe494808576d281738056d90 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Wed, 25 Jul 2018 17:12:29 -0700 Subject: [PATCH 0041/1692] drm/i915/icl: don't set CNL_DDI_CLOCK_REG_ACCESS_ON anymore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The new recommendation from the spec is to simply not set this bit anymore. Not setting the bit would prevent some hangs that our driver manages to avoid since commit c8af5274c3cb ("drm/i915: enable the pipe/transcoder/planes later on HSW+"), and the theoretical downside of not setting the bit doesn't seem realistic according to the HW team. Let's follow their recommendation. 
BSpec: 20233 References: commit c8af5274c3cb ("drm/i915: enable the pipe/transcoder/planes later on HSW+") Cc: José Roberto de Souza Reviewed-by: José Roberto de Souza Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180726001229.13791-1-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6b5aa3b074ec..cf89141b2281 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -3372,10 +3372,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, /* 7. Setup MBUS. */ icl_mbus_init(dev_priv); - - /* 8. CHICKEN_DCPR_1 */ - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | - CNL_DDI_CLOCK_REG_ACCESS_ON); } static void icl_display_core_uninit(struct drm_i915_private *dev_priv) -- GitLab From f00ca81510b9aa81c88c916c253509e879740342 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Thu, 7 Jun 2018 16:07:00 -0700 Subject: [PATCH 0042/1692] drm/i915: inline skl_copy_ddb_for_pipe() to its only caller While things may have been different before, right now the function is very simple and has a single caller. IMHO any possible benefits from an abstraction here are gone and not worth the price of the current indirection while reading the code. Cc: Mahesh Kumar Reviewed-by: Mahesh Kumar Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180607230700.28359-1-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7312ecb73415..f175923939ae 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5141,17 +5141,6 @@ skl_compute_ddb(struct drm_atomic_state *state) return 0; } -static void -skl_copy_ddb_for_pipe(struct skl_ddb_values *dst, - struct skl_ddb_values *src, - enum pipe pipe) -{ - memcpy(dst->ddb.uv_plane[pipe], src->ddb.uv_plane[pipe], - sizeof(dst->ddb.uv_plane[pipe])); - memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe], - sizeof(dst->ddb.plane[pipe])); -} - static void skl_print_wm_changes(const struct drm_atomic_state *state) { @@ -5381,7 +5370,10 @@ static void skl_initial_wm(struct intel_atomic_state *state, if (cstate->base.active_changed) skl_atomic_update_crtc_wm(state, cstate); - skl_copy_ddb_for_pipe(hw_vals, results, pipe); + memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe], + sizeof(hw_vals->ddb.uv_plane[pipe])); + memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe], + sizeof(hw_vals->ddb.plane[pipe])); mutex_unlock(&dev_priv->wm.wm_mutex); } -- GitLab From 2b7edeb008527b1dd649d3a581792825b57df08a Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Thu, 26 Jul 2018 16:35:14 -0700 Subject: [PATCH 0043/1692] drm/i915/icl: Add TBT checks for PLL calculations Add missing TBT check in the Pll calculation. v2: do not use a auxiliary function to check if status is TBT or not. (Paulo) v3: Code style changes. 
(Paulo) Cc: Paulo Zanoni Cc: Lucas De Marchi Reviewed-by: Paulo Zanoni Signed-off-by: Anusha Srivatsa Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/1532648115-29795-1-git-send-email-anusha.srivatsa@intel.com --- drivers/gpu/drm/i915/intel_dpll_mgr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 7e5e6eb5dfe2..20c90688a48a 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -2866,6 +2866,8 @@ static struct intel_shared_dpll * icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(&encoder->base); struct intel_shared_dpll *pll; struct intel_dpll_hw_state pll_state = {}; enum port port = encoder->port; @@ -2885,7 +2887,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, case PORT_D: case PORT_E: case PORT_F: - if (0 /* TODO: TBT PLLs */) { + if (intel_dig_port->tc_type == TC_PORT_TBT) { min = DPLL_ID_ICL_TBTPLL; max = min; ret = icl_calc_dpll_state(crtc_state, encoder, clock, -- GitLab From 6f211ed4343832ce5c18ea68e860705b3fdfa82f Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Thu, 26 Jul 2018 16:35:15 -0700 Subject: [PATCH 0044/1692] drm/i915/icl: Set TBT IO in Aux transaction For a TBT sequence, we need to set the IO type to TBT in DDI_AUX_CTL. v2: Avoid duplications.(Paulo) Cc: Paulo Zanoni Reviewed-by: Paulo Zanoni Signed-off-by: Anusha Srivatsa Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/1532648115-29795-2-git-send-email-anusha.srivatsa@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_dp.c | 26 +++++++++++++++++--------- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5530c470f30d..7bdc214ffb6e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5558,6 +5558,7 @@ enum { #define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14) #define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13) #define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12) +#define DP_AUX_CH_CTL_TBT_IO (1 << 11) #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5) #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ac59590b281b..8e0e14ba534f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1208,15 +1208,23 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, int send_bytes, uint32_t unused) { - return DP_AUX_CH_CTL_SEND_BUSY | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_INTERRUPT | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_TIME_OUT_MAX | - DP_AUX_CH_CTL_RECEIVE_ERROR | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | - DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + uint32_t ret; + + ret = DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_INTERRUPT | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_TIME_OUT_MAX | + DP_AUX_CH_CTL_RECEIVE_ERROR | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | + DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); + + if (intel_dig_port->tc_type == 
TC_PORT_TBT) + ret |= DP_AUX_CH_CTL_TBT_IO; + + return ret; } static int -- GitLab From 86c1c87d0e6241cbe35bd52badfc84b154e1b959 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 26 Jul 2018 17:15:27 +0100 Subject: [PATCH 0045/1692] drm/i915: Downgrade Gen9 Plane WM latency error According to intel_read_wm_latency() it is perfectly legal for one WM and all subsequent levels to be 0 (and the deeper powersaving states disabled), so don't shout *ERROR*, over and over again. Signed-off-by: Chris Wilson Cc: Maarten Lankhorst Cc: Ville Syrjala Acked-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20180726161527.10516-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_pm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f175923939ae..8a4152244571 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2942,8 +2942,8 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, unsigned int latency = wm[level]; if (latency == 0) { - DRM_ERROR("%s WM%d latency not provided\n", - name, level); + DRM_DEBUG_KMS("%s WM%d latency not provided\n", + name, level); continue; } -- GitLab From 39f3be162c46bc2349ad7a5bd89536eb83561c81 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 30 Jul 2018 08:53:50 +0100 Subject: [PATCH 0046/1692] drm/i915: Kick waiters on resetting legacy rings For reasons unknown, interrupts following a reset do not arrive, but this can be papered over by kicking any waiter and peeking at the breadcrumbs following the reset. Testcase: igt/gem_eio/reset-stress References: https://bugs.freedesktop.org/show_bug.cgi?id=105957 Signed-off-by: Chris Wilson Acked-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180730075351.15569-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d1e03b7fbffa..80a8b6e57374 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -527,6 +527,8 @@ static int init_ring_common(struct intel_engine_cs *engine) if (INTEL_GEN(dev_priv) > 2) I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); + /* Papering over lost _interrupts_ immediately following the restart */ + intel_engine_wakeup(engine); out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); -- GitLab From f6844a85e0c96a55c61fa3e611f414999b11e4de Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 30 Jul 2018 08:53:51 +0100 Subject: [PATCH 0047/1692] drm/i915/selftests: Replace opencoded clflush with drm_clflush_virt_range We occasionally see that the clflush prior to a read of GPU data is returning stale data, reminiscent of much earlier bugs fixed by adding a second clflush for serialisation. As drm_clflush_virt_range() already supplies the workaround, use it rather than open code the clflush instruction. 
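For comparison, the change collapses the open-coded barrier/flush/barrier sequence into the DRM helper, which already carries the serialisation workaround mentioned above; roughly:

	/* before: open-coded flush of the u32 at 'offset' (map is a u32 *) */
	mb();
	clflush(map + offset_in_page(offset) / sizeof(*map));
	mb();

	/* after: 'cpu' points at the same u32 (map is now a void *) and the
	 * helper serialises and applies the partial-cacheline workaround
	 */
	u32 *cpu = map + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));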
References: 396f5d62d1a5 ("drm: Restore double clflush on the last partial cacheline") Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180730075351.15569-3-chris@chris-wilson.co.uk --- .../drm/i915/selftests/i915_gem_coherency.c | 38 +++++++++---------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index 3a095c37c120..4e6a221063ac 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -33,7 +33,8 @@ static int cpu_set(struct drm_i915_gem_object *obj, { unsigned int needs_clflush; struct page *page; - u32 *map; + void *map; + u32 *cpu; int err; err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); @@ -42,24 +43,19 @@ static int cpu_set(struct drm_i915_gem_object *obj, page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); + cpu = map + offset_in_page(offset); - if (needs_clflush & CLFLUSH_BEFORE) { - mb(); - clflush(map+offset_in_page(offset) / sizeof(*map)); - mb(); - } + if (needs_clflush & CLFLUSH_BEFORE) + drm_clflush_virt_range(cpu, sizeof(*cpu)); - map[offset_in_page(offset) / sizeof(*map)] = v; + *cpu = v; - if (needs_clflush & CLFLUSH_AFTER) { - mb(); - clflush(map+offset_in_page(offset) / sizeof(*map)); - mb(); - } + if (needs_clflush & CLFLUSH_AFTER) + drm_clflush_virt_range(cpu, sizeof(*cpu)); kunmap_atomic(map); - i915_gem_obj_finish_shmem_access(obj); + return 0; } @@ -69,7 +65,8 @@ static int cpu_get(struct drm_i915_gem_object *obj, { unsigned int needs_clflush; struct page *page; - u32 *map; + void *map; + u32 *cpu; int err; err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); @@ -78,17 +75,16 @@ static int cpu_get(struct drm_i915_gem_object *obj, page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); + cpu = map + offset_in_page(offset); - if (needs_clflush & CLFLUSH_BEFORE) { - mb(); - clflush(map+offset_in_page(offset) / sizeof(*map)); - mb(); - } + if (needs_clflush & CLFLUSH_BEFORE) + drm_clflush_virt_range(cpu, sizeof(*cpu)); - *v = map[offset_in_page(offset) / sizeof(*map)]; - kunmap_atomic(map); + *v = *cpu; + kunmap_atomic(map); i915_gem_obj_finish_shmem_access(obj); + return 0; } -- GitLab From 3d94361aa13a0135a1b67d27a80a5158c93d6505 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 30 Jul 2018 13:05:44 +0100 Subject: [PATCH 0048/1692] drm/i915/gtt: remove px_page Entries will either be pointing to scratch or real PD, making the px_page(pd) check pointless. Also since there are no other users of px_page, just remove it. 
Signed-off-by: Matthew Auld Cc: Chris Wilson Cc: Michel Thierry Reviewed-by: Chris Wilson Reviewed-by: Mika Kuoppala Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180730120544.20784-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 3 --- drivers/gpu/drm/i915/i915_gem_gtt.h | 1 - 2 files changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 87219870d559..4137af4bd8f5 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1251,9 +1251,6 @@ static void gen8_free_page_tables(struct i915_address_space *vm, { int i; - if (!px_page(pd)) - return; - for (i = 0; i < I915_PDES; i++) { if (pd->page_table[i] != vm->scratch_pt) free_pt(vm, pd->page_table[i]); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index ce945bf78a89..dd161c187a68 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -229,7 +229,6 @@ struct i915_page_dma { }; #define px_base(px) (&(px)->base) -#define px_page(px) (px_base(px)->page) #define px_dma(px) (px_base(px)->daddr) struct i915_page_table { -- GitLab From 60548c554be2830d29d2533dad0ac8133347ee51 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 31 Jul 2018 14:26:29 +0100 Subject: [PATCH 0049/1692] drm/i915: Interactive RPS mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RPS provides a feedback loop where we use the load during the previous evaluation interval to decide whether to up or down clock the GPU frequency. Our responsiveness is split into 3 regimes, a high and low plateau with the intent to keep the gpu clocked high to cover occasional stalls under high load, and low despite occasional glitches under steady low load, and inbetween. However, we run into situations like kodi where we want to stay at low power (video decoding is done efficiently inside the fixed function HW and doesn't need high clocks even for high bitrate streams), but just occasionally the pipeline is more complex than a video decode and we need a smidgen of extra GPU power to present on time. In the high power regime, we sample at sub frame intervals with a bias to upclocking, and conversely at low power we sample over a few frames worth to provide what we consider to be the right levels of responsiveness respectively. At low power, we more or less expect to be kicked out to high power at the start of a busy sequence by waitboosting. Prior to commit e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active request") whenever we missed the frame or stalled, we would immediate go full throttle and upclock the GPU to max. But in commit e9af4ea2b9e7, we relaxed the waitboosting to only apply if the pipeline was deep to avoid over-committing resources for a near miss. Sadly though, a near miss is still a miss, and perceptible as jitter in the frame delivery. To try and prevent the near miss before having to resort to boosting after the fact, we use the pageflip queue as an indication that we are in an "interactive" regime and so should sample the load more frequently to provide power before the frame misses it vblank. This will make us more favorable to providing a small power increase (one or two bins) as required rather than going all the way to maximum and then having to work back down again. 
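The interactive hint is a refcount under the new power.mutex: while at least one pageflip is being prepared, the thresholds are biased as though we were already in the high-power regime. The full body of intel_rps_mark_interactive() is not reproduced in the hunks below, so the following is only a plausible sketch of that shape, assuming rps_set_power() (shown further down) reprograms the sampling thresholds for the requested mode and that i915->gt.awake gates the reprogramming as per the v3 note above:

	/* Plausible sketch only; field and function names follow the hunks below. */
	void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
	{
		struct intel_rps *rps = &i915->gt_pm.rps;

		if (!rps->enabled)
			return;

		mutex_lock(&rps->power.mutex);
		if (interactive) {
			/* first interactive client: sample aggressively, bias upclocking */
			if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
				rps_set_power(i915, HIGH_POWER);
		} else {
			/* last interactive client gone; the next evaluation may relax again */
			rps->power.interactive--;
		}
		mutex_unlock(&rps->power.mutex);
	}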
(We still keep the waitboosting mechanism around just in case a dramatic change in system load requires urgent uplocking, faster than we can provide in a few evaluation intervals.) v2: Reduce rps_set_interactive to a boolean parameter to avoid the confusion of what if they wanted a new power mode after pinning to a different mode (which to choose?) v3: Only reprogram RPS while the GT is awake, it will be set when we wake the GT, and while off warns about being used outside of rpm. v4: Fix deferred application of interactive mode v5: s/state/interactive/ v6: Group the mutex with its principle in a substruct Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107111 Fixes: e9af4ea2b9e7 ("drm/i915: Avoid waitboosting on the active request") Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Tvrtko Ursulin Cc: Radoslaw Szwichtenberg Cc: Ville Syrjälä Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180731132629.3381-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 13 ++-- drivers/gpu/drm/i915/i915_drv.h | 16 +++-- drivers/gpu/drm/i915/i915_irq.c | 4 +- drivers/gpu/drm/i915/intel_display.c | 20 ++++++ drivers/gpu/drm/i915/intel_drv.h | 2 + drivers/gpu/drm/i915/intel_pm.c | 101 ++++++++++++++++++--------- 6 files changed, 111 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 59dc0610ea44..f9ce35da4123 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1218,7 +1218,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup)); seq_printf(m, "RP PREV UP: %d (%dus)\n", rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup)); - seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold); + seq_printf(m, "Up threshold: %d%%\n", + rps->power.up_threshold); seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei)); @@ -1226,7 +1227,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown)); seq_printf(m, "RP PREV DOWN: %d (%dus)\n", rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown)); - seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold); + seq_printf(m, "Down threshold: %d%%\n", + rps->power.down_threshold); max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff; @@ -2218,6 +2220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); + seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); seq_printf(m, "Frequency requested %d\n", intel_gpu_freq(dev_priv, rps->cur_freq)); seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", @@ -2261,13 +2264,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", - rps_power_to_str(rps->power)); + rps_power_to_str(rps->power.mode)); seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", rpup && rpupei ? 100 * rpup / rpupei : 0, - rps->up_threshold); + rps->power.up_threshold); seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", rpdown && rpdownei ? 
100 * rpdown / rpdownei : 0, - rps->down_threshold); + rps->power.down_threshold); } else { seq_puts(m, "\nRPS Autotuning inactive\n"); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0f49f9988dfa..4aca5344863d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -779,11 +779,17 @@ struct intel_rps { u8 rp0_freq; /* Non-overclocked max frequency. */ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ - u8 up_threshold; /* Current %busy required to uplock */ - u8 down_threshold; /* Current %busy required to downclock */ - int last_adj; - enum { LOW_POWER, BETWEEN, HIGH_POWER } power; + + struct { + struct mutex mutex; + + enum { LOW_POWER, BETWEEN, HIGH_POWER } mode; + unsigned int interactive; + + u8 up_threshold; /* Current %busy required to uplock */ + u8 down_threshold; /* Current %busy required to downclock */ + } power; bool enabled; atomic_t num_waiters; @@ -3422,6 +3428,8 @@ extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); +extern void intel_rps_mark_interactive(struct drm_i915_private *i915, + bool interactive); extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5dadefca2ad2..90628a47ae17 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1265,9 +1265,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) c0 = max(render, media); c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ - if (c0 > time * rps->up_threshold) + if (c0 > time * rps->power.up_threshold) events = GEN6_PM_RP_UP_THRESHOLD; - else if (c0 < time * rps->down_threshold) + else if (c0 < time * rps->power.down_threshold) events = GEN6_PM_RP_DOWN_THRESHOLD; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 577b30dde45b..73c6d56ba3ec 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13104,6 +13104,19 @@ intel_prepare_plane_fb(struct drm_plane *plane, add_rps_boost_after_vblank(new_state->crtc, new_state->fence); } + /* + * We declare pageflips to be interactive and so merit a small bias + * towards upclocking to deliver the frame on time. By only changing + * the RPS thresholds to sample more regularly and aim for higher + * clocks we can hopefully deliver low power workloads (like kodi) + * that are not quite steady state without resorting to forcing + * maximum clocks following a vblank miss (see do_rps_boost()). + */ + if (!intel_state->rps_interactive) { + intel_rps_mark_interactive(dev_priv, true); + intel_state->rps_interactive = true; + } + return 0; } @@ -13120,8 +13133,15 @@ void intel_cleanup_plane_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { + struct intel_atomic_state *intel_state = + to_intel_atomic_state(old_state->state); struct drm_i915_private *dev_priv = to_i915(plane->dev); + if (intel_state->rps_interactive) { + intel_rps_mark_interactive(dev_priv, false); + intel_state->rps_interactive = false; + } + /* Should only be called after a successful intel_prepare_plane_fb()! 
*/ mutex_lock(&dev_priv->drm.struct_mutex); intel_plane_unpin_fb(to_intel_plane_state(old_state)); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 99a5f5be5b82..1ad7c1124bef 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -484,6 +484,8 @@ struct intel_atomic_state { */ bool skip_intermediate_wm; + bool rps_interactive; + /* Gen9+ only */ struct skl_ddb_values wm_results; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8a4152244571..2531eb75bdce 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6256,42 +6256,15 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) return limits; } -static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) +static void rps_set_power(struct drm_i915_private *dev_priv, int new_power) { struct intel_rps *rps = &dev_priv->gt_pm.rps; - int new_power; u32 threshold_up = 0, threshold_down = 0; /* in % */ u32 ei_up = 0, ei_down = 0; - new_power = rps->power; - switch (rps->power) { - case LOW_POWER: - if (val > rps->efficient_freq + 1 && - val > rps->cur_freq) - new_power = BETWEEN; - break; - - case BETWEEN: - if (val <= rps->efficient_freq && - val < rps->cur_freq) - new_power = LOW_POWER; - else if (val >= rps->rp0_freq && - val > rps->cur_freq) - new_power = HIGH_POWER; - break; + lockdep_assert_held(&rps->power.mutex); - case HIGH_POWER: - if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && - val < rps->cur_freq) - new_power = BETWEEN; - break; - } - /* Max/min bins are special */ - if (val <= rps->min_freq_softlimit) - new_power = LOW_POWER; - if (val >= rps->max_freq_softlimit) - new_power = HIGH_POWER; - if (new_power == rps->power) + if (new_power == rps->power.mode) return; /* Note the units here are not exactly 1us, but 1280ns. 
*/ @@ -6354,12 +6327,71 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) GEN6_RP_DOWN_IDLE_AVG); skip_hw_write: - rps->power = new_power; - rps->up_threshold = threshold_up; - rps->down_threshold = threshold_down; + rps->power.mode = new_power; + rps->power.up_threshold = threshold_up; + rps->power.down_threshold = threshold_down; +} + +static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) +{ + struct intel_rps *rps = &dev_priv->gt_pm.rps; + int new_power; + + new_power = rps->power.mode; + switch (rps->power.mode) { + case LOW_POWER: + if (val > rps->efficient_freq + 1 && + val > rps->cur_freq) + new_power = BETWEEN; + break; + + case BETWEEN: + if (val <= rps->efficient_freq && + val < rps->cur_freq) + new_power = LOW_POWER; + else if (val >= rps->rp0_freq && + val > rps->cur_freq) + new_power = HIGH_POWER; + break; + + case HIGH_POWER: + if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && + val < rps->cur_freq) + new_power = BETWEEN; + break; + } + /* Max/min bins are special */ + if (val <= rps->min_freq_softlimit) + new_power = LOW_POWER; + if (val >= rps->max_freq_softlimit) + new_power = HIGH_POWER; + + mutex_lock(&rps->power.mutex); + if (rps->power.interactive) + new_power = HIGH_POWER; + rps_set_power(dev_priv, new_power); + mutex_unlock(&rps->power.mutex); rps->last_adj = 0; } +void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive) +{ + struct intel_rps *rps = &i915->gt_pm.rps; + + if (INTEL_GEN(i915) < 6) + return; + + mutex_lock(&rps->power.mutex); + if (interactive) { + if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake)) + rps_set_power(i915, HIGH_POWER); + } else { + GEM_BUG_ON(!rps->power.interactive); + rps->power.interactive--; + } + mutex_unlock(&rps->power.mutex); +} + static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) { struct intel_rps *rps = &dev_priv->gt_pm.rps; @@ -6772,7 +6804,7 @@ static void reset_rps(struct drm_i915_private *dev_priv, u8 freq = rps->cur_freq; /* force a reset */ - rps->power = -1; + rps->power.mode = -1; rps->cur_freq = -1; if (set(dev_priv, freq)) @@ -9596,6 +9628,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) void intel_pm_setup(struct drm_i915_private *dev_priv) { mutex_init(&dev_priv->pcu_lock); + mutex_init(&dev_priv->gt_pm.rps.power.mutex); atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0); -- GitLab From 21eb1850fa0bd0a9b729bf3708da78888433027f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 1 Aug 2018 11:47:21 +0100 Subject: [PATCH 0050/1692] drm/i95: Mark GGTT as incoherent for gen10+ The evidence suggests that we need to start treating writes via GGTT as incoherent for gen10+, that is that they are internally buffered and not immediately visible via a read along a different physical path. 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107398 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107400 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107435 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180801104721.4030-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_pci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index e443fe44da3a..adf80563d0a1 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -590,6 +590,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = { GEN9_FEATURES, \ GEN(10), \ .ddb_size = 1024, \ + .has_coherent_ggtt = false, \ GLK_COLORS static const struct intel_device_info intel_cannonlake_info = { -- GitLab From c358514ba8da9e235876db1628cedd19a35803c6 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Mon, 30 Jul 2018 15:06:36 +0300 Subject: [PATCH 0051/1692] Revert "drm/i915/icl: WaEnableFloatBlendOptimization" The register for 0xe420 is unable to hold any value, including this bit. The documentation is also mixed between having a register bit for toggle and having a state command setup for it. Apparently the register toggle is deprecated. Remove the register toggle as evidence shows it's futile. The thing remaining is an apology and humble request for Mesa folks to resurrect their state setup for this as they were on right track from start. This reverts commit 0bf059f3532bb39c52d917142206a8554fc2f1c5. Fixes: 0bf059f3532b ("drm/i915/icl: WaEnableFloatBlendOptimization") References: HSDES#1406393558 Cc: Oscar Mateo Cc: Anuj Phogat Cc: Chris Wilson Cc: Lionel Landwerlin Signed-off-by: Mika Kuoppala Acked-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180730120636.26958-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/i915_reg.h | 3 --- drivers/gpu/drm/i915/intel_workarounds.c | 3 --- 2 files changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7bdc214ffb6e..e0f5999fff07 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2859,9 +2859,6 @@ enum i915_power_well_id { #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6) #define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1) -#define GEN10_CACHE_MODE_SS _MMIO(0xe420) -#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4) - #define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0) #define GEN6_BLITTER_LOCK_SHIFT 16 #define GEN6_BLITTER_FBC_NOTIFY (1 << 3) diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index f8bb32e974f6..4bcdeaf8d98f 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c @@ -508,9 +508,6 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv) WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); - /* WaEnableFloatBlendOptimization:icl */ - WA_SET_BIT_MASKED(GEN10_CACHE_MODE_SS, FLOAT_BLEND_OPTIMIZATION_ENABLE); - return 0; } -- GitLab From d0f5cc5db11470a160788c123cc67bdb0cad1904 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 30 Jul 2018 17:43:25 +0100 Subject: [PATCH 0052/1692] drm/i915/execlists: Terminate the context image with BB_END In the aub trace utility, the context images are terminated with a MI_BATCH_BUFFER_END; the simulator is reported as complaining otherwise. 
Do the same for our protocontext image for completeness, and in passing apply the magic bit for gen10 to mark the end of the context image. Reported-by: Lionel Landwerlin Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Reviewed-by: Lionel Landwerlin Link: https://patchwork.freedesktop.org/patch/msgid/20180730164325.12770-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_lrc.c | 4 ++++ drivers/gpu/drm/i915/intel_lrc_reg.h | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index fad689efb67a..b0be180c6294 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2653,6 +2653,10 @@ static void execlists_init_reg_state(u32 *regs, i915_oa_init_reg_state(engine, ctx, regs); } + + regs[CTX_END] = MI_BATCH_BUFFER_END; + if (INTEL_GEN(dev_priv) >= 10) + regs[CTX_END] |= BIT(0); } static int diff --git a/drivers/gpu/drm/i915/intel_lrc_reg.h b/drivers/gpu/drm/i915/intel_lrc_reg.h index 169a2239d6c7..5ef932d810a7 100644 --- a/drivers/gpu/drm/i915/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/intel_lrc_reg.h @@ -37,7 +37,7 @@ #define CTX_PDP0_LDW 0x32 #define CTX_LRI_HEADER_2 0x41 #define CTX_R_PWR_CLK_STATE 0x42 -#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44 +#define CTX_END 0x44 #define CTX_REG(reg_state, pos, reg, val) do { \ u32 *reg_state__ = (reg_state); \ -- GitLab From db47685da1d8224f0eaa35c32e1a1ea97b05bae0 Mon Sep 17 00:00:00 2001 From: Zhao Yan Date: Wed, 1 Aug 2018 00:15:09 -0400 Subject: [PATCH 0053/1692] drm/i915/gvt: add a fastpath for cmd parsing on MI_NOOP MI_NOOP is a common command appearing in almost all command buffers, put it into a fastpath can improve perfomance, especially in command buffers contains lots of MI_NOOPs (0s). Take glmark2 as an example, 3% performance increase is observed after introduced this patch. Meanwhile, in case where abundant in MI_NOOPs, up to 12% performance increase is measured. 
v2: use lowercase for index of MI_NOOP in cmd_info (zhenyu wang) Signed-off-by: Li Weinan Signed-off-by: Zhao Yan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 0dd88fe2e39a..4ba503ef5d2c 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1817,6 +1817,8 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) return ret; } +static int mi_noop_index; + static struct cmd_info cmd_info[] = { {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, @@ -2502,7 +2504,12 @@ static int cmd_parser_exec(struct parser_exec_state *s) cmd = cmd_val(s, 0); - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + /* fastpath for MI_NOOP */ + if (cmd == MI_NOOP) + info = &cmd_info[mi_noop_index]; + else + info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", cmd, get_opcode(cmd, s->ring_id), @@ -2904,6 +2911,8 @@ static int init_cmd_table(struct intel_gvt *gvt) info->name); return -EEXIST; } + if (cmd_info[i].opcode == OP_MI_NOOP) + mi_noop_index = i; INIT_HLIST_NODE(&e->hlist); add_cmd_entry(gvt, e); -- GitLab From 8bfa02c885ee538353f52b36d16de90655e1bcde Mon Sep 17 00:00:00 2001 From: Zhao Yan Date: Wed, 1 Aug 2018 00:15:31 -0400 Subject: [PATCH 0054/1692] drm/i915/gvt: only copy the first page for restore inhibit context if a context is a restore inhibit context, gfx hw only load the first page for ring context, so we only need to copy from guest the 1 page too. v3: use "return" instead of "goto" for inhibit case. 
(zhenyu wang) v2: move judgement of restore inhibit to a macro in mmio_context.h Signed-off-by: Zhao Yan Acked-by: Hang Yuan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/mmio_context.h | 3 ++ drivers/gpu/drm/i915/gvt/scheduler.c | 60 +++++++++++++------------ 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index 5c3b9ff9f96a..f7eaa442403f 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -53,5 +53,8 @@ bool is_inhibit_context(struct intel_context *ce); int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, struct i915_request *req); +#define IS_RESTORE_INHIBIT(a) \ + (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \ + ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT))) #endif diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 928818f218f7..049c0ea324fa 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -132,35 +132,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) unsigned long context_gpa, context_page_num; int i; - gvt_dbg_sched("ring id %d workload lrca %x", ring_id, - workload->ctx_desc.lrca); - - context_page_num = gvt->dev_priv->engine[ring_id]->context_size; - - context_page_num = context_page_num >> PAGE_SHIFT; - - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) - context_page_num = 19; - - i = 2; - - while (i < context_page_num) { - context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, - (u32)((workload->ctx_desc.lrca + i) << - I915_GTT_PAGE_SHIFT)); - if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("Invalid guest context descriptor\n"); - return -EFAULT; - } - - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); - dst = kmap(page); - intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, - I915_GTT_PAGE_SIZE); - kunmap(page); - i++; - } - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); shadow_ring_context = kmap(page); @@ -195,6 +166,37 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sr_oa_regs(workload, (u32 *)shadow_ring_context, false); kunmap(page); + + if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) + return 0; + + gvt_dbg_sched("ring id %d workload lrca %x", ring_id, + workload->ctx_desc.lrca); + + context_page_num = gvt->dev_priv->engine[ring_id]->context_size; + + context_page_num = context_page_num >> PAGE_SHIFT; + + if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + context_page_num = 19; + + i = 2; + while (i < context_page_num) { + context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, + (u32)((workload->ctx_desc.lrca + i) << + I915_GTT_PAGE_SHIFT)); + if (context_gpa == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("Invalid guest context descriptor\n"); + return -EFAULT; + } + + page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + dst = kmap(page); + intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, + I915_GTT_PAGE_SIZE); + kunmap(page); + i++; + } return 0; } -- GitLab From 0d55babc8392754352f1058866dd4182ae587d11 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Aug 2018 11:06:28 +0100 Subject: [PATCH 0055/1692] drm/i915: Drop stray clearing of rps->last_adj We used to reset last_adj to 0 on crossing a power domain boundary, to slow down our rate of change. 
However, commit 60548c554be2 ("drm/i915: Interactive RPS mode") accidentally caused it to be reset on every frequency update, nerfing the fast response granted by the slow start algorithm. Fixes: 60548c554be2 ("drm/i915: Interactive RPS mode") Testcase: igt/pm_rps/mix-max-config-loaded Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180802100631.31305-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_pm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2531eb75bdce..f90a3c7f1c40 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6371,7 +6371,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) new_power = HIGH_POWER; rps_set_power(dev_priv, new_power); mutex_unlock(&rps->power.mutex); - rps->last_adj = 0; } void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive) -- GitLab From 12a6c931beffeaaa350ef562e91e49918b4b3e68 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Tue, 31 Jul 2018 17:46:14 -0700 Subject: [PATCH 0056/1692] drm/i915/icl: avoid unclaimed PLANE_NV12_BUF_CFG register We don't have proper watermark NV12 support on ICL due to differences in how it should be implemented. In commit 234059da0f33 ("drm/i915/icl: NV12 y-plane ddb is not in same plane") we avoided writing the non-existent PLANE_NV12_BUF_CFG registers but we forgot to also avoid them on the hardware state readout. While the code is still not correct, at least now we can avoid unclaimed register error messages when dealing with RGB formats, which makes CI happier. Also add some FIXME comments in order to make it even more clear that there's still work to do. References: commit 234059da0f33 ("drm/i915/icl: NV12 y-plane ddb is not in same plane") Cc: Mahesh Kumar Reviewed-by: Mahesh Kumar Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180801004614.22149-1-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f90a3c7f1c40..24f0cab02bbc 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3909,7 +3909,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, val & PLANE_CTL_ALPHA_MASK); val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); - val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); + /* + * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed + * registers for now. + */ + if (INTEL_GEN(dev_priv) < 11) + val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); if (fourcc == DRM_FORMAT_NV12) { skl_ddb_entry_init_from_hw(dev_priv, @@ -4977,6 +4982,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), &ddb->plane[pipe][plane_id]); + /* FIXME: add proper NV12 support for ICL. 
*/ if (INTEL_GEN(dev_priv) >= 11) return skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), -- GitLab From 46e831abe864a6b59fa3de253a681c0f2ee1bf2f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Aug 2018 15:04:16 +0100 Subject: [PATCH 0057/1692] drm/i915/lpe: Mark LPE audio runtime pm as "no callbacks" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The LPE audio is a child device of i915, it is powered up and down alongside the igfx and presents no independent runtime interface. This aptly fulfils the description of a "No-Callback" Device, so mark it thus. Fixes: 183c00350ccd ("drm/i915: Fix runtime PM for LPE audio") Testcase: igt/pm_rpm/basic-pci-d3-state Testcase: igt/pm_rpm/basic-rte Signed-off-by: Chris Wilson Cc: Takashi Iwai Cc: Pierre-Louis Bossart Cc: Ville Syrjälä Cc: stable@vger.kernel.org Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180802140416.6062-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_lpe_audio.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 6269750e2b54..430732720e65 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -126,9 +126,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) return platdev; } - pm_runtime_forbid(&platdev->dev); - pm_runtime_set_active(&platdev->dev); - pm_runtime_enable(&platdev->dev); + pm_runtime_no_callbacks(&platdev->dev); return platdev; } -- GitLab From f4de7794de84f06dc56f0adc8ce34ab8294138a9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Aug 2018 11:06:29 +0100 Subject: [PATCH 0058/1692] drm/i915: Unconditionally clear the pm/guc GT IIR upon acking Having stored the IIR for action, we should always clear it. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180802100631.31305-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_irq.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 90628a47ae17..e37e3ec22a79 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1534,11 +1534,8 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915, if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); - if (likely(gt_iir[2] & (i915->pm_rps_events | - i915->pm_guc_events))) - raw_reg_write(regs, GEN8_GT_IIR(2), - gt_iir[2] & (i915->pm_rps_events | - i915->pm_guc_events)); + if (likely(gt_iir[2])) + raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); } if (master_ctl & GEN8_GT_VECS_IRQ) { -- GitLab From 4668f69544328dd04f39546274d5cd7a1cde2240 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Aug 2018 11:06:30 +0100 Subject: [PATCH 0059/1692] drm/i915: Clear all residual RPS events on disabling interrupts Make sure that the RPS IIR is completely clear on disabling so we should not get any more interrupts after idling. Since the IIR is shared with the guc, we have to be careful to only clobber RPS events. 
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180802100631.31305-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_irq.c | 8 +++++--- drivers/gpu/drm/i915/i915_reg.h | 6 ++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e37e3ec22a79..8084e35b25c5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -478,7 +478,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) { spin_lock_irq(&dev_priv->irq_lock); - gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events); + gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS); dev_priv->gt_pm.rps.pm_iir = 0; spin_unlock_irq(&dev_priv->irq_lock); } @@ -516,7 +516,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); - gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); synchronize_irq(dev_priv->drm.irq); @@ -4778,7 +4778,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) /* WaGsvRC0ResidencyMethod:vlv */ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; else - dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); rps->pm_intrmsk_mbz = 0; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e0f5999fff07..4b656f31fde9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8582,8 +8582,10 @@ enum { #define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4) #define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2) #define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1) -#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ - GEN6_PM_RP_DOWN_THRESHOLD | \ +#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \ + GEN6_PM_RP_UP_THRESHOLD | \ + GEN6_PM_RP_DOWN_EI_EXPIRED | \ + GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) #define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4) -- GitLab From c444ad790cc2b2bb6925a0b56716712a79624b4e Mon Sep 17 00:00:00 2001 From: Gwan-gyeong Mun Date: Fri, 3 Aug 2018 19:41:50 +0300 Subject: [PATCH 0060/1692] drm/i915: Fix typo in i915_drm_resume() Trivial typo, s/loose/lose/, in i915_drm_resume. Signed-off-by: Gwan-gyeong Mun Link: https://patchwork.freedesktop.org/patch/msgid/20180803164150.8185-1-gwan-gyeong.mun@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 18a45e7a3d7c..64e0ea4bef67 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1758,7 +1758,7 @@ static int i915_drm_resume(struct drm_device *dev) /* * ... but also need to make sure that hotplug processing * doesn't cause havoc. Like in the driver load code we don't - * bother with the tiny race here where we might loose hotplug + * bother with the tiny race here where we might lose hotplug * notifications. 
* */ intel_hpd_init(dev_priv); -- GitLab From 48928d4b5d6243296a95d60edd2dcbc8e39512b7 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 19 Jul 2018 10:05:56 -0700 Subject: [PATCH 0061/1692] drm/i915/icl: move has_resource_streamer to GEN11_FEATURES Resource streamer has been removed on GEN11 so move it to the FEATURES macro. Signed-off-by: Lucas De Marchi Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180719170557.10729-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/i915_pci.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3f0c612d42e7..1932bc227942 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -2223,7 +2223,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (args->flags & I915_EXEC_RESOURCE_STREAMER) { if (!HAS_RESOURCE_STREAMER(eb.i915)) { - DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n"); + DRM_DEBUG("RS is only allowed for Haswell and Gen8 - Gen10\n"); return -EINVAL; } if (eb.engine->id != RCS) { diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index adf80563d0a1..8a9a9009db62 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -604,13 +604,13 @@ static const struct intel_device_info intel_cannonlake_info = { GEN(11), \ .ddb_size = 2048, \ .has_csr = 0, \ + .has_resource_streamer = 0, \ .has_logical_ring_elsq = 1 static const struct intel_device_info intel_icelake_11_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), .is_alpha_support = 1, - .has_resource_streamer = 0, .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING, }; -- GitLab From 08e3e21a24d23db6a4adca90f7cb40d69e09d35c Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 3 Aug 2018 16:24:43 -0700 Subject: [PATCH 0062/1692] drm/i915: kill resource streamer support After disabling resource streamer on ICL (due to it actually not existing there), I got feedback that there have been some experimental patches for mesa to use RS years ago, but nothing ever landed or shipped because there was no performance improvement. This removes it from kernel keeping the uapi defines around for compatibility. v2: - re-add the inadvertent removal of CTX_CTRL_INHIBIT_SYN_CTX_SWITCH - don't bother trying to document removed params on uapi header: applications should know that from the query. 
(from Chris) v3: - disable CTX_CTRL_RS_CTX_ENABLE istead of removing it - reword commit message after Daniele confirmed no performance regression on his machine - reword commit message to make clear RS is being removed due to never been used v4: - move I915_EXEC_RESOURCE_STREAMER to __I915_EXEC_ILLEGAL_FLAGS so the check on ioctl() is made much earlier by i915_gem_check_execbuffer() (suggested by Tvrtko) Signed-off-by: Lucas De Marchi Acked-by: Daniele Ceraolo Spurio Reviewed-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180803232443.17193-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 18 +++--------------- drivers/gpu/drm/i915/i915_pci.c | 4 ---- drivers/gpu/drm/i915/intel_device_info.h | 1 - drivers/gpu/drm/i915/intel_lrc.c | 10 ++++------ drivers/gpu/drm/i915/intel_ringbuffer.c | 4 +--- drivers/gpu/drm/i915/intel_ringbuffer.h | 1 - 8 files changed, 9 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 64e0ea4bef67..3857e7963fc5 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -373,7 +373,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = 2; break; case I915_PARAM_HAS_RESOURCE_STREAMER: - value = HAS_RESOURCE_STREAMER(dev_priv); + value = 0; break; case I915_PARAM_HAS_POOLED_EU: value = HAS_POOLED_EU(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4aca5344863d..657f46e0cae9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2610,8 +2610,6 @@ intel_info(const struct drm_i915_private *dev_priv) #define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission() #define USES_HUC(dev_priv) intel_uc_is_using_huc() -#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer) - #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) #define INTEL_PCH_DEVICE_ID_MASK 0xff80 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1932bc227942..a926d7d47183 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -64,7 +64,9 @@ enum { #define BATCH_OFFSET_BIAS (256*1024) #define __I915_EXEC_ILLEGAL_FLAGS \ - (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK) + (__I915_EXEC_UNKNOWN_FLAGS | \ + I915_EXEC_CONSTANTS_MASK | \ + I915_EXEC_RESOURCE_STREAMER) /* Catch emission of unexpected errors for CI! 
*/ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) @@ -2221,20 +2223,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (!eb.engine) return -EINVAL; - if (args->flags & I915_EXEC_RESOURCE_STREAMER) { - if (!HAS_RESOURCE_STREAMER(eb.i915)) { - DRM_DEBUG("RS is only allowed for Haswell and Gen8 - Gen10\n"); - return -EINVAL; - } - if (eb.engine->id != RCS) { - DRM_DEBUG("RS is not available on %s\n", - eb.engine->name); - return -EINVAL; - } - - eb.batch_flags |= I915_DISPATCH_RS; - } - if (args->flags & I915_EXEC_FENCE_IN) { in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); if (!in_fence) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 8a9a9009db62..e931b48369dd 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -368,7 +368,6 @@ static const struct intel_device_info intel_valleyview_info = { .has_ddi = 1, \ .has_fpga_dbg = 1, \ .has_psr = 1, \ - .has_resource_streamer = 1, \ .has_dp_mst = 1, \ .has_rc6p = 0 /* RC6p removed-by HSW */, \ .has_runtime_pm = 1 @@ -441,7 +440,6 @@ static const struct intel_device_info intel_cherryview_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .has_64bit_reloc = 1, .has_runtime_pm = 1, - .has_resource_streamer = 1, .has_rc6 = 1, .has_logical_ring_contexts = 1, .has_gmch_display = 1, @@ -515,7 +513,6 @@ static const struct intel_device_info intel_skylake_gt4_info = { .has_runtime_pm = 1, \ .has_pooled_eu = 0, \ .has_csr = 1, \ - .has_resource_streamer = 1, \ .has_rc6 = 1, \ .has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ @@ -604,7 +601,6 @@ static const struct intel_device_info intel_cannonlake_info = { GEN(11), \ .ddb_size = 2048, \ .has_csr = 0, \ - .has_resource_streamer = 0, \ .has_logical_ring_elsq = 1 static const struct intel_device_info intel_icelake_11_info = { diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 07e8364d1a8c..6eecd64734d5 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -103,7 +103,6 @@ enum intel_platform { func(has_psr); \ func(has_rc6); \ func(has_rc6p); \ - func(has_resource_streamer); \ func(has_runtime_pm); \ func(has_snoop); \ func(has_coherent_ggtt); \ diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index b0be180c6294..e5385dbfcdda 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2065,8 +2065,7 @@ static int gen8_emit_bb_start(struct i915_request *rq, /* FIXME(BDW): Address space and security selectors. */ *cs++ = MI_BATCH_BUFFER_START_GEN8 | - (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) | - (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0); + (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); *cs++ = lower_32_bits(offset); *cs++ = upper_32_bits(offset); @@ -2584,10 +2583,9 @@ static void execlists_init_reg_state(u32 *regs, CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine), _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) | - _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | - (HAS_RESOURCE_STREAMER(dev_priv) ? 
- CTX_CTRL_RS_CTX_ENABLE : 0))); + CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | + CTX_CTRL_RS_CTX_ENABLE) | + _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH)); CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0); CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0); CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 80a8b6e57374..8003cef767ba 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1980,9 +1980,7 @@ hsw_emit_bb_start(struct i915_request *rq, return PTR_ERR(cs); *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? - 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | - (dispatch_flags & I915_DISPATCH_RS ? - MI_BATCH_RESOURCE_STREAMER : 0); + 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW); /* bit0-7 is the length on GEN6+ */ *cs++ = offset; intel_ring_advance(rq, cs); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 57f3787ed6ec..8837079cb8b3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -474,7 +474,6 @@ struct intel_engine_cs { unsigned int dispatch_flags); #define I915_DISPATCH_SECURE BIT(0) #define I915_DISPATCH_PINNED BIT(1) -#define I915_DISPATCH_RS BIT(2) void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs); int emit_breadcrumb_sz; -- GitLab From a6476ebd4350d51146ef0492b4b06bc0d31e8827 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 6 Aug 2018 15:56:47 +0100 Subject: [PATCH 0063/1692] drm/i915: Stop dropping irq around resets A long time ago, we were afraid of handling interrupts and signaling waiters during a reset, worrying that the confusion in request handling would interfere with our attempts to process the reset in an orderly fashion. Since then, we have isolated our irq-driven request handling by virtue of the engine->timeline.lock and control of kthreads where required, eliminating the danger of concurrently processing interrupts. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180806145647.13131-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3857e7963fc5..ed0169d49876 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1918,7 +1918,6 @@ void i915_reset(struct drm_i915_private *i915, dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); error->reset_count++; - disable_irq(i915->drm.irq); ret = i915_gem_reset_prepare(i915); if (ret) { dev_err(i915->drm.dev, "GPU recovery failed\n"); @@ -1980,8 +1979,6 @@ void i915_reset(struct drm_i915_private *i915, finish: i915_gem_reset_finish(i915); - enable_irq(i915->drm.irq); - wakeup: clear_bit(I915_RESET_HANDOFF, &error->flags); wake_up_bit(&error->flags, I915_RESET_HANDOFF); -- GitLab From e6a59382924e2d007b554a2aebcd4445ebb01fef Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 6 Aug 2018 15:46:04 +0100 Subject: [PATCH 0064/1692] drm/i915/selftests: Unconditionally do a chipset flush before emit_bb_start Experience teaches us over and over again that coherency on Baytrail requires the odd heavy hammer, and in particular clflush alone is not enough to guarrantee that writes from the CPU are picked up by the CS. 
Do as we do elsewhere and ensure we have an unconditional i915_gem_chipset_flush() after writing to memory and submitting a batch to HW. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107499 Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180806144604.8346-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/huge_pages.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 7efb326badcd..e272127783fe 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -906,7 +906,11 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) if (IS_ERR(obj)) return ERR_CAST(obj); - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + err = i915_gem_object_set_to_wc_domain(obj, true); + if (err) + goto err; + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err; @@ -936,13 +940,10 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) } *cmd = MI_BATCH_BUFFER_END; + i915_gem_chipset_flush(i915); i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; - batch = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); -- GitLab From 63ef26237ba846b8898c229a85bfae1cf4fac845 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Tue, 31 Jul 2018 11:02:11 +0800 Subject: [PATCH 0065/1692] drm/i915/gvt: make dma map/unmap kvmgt functions as static Make kvmgt_dma_map/unmap_guest_page as static function. Reviewed-by: Hang Yuan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 685cb3de6dab..60d9f5d9fb1e 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1676,7 +1676,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) return pfn; } -int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, +static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { struct kvmgt_guest_info *info; @@ -1725,7 +1725,7 @@ static void __gvt_dma_release(struct kref *ref) __gvt_cache_remove_entry(entry->vgpu, entry); } -void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) +static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) { struct kvmgt_guest_info *info; struct gvt_dma *entry; -- GitLab From a752b070a67823174565322cc48b2668daf9a8da Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Tue, 31 Jul 2018 11:02:12 +0800 Subject: [PATCH 0066/1692] drm/i915/gvt: Fix function comment doc errors Caught by W=1 to fix left wrong function comment doc. 
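(For reference, the kernel-doc shape these fixes bring the comments in line
with looks roughly as follows; the function and parameter names here are
invented for illustration, the real ones are in the hunks below.)

    /**
     * sample_mmio_read - one-line summary of what the function does
     * @vgpu: a vGPU
     * @offset: register offset
     * @p_data: data return buffer
     * @bytes: access data length
     *
     * Every parameter gets its own "@name: description" line so the
     * kernel-doc checks caught by W=1 stop warning about it.
     *
     * Returns:
     * Zero on success, negative error code if failed.
     */
    int sample_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
                         void *p_data, unsigned int bytes);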
Reviewed-by: Hang Yuan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cfg_space.c | 12 ++++++++++++ drivers/gpu/drm/i915/gvt/display.c | 1 + drivers/gpu/drm/i915/gvt/edid.c | 9 +++++++++ drivers/gpu/drm/i915/gvt/gtt.c | 9 ++++++--- drivers/gpu/drm/i915/gvt/gvt.c | 3 +-- drivers/gpu/drm/i915/gvt/handlers.c | 1 + drivers/gpu/drm/i915/gvt/mmio.c | 3 ++- drivers/gpu/drm/i915/gvt/opregion.c | 1 - drivers/gpu/drm/i915/gvt/page_track.c | 2 ++ drivers/gpu/drm/i915/gvt/scheduler.c | 4 +++- 10 files changed, 37 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index c62346fdc05d..19cf1bbe059d 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -56,6 +56,10 @@ static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = { /** * vgpu_pci_cfg_mem_write - write virtual cfg space memory + * @vgpu: target vgpu + * @off: offset + * @src: src ptr to write + * @bytes: number of bytes * * Use this function to write virtual cfg space memory. * For standard cfg space, only RW bits can be changed, @@ -91,6 +95,10 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, /** * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read + * @vgpu: target vgpu + * @offset: offset + * @p_data: return data ptr + * @bytes: number of bytes to read * * Returns: * Zero on success, negative error code if failed. @@ -278,6 +286,10 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, /** * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space write + * @vgpu: target vgpu + * @offset: offset + * @p_data: write data ptr + * @bytes: number of bytes to write * * Returns: * Zero on success, negative error code if failed. 
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6ee50cb328f8..379fc81da863 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -462,6 +462,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) /** * intel_vgpu_init_display- initialize vGPU virtual display emulation * @vgpu: a vGPU + * @resolution: resolution index for intel_vgpu_edid * * This function is used to initialize vGPU virtual display emulation stuffs * diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 4b98539025c5..5d4bb35bb889 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -340,6 +340,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, /** * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read * @vgpu: a vGPU + * @offset: reg offset + * @p_data: data return buffer + * @bytes: access data length * * This function is used to emulate gmbus register mmio read * @@ -365,6 +368,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu, /** * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write * @vgpu: a vGPU + * @offset: reg offset + * @p_data: data return buffer + * @bytes: access data length * * This function is used to emulate gmbus register mmio write * @@ -437,6 +443,9 @@ static inline int get_aux_ch_reg(unsigned int offset) /** * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write * @vgpu: a vGPU + * @port_idx: port index + * @offset: reg offset + * @p_data: write ptr * * This function is used to emulate AUX channel register write * diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 156ceeeb7446..f1a4cb1b8438 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1113,6 +1113,10 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, } /** + * Check if can do 2M page + * @vgpu: target vgpu + * @entry: target pfn's gtt entry + * * Return 1 if 2MB huge gtt shadowing is possilbe, 0 if miscondition, * negtive if found err. */ @@ -1942,7 +1946,7 @@ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) /** * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object - * @vgpu: a vGPU + * @mm: target vgpu mm * * This function is called when user wants to use a vGPU mm object. If this * mm object hasn't been shadowed yet, the shadow will be populated at this @@ -2462,8 +2466,7 @@ static int setup_spt_oos(struct intel_gvt *gvt) /** * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object * @vgpu: a vGPU - * @page_table_level: PPGTT page table level - * @root_entry: PPGTT page table root pointers + * @pdps: pdp root array * * This function is used to find a PPGTT mm object from mm object pool * diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 712f9d14e720..e14416d97e73 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -188,7 +188,6 @@ static const struct intel_gvt_ops intel_gvt_ops = { /** * intel_gvt_init_host - Load MPT modules and detect if we're running in host - * @gvt: intel gvt device * * This function is called at the driver loading stage. 
If failed to find a * loadable MPT module or detect currently we're running in a VM, then GVT-g @@ -302,7 +301,7 @@ static int init_service_thread(struct intel_gvt *gvt) /** * intel_gvt_clean_device - clean a GVT device - * @gvt: intel gvt device + * @dev_priv: i915 private * * This function is called at the driver unloading stage, to free the * resources owned by a GVT device. diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index e2e252c67de8..131b37b038f0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3405,6 +3405,7 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, * @offset: register offset * @pdata: data buffer * @bytes: data length + * @is_read: read or write * * Returns: * Zero on success, negative error code if failed. diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 2be1be2cf49a..dcd31e3781df 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -39,6 +39,7 @@ /** * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset * @vgpu: a vGPU + * @gpa: guest physical address * * Returns: * Zero on success, negative error code if failed @@ -228,7 +229,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, /** * intel_vgpu_reset_mmio - reset virtual MMIO space * @vgpu: a vGPU - * + * @dmlr: whether this is device model level reset */ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) { diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index fa75a2eead90..82586c8e434f 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -216,7 +216,6 @@ static void virt_vbt_generation(struct vbt *v) /** * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion * @vgpu: a vGPU - * @gpa: guest physical address of opregion * * Returns: * Zero on success, negative error code if failed. diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 256d0db8bbb1..84856022528e 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -41,6 +41,8 @@ struct intel_vgpu_page_track *intel_vgpu_find_page_track( * intel_vgpu_register_page_track - register a guest page to be tacked * @vgpu: a vGPU * @gfn: the gfn of guest page + * @handler: page track handler + * @priv: tracker private * * Returns: * zero on success, negative error code if failed. diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 049c0ea324fa..cd6f63b5a40a 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1135,6 +1135,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) /** * intel_vgpu_select_submission_ops - select virtual submission interface * @vgpu: a vGPU + * @engine_mask: either ALL_ENGINES or target engine mask * @interface: expected vGPU virtual submission interface * * This function is called when guest configures submission interface. @@ -1187,7 +1188,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, /** * intel_vgpu_destroy_workload - destroy a vGPU workload - * @vgpu: a vGPU + * @workload: workload to destroy * * This function is called when destroy a vGPU workload. 
* @@ -1279,6 +1280,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) /** * intel_vgpu_create_workload - create a vGPU workload * @vgpu: a vGPU + * @ring_id: ring index * @desc: a guest context descriptor * * This function is called when creating a vGPU workload. -- GitLab From 69ca5af4ff9a3ff96e4595c2b7522c01a2641779 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Tue, 31 Jul 2018 11:02:13 +0800 Subject: [PATCH 0067/1692] drm/i915/gvt: Move some MMIO definitions to reg.h To consolidate all gvt private MMIO definition in one place, this moves some not yet used in i915 to reg.h. Reviewed-by: Hang Yuan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/mmio_context.c | 13 ------------- drivers/gpu/drm/i915/gvt/reg.h | 9 +++++++++ 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 20be9a92600f..d20f2c9bda82 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -37,19 +37,6 @@ #include "gvt.h" #include "trace.h" -/** - * Defined in Intel Open Source PRM. - * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms - */ -#define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i)*4) -#define TRNULLDETCT _MMIO(0x4de8) -#define TRINVTILEDETCT _MMIO(0x4dec) -#define TRVADR _MMIO(0x4df0) -#define TRTTE _MMIO(0x4df4) -#define RING_EXCC(base) _MMIO((base) + 0x28) -#define RING_GFX_MODE(base) _MMIO((base) + 0x29c) -#define VF_GUARDBAND _MMIO(0x83a4) - #define GEN9_MOCS_SIZE 64 /* Raw offset is appened to each line for convenience. */ diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index d4f7ce6dc1d7..60dcc6bb8425 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -77,4 +77,13 @@ #define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \ I915_GTT_PAGE_SIZE) +#define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i) * 4) +#define TRNULLDETCT _MMIO(0x4de8) +#define TRINVTILEDETCT _MMIO(0x4dec) +#define TRVADR _MMIO(0x4df0) +#define TRTTE _MMIO(0x4df4) +#define RING_EXCC(base) _MMIO((base) + 0x28) +#define RING_GFX_MODE(base) _MMIO((base) + 0x29c) +#define VF_GUARDBAND _MMIO(0x83a4) + #endif -- GitLab From aaa023782fda76a3eacd9f3c8c14ad7382d86cf2 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Tue, 31 Jul 2018 19:54:44 +0530 Subject: [PATCH 0068/1692] drm/i915: ddb_size is of u16 type ddb_size is u16 so use same return type for intel_get_ddb_size wrapper. 
Signed-off-by: Mahesh Kumar Reviewed-by: Chris Wilson Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20180731142445.30723-2-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 24f0cab02bbc..0e4b8328a971 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3771,11 +3771,11 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) return true; } -static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *cstate, - const unsigned int total_data_rate, - const int num_active, - struct skl_ddb_allocation *ddb) +static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *cstate, + const unsigned int total_data_rate, + const int num_active, + struct skl_ddb_allocation *ddb) { const struct drm_display_mode *adjusted_mode; u64 total_data_bw; @@ -3814,7 +3814,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *for_crtc = cstate->base.crtc; - unsigned int pipe_size, ddb_size; + u16 pipe_size, ddb_size; int nth_active_pipe; if (WARN_ON(!state) || !cstate->base.active) { -- GitLab From cf1f697acb04d2e06c117436cc55e52760f1ea7c Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Wed, 1 Aug 2018 20:41:13 +0530 Subject: [PATCH 0069/1692] drm/i915/skl: distribute DDB based on panel resolution We distribute DDB equally among all pipes irrespective of display buffer requirement of each pipe. This leads to a situation where high resolution y-tiled display can not be enabled with 2 low resolution displays. Main contributing factor for DDB requirement is width of the display. This patch make changes to distribute ddb based on display width. So display with higher width will get bigger chunk of DDB. 
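To make the width-proportional split concrete, here is a minimal standalone
sketch (the names ddb_slice/ddb_entry and the 896-block DDB size are
illustrative only; the driver's version is
skl_ddb_get_pipe_allocation_limits() in the hunk below):

    #include <stdio.h>

    /* stand-in for the driver's skl_ddb_entry */
    struct ddb_entry { unsigned int start, end; };

    /* Same arithmetic as the patch: a pipe's slice of the DDB is
     * proportional to its hdisplay width. */
    static struct ddb_entry ddb_slice(unsigned int ddb_size,
                                      unsigned int width_before_pipe,
                                      unsigned int pipe_width,
                                      unsigned int total_width)
    {
            struct ddb_entry alloc;

            alloc.start = ddb_size * width_before_pipe / total_width;
            alloc.end = ddb_size * (width_before_pipe + pipe_width) / total_width;
            return alloc;
    }

    int main(void)
    {
            /* e.g. an 896-block DDB shared by a 3840- and a 1920-wide pipe */
            struct ddb_entry a = ddb_slice(896, 0, 3840, 5760);    /* 0..597   -> 597 blocks */
            struct ddb_entry b = ddb_slice(896, 3840, 1920, 5760); /* 597..896 -> 299 blocks */

            printf("pipe A: %u-%u, pipe B: %u-%u\n", a.start, a.end, b.start, b.end);
            return 0;
    }

Because both boundaries are computed from cumulative widths, integer
truncation cannot make adjacent slices overlap or leave a gap: one pipe's end
is by construction the next pipe's start.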
Changes Since V1: - pipe_size/ddb_size will not overflow u16 so use appropriate data-types during computation (Chris) Changes Since V2: - avoid redundancy and possible truncation errors (Chris) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107113 Cc: raviraj.p.sitaram@intel.com Cc: Chris Wilson Signed-off-by: Mahesh Kumar Reviewed-by: Chris Wilson Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20180801151113.5337-1-mahesh1.kumar@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 55 ++++++++++++++++++++++++--------- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0e4b8328a971..03654f5f68c3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3814,8 +3814,12 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *for_crtc = cstate->base.crtc; - u16 pipe_size, ddb_size; - int nth_active_pipe; + const struct drm_crtc_state *crtc_state; + const struct drm_crtc *crtc; + u32 pipe_width = 0, total_width = 0, width_before_pipe = 0; + enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe; + u16 ddb_size; + u32 i; if (WARN_ON(!state) || !cstate->base.active) { alloc->start = 0; @@ -3833,14 +3837,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, *num_active, ddb); /* - * If the state doesn't change the active CRTC's, then there's - * no need to recalculate; the existing pipe allocation limits - * should remain unchanged. Note that we're safe from racing - * commits since any racing commit that changes the active CRTC - * list would need to grab _all_ crtc locks, including the one - * we currently hold. + * If the state doesn't change the active CRTC's or there is no + * modeset request, then there's no need to recalculate; + * the existing pipe allocation limits should remain unchanged. + * Note that we're safe from racing commits since any racing commit + * that changes the active CRTC list or do modeset would need to + * grab _all_ crtc locks, including the one we currently hold. */ - if (!intel_state->active_pipe_changes) { + if (!intel_state->active_pipe_changes && !intel_state->modeset) { /* * alloc may be cleared by clear_intel_crtc_state, * copy from old state to be sure @@ -3849,11 +3853,32 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, return; } - nth_active_pipe = hweight32(intel_state->active_crtcs & - (drm_crtc_mask(for_crtc) - 1)); - pipe_size = ddb_size / hweight32(intel_state->active_crtcs); - alloc->start = nth_active_pipe * ddb_size / *num_active; - alloc->end = alloc->start + pipe_size; + /* + * Watermark/ddb requirement highly depends upon width of the + * framebuffer, So instead of allocating DDB equally among pipes + * distribute DDB based on resolution/width of the display. 
+ */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + const struct drm_display_mode *adjusted_mode; + int hdisplay, vdisplay; + enum pipe pipe; + + if (!crtc_state->enable) + continue; + + pipe = to_intel_crtc(crtc)->pipe; + adjusted_mode = &crtc_state->adjusted_mode; + drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); + total_width += hdisplay; + + if (pipe < for_pipe) + width_before_pipe += hdisplay; + else if (pipe == for_pipe) + pipe_width = hdisplay; + } + + alloc->start = ddb_size * width_before_pipe / total_width; + alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width; } static unsigned int skl_cursor_allocation(int num_active) @@ -5254,7 +5279,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) * any other display updates race with this transaction, so we need * to grab the lock on *all* CRTC's. */ - if (intel_state->active_pipe_changes) { + if (intel_state->active_pipe_changes || intel_state->modeset) { realloc_pipes = ~0; intel_state->wm_results.dirty_pipes = ~0; } -- GitLab From 97f0615800041b145b36e00df65526e0fa6927bd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 6 Aug 2018 12:26:05 +0100 Subject: [PATCH 0070/1692] drm/i915: Pull seqno started checks together We have a few instances of checking seqno-1 to see if the HW has started the request. Pull those together under a helper. v2: Pull the !seqno assertion higher, as given seqno==1 we may indeed check to see if we have started using seqno==0. Suggested-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180806112605.20725-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 9 +++--- drivers/gpu/drm/i915/i915_request.h | 39 +++++++++++++++--------- drivers/gpu/drm/i915/intel_breadcrumbs.c | 6 ++-- drivers/gpu/drm/i915/intel_engine_cs.c | 3 +- drivers/gpu/drm/i915/intel_hangcheck.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.h | 33 ++++++++++++++++---- 6 files changed, 60 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 5c2c93cbab12..09ed48833b54 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -527,7 +527,7 @@ void __i915_request_submit(struct i915_request *request) seqno = timeline_get_seqno(&engine->timeline); GEM_BUG_ON(!seqno); - GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno)); + GEM_BUG_ON(intel_engine_signaled(engine, seqno)); /* We may be recursing from the signal callback of another i915 fence */ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); @@ -579,8 +579,7 @@ void __i915_request_unsubmit(struct i915_request *request) */ GEM_BUG_ON(!request->global_seqno); GEM_BUG_ON(request->global_seqno != engine->timeline.seqno); - GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), - request->global_seqno)); + GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno)); engine->timeline.seqno--; /* We may be recursing from the signal callback of another i915 fence */ @@ -1205,7 +1204,7 @@ static bool __i915_spin_request(const struct i915_request *rq, * it is a fair assumption that it will not complete within our * relatively short timeout. 
*/ - if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1)) + if (!intel_engine_has_started(engine, seqno)) return false; /* @@ -1222,7 +1221,7 @@ static bool __i915_spin_request(const struct i915_request *rq, irq = READ_ONCE(engine->breadcrumbs.irq_count); timeout_us += local_clock_us(&cpu); do { - if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) + if (intel_engine_has_completed(engine, seqno)) return seqno == i915_request_global_seqno(rq); /* diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index e1c9365dfefb..9898301ab7ef 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -272,7 +272,10 @@ long i915_request_wait(struct i915_request *rq, #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ #define I915_WAIT_FOR_IDLE_BOOST BIT(3) -static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); +static inline bool intel_engine_has_started(struct intel_engine_cs *engine, + u32 seqno); +static inline bool intel_engine_has_completed(struct intel_engine_cs *engine, + u32 seqno); /** * Returns true if seq1 is later than seq2. @@ -282,11 +285,31 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2) return (s32)(seq1 - seq2) >= 0; } +/** + * i915_request_started - check if the request has begun being executed + * @rq: the request + * + * Returns true if the request has been submitted to hardware, and the hardware + * has advanced passed the end of the previous request and so should be either + * currently processing the request (though it may be preempted and so + * not necessarily the next request to complete) or have completed the request. + */ +static inline bool i915_request_started(const struct i915_request *rq) +{ + u32 seqno; + + seqno = i915_request_global_seqno(rq); + if (!seqno) /* not yet submitted to HW */ + return false; + + return intel_engine_has_started(rq->engine, seqno); +} + static inline bool __i915_request_completed(const struct i915_request *rq, u32 seqno) { GEM_BUG_ON(!seqno); - return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) && + return intel_engine_has_completed(rq->engine, seqno) && seqno == i915_request_global_seqno(rq); } @@ -301,18 +324,6 @@ static inline bool i915_request_completed(const struct i915_request *rq) return __i915_request_completed(rq, seqno); } -static inline bool i915_request_started(const struct i915_request *rq) -{ - u32 seqno; - - seqno = i915_request_global_seqno(rq); - if (!seqno) - return false; - - return i915_seqno_passed(intel_engine_get_seqno(rq->engine), - seqno - 1); -} - static inline bool i915_sched_node_signaled(const struct i915_sched_node *node) { const struct i915_request *rq = diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 1db6ba7d926e..84bf8d827136 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -256,8 +256,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) spin_unlock(&b->irq_lock); rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) { - GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno)); + GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno)); RB_CLEAR_NODE(&wait->node); wake_up_process(wait->tsk); } @@ -508,8 +507,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine, return armed; /* Make the caller recheck if its request has already started. 
*/ - return i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno - 1); + return intel_engine_has_started(engine, wait->seqno); } static inline bool chain_wakeup(struct rb_node *rb, int priority) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 67c4fc5d737c..99d5a24219c1 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -968,8 +968,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return true; /* Any inflight/incomplete requests? */ - if (!i915_seqno_passed(intel_engine_get_seqno(engine), - intel_engine_last_submit(engine))) + if (!intel_engine_signaled(engine, intel_engine_last_submit(engine))) return false; if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock)) diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 2fc7a0dd0df9..e26d05a46451 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -142,7 +142,7 @@ static int semaphore_passed(struct intel_engine_cs *engine) if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) return -1; - if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno)) + if (intel_engine_signaled(signaller, seqno)) return 1; /* cursory check for an unkickable deadlock */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8837079cb8b3..9090885d57de 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -910,14 +910,10 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine); u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); -static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine) { - /* We are only peeking at the tail of the submit queue (and not the + /* + * We are only peeking at the tail of the submit queue (and not the * queue itself) in order to gain a hint as to the current active * state of the engine. 
Callers are not expected to be taking * engine->timeline->lock, nor are they expected to be concerned @@ -927,6 +923,31 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine) return READ_ONCE(engine->timeline.seqno); } +static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) +{ + return intel_read_status_page(engine, I915_GEM_HWS_INDEX); +} + +static inline bool intel_engine_signaled(struct intel_engine_cs *engine, + u32 seqno) +{ + return i915_seqno_passed(intel_engine_get_seqno(engine), seqno); +} + +static inline bool intel_engine_has_completed(struct intel_engine_cs *engine, + u32 seqno) +{ + GEM_BUG_ON(!seqno); + return intel_engine_signaled(engine, seqno); +} + +static inline bool intel_engine_has_started(struct intel_engine_cs *engine, + u32 seqno) +{ + GEM_BUG_ON(!seqno); + return intel_engine_signaled(engine, seqno - 1); +} + void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone); -- GitLab From ae9b06ca067d5c22286c7290553c4b290607a042 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:34 +0300 Subject: [PATCH 0071/1692] drm/i915/icl: Fix power well anonymous union initializers Similarly to commit 0a445945be6d ("drm/i915: Work around GCC anonymous union initialization bug") we need to initialize anonymous unions inside extra braces to work around a GCC4.4 build error. v2: - Fix checkpatch errors in commit log. (Paulo) Cc: Chris Wilson Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Rodrigo Vivi Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-2-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index cf89141b2281..11cb2a70e3fe 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2620,14 +2620,18 @@ static struct i915_power_well icl_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_1, - .hsw.has_fuses = true, + { + .hsw.has_fuses = true, + }, }, { .name = "power well 2", .domains = ICL_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_2, - .hsw.has_fuses = true, + { + .hsw.has_fuses = true, + }, }, { .name = "DC off", @@ -2640,9 +2644,11 @@ static struct i915_power_well icl_power_wells[] = { .domains = ICL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, + { + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, }, { .name = "DDI A IO", @@ -2745,8 +2751,10 @@ static struct i915_power_well icl_power_wells[] = { .domains = ICL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), + { + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + }, }, }; -- GitLab From 48a287ed9d624d8eae65e2dd1b12915b2b853644 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:35 +0300 Subject: [PATCH 0072/1692] drm/i915: Rename intel_power_domains_fini() to intel_power_domains_fini_hw() intel_power_domains_fini() rolls back what was done in intel_power_domains_init_hw(), so rename and move it accordingly. This allows us adding a cleanup function later for intel_power_domains_init() in a cleaner way. 
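For orientation only (not part of this patch), the init/teardown pairing the rename works toward; the cleanup counterpart is added by a later patch in this series ("Constify power well descriptors"):

    /*
     * Rough pairing, illustrative only:
     *
     *   intel_power_domains_init()      sets up the power domains sw state
     *   intel_power_domains_init_hw()   brings up the hw state
     *       ...
     *   intel_power_domains_fini_hw()   rolls back init_hw() (this rename)
     *   intel_power_domains_cleanup()   rolls back init() (added later)
     */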
No functional change. v2: - Fix checkpatch error adding missing param name to function declaration. (Paulo) Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Rodrigo Vivi Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-3-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 4 +- drivers/gpu/drm/i915/intel_drv.h | 2 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 69 ++++++++++++------------- 3 files changed, 37 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ed0169d49876..6396318cf73c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -712,7 +712,7 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_teardown_gmbus(dev_priv); cleanup_csr: intel_csr_ucode_fini(dev_priv); - intel_power_domains_fini(dev_priv); + intel_power_domains_fini_hw(dev_priv); vga_switcheroo_unregister_client(pdev); cleanup_vga_client: vga_client_register(pdev, NULL, NULL, NULL); @@ -1462,7 +1462,7 @@ void i915_driver_unload(struct drm_device *dev) i915_gem_fini(dev_priv); intel_fbc_cleanup_cfb(dev_priv); - intel_power_domains_fini(dev_priv); + intel_power_domains_fini_hw(dev_priv); i915_driver_cleanup_hw(dev_priv); i915_driver_cleanup_mmio(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1ad7c1124bef..3576ab5735b1 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1948,8 +1948,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state); /* intel_runtime_pm.c */ int intel_power_domains_init(struct drm_i915_private *); -void intel_power_domains_fini(struct drm_i915_private *); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); +void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); void intel_power_domains_suspend(struct drm_i915_private *dev_priv); void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 11cb2a70e3fe..e82aa38bfd23 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2902,41 +2902,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) return 0; } -/** - * intel_power_domains_fini - finalizes the power domain structures - * @dev_priv: i915 device instance - * - * Finalizes the power domain structures for @dev_priv depending upon the - * supported platform. This function also disables runtime pm and ensures that - * the device stays powered up so that the driver can be reloaded. - */ -void intel_power_domains_fini(struct drm_i915_private *dev_priv) -{ - struct device *kdev = &dev_priv->drm.pdev->dev; - - /* - * The i915.ko module is still not prepared to be loaded when - * the power well is not enabled, so just enable it in case - * we're going to unload/reload. - * The following also reacquires the RPM reference the core passed - * to the driver during loading, which is dropped in - * intel_runtime_pm_enable(). We have to hand back the control of the - * device to the core with this reference held. - */ - intel_display_set_init_power(dev_priv, true); - - /* Remove the refcount we took to keep power well support disabled. 
*/ - if (!i915_modparams.disable_power_well) - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - - /* - * Remove the refcount we took in intel_runtime_pm_enable() in case - * the platform doesn't support runtime PM. - */ - if (!HAS_RUNTIME_PM(dev_priv)) - pm_runtime_put(kdev); -} - static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; @@ -3576,6 +3541,40 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) power_domains->initializing = false; } +/** + * intel_power_domains_fini_hw - deinitialize hw power domain state + * @dev_priv: i915 device instance + * + * De-initializes the display power domain HW state. It also ensures that the + * device stays powered up so that the driver can be reloaded. + */ +void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) +{ + struct device *kdev = &dev_priv->drm.pdev->dev; + + /* + * The i915.ko module is still not prepared to be loaded when + * the power well is not enabled, so just enable it in case + * we're going to unload/reload. + * The following also reacquires the RPM reference the core passed + * to the driver during loading, which is dropped in + * intel_runtime_pm_enable(). We have to hand back the control of the + * device to the core with this reference held. + */ + intel_display_set_init_power(dev_priv, true); + + /* Remove the refcount we took to keep power well support disabled. */ + if (!i915_modparams.disable_power_well) + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + /* + * Remove the refcount we took in intel_runtime_pm_enable() in case + * the platform doesn't support runtime PM. + */ + if (!HAS_RUNTIME_PM(dev_priv)) + pm_runtime_put(kdev); +} + /** * intel_power_domains_suspend - suspend power domain state * @dev_priv: i915 device instance -- GitLab From 3ae27f7e103d95a820061fa692d0fe53303ccf98 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:36 +0300 Subject: [PATCH 0073/1692] drm/i915/vlv: Remove redundant power well ID asserts The callbacks these asserts are called from are used from a single power well, so not much point in checking that. The check also requires a unique power well ID that we would need to keep around only for this purpose. (A follow-up patch removes power well IDs not needed for direct power well access). 
Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-4-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index e82aa38bfd23..b2d182cc3319 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1045,8 +1045,6 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D); - vlv_set_power_well(dev_priv, power_well, true); vlv_display_power_well_init(dev_priv); @@ -1055,8 +1053,6 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D); - vlv_display_power_well_deinit(dev_priv); vlv_set_power_well(dev_priv, power_well, false); @@ -1065,8 +1061,6 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC); - /* since ref/cri clock was enabled */ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ @@ -1091,8 +1085,6 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, { enum pipe pipe; - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC); - for_each_pipe(dev_priv, pipe) assert_pll_disabled(dev_priv, pipe); @@ -1516,8 +1508,6 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A); - chv_set_pipe_power_well(dev_priv, power_well, true); vlv_display_power_well_init(dev_priv); @@ -1526,8 +1516,6 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A); - vlv_display_power_well_deinit(dev_priv); chv_set_pipe_power_well(dev_priv, power_well, false); -- GitLab From f28ec6f4ea483554aacc59e8eb4a7667ecaf58ad Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:37 +0300 Subject: [PATCH 0074/1692] drm/i915: Constify power well descriptors It makes sense to keep unchanging data const. Extract such fields from the i915_power_well struct into a new i915_power_well_desc struct that we initialize during compile time. For the rest of the dynamic fields allocate an array of i915_power_well objects in i915 dev_priv, and link to each of these objects their corresponding i915_power_well_desc object. v2: - Fix checkpatch warnings about missing param name in fn declaration and lines over 80 chars. (Paulo) - Move check for unique IDs to __set_power_wells(). 
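The pattern the patch introduces, in a self-contained sketch with a simplified field set and made-up names (the real structs and __set_power_wells() appear in the diff below): immutable per-platform data stays in a const, compile-time table, while the mutable per-device bits are allocated at probe time and point back at their descriptor.

    #include <stdbool.h>
    #include <stdlib.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct pw_desc {                    /* stand-in for i915_power_well_desc */
        const char *name;
        unsigned long long domains;
    };

    struct pw_state {                   /* stand-in for i915_power_well */
        const struct pw_desc *desc;     /* link to the immutable descriptor */
        int count;                      /* enable/disable usage count */
        bool hw_enabled;                /* cached hw enabled state */
    };

    static const struct pw_desc descs[] = {
        { .name = "always-on",    .domains = ~0ULL },
        { .name = "power well 2", .domains = 0x3   },
    };

    /* Mirrors __set_power_wells(): allocate runtime state, link descriptors. */
    static struct pw_state *set_power_wells(size_t *count)
    {
        struct pw_state *wells = calloc(ARRAY_SIZE(descs), sizeof(*wells));
        size_t i;

        if (!wells)
            return NULL;
        for (i = 0; i < ARRAY_SIZE(descs); i++)
            wells[i].desc = &descs[i];
        *count = ARRAY_SIZE(descs);
        return wells;
    }

Callers then read the invariant fields through well->desc->... and keep only the refcount and cached hw state on the runtime object, which is what the bulk of the diff below converts.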
Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak [Fixed checkpatch warn in __set_power_wells()] Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-5-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 4 +- drivers/gpu/drm/i915/i915_drv.c | 8 +- drivers/gpu/drm/i915/i915_drv.h | 14 +- drivers/gpu/drm/i915/intel_display.h | 4 +- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_hdcp.c | 6 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 206 ++++++++++++++---------- 7 files changed, 141 insertions(+), 102 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f9ce35da4123..23f38bc257a2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2845,10 +2845,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) enum intel_display_power_domain power_domain; power_well = &power_domains->power_wells[i]; - seq_printf(m, "%-25s %d\n", power_well->name, + seq_printf(m, "%-25s %d\n", power_well->desc->name, power_well->count); - for_each_power_domain(power_domain, power_well->domains) + for_each_power_domain(power_domain, power_well->desc->domains) seq_printf(m, " %-23s %d\n", intel_display_power_domain_str(power_domain), power_domains->domain_use_count[power_domain]); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6396318cf73c..9dce55182c3a 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -924,7 +924,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, intel_uc_init_early(dev_priv); intel_pm_setup(dev_priv); intel_init_dpio(dev_priv); - intel_power_domains_init(dev_priv); + ret = intel_power_domains_init(dev_priv); + if (ret < 0) + goto err_uc; intel_irq_init(dev_priv); intel_hangcheck_init(dev_priv); intel_init_display_hooks(dev_priv); @@ -936,6 +938,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, return 0; +err_uc: + intel_uc_cleanup_early(dev_priv); + i915_gem_cleanup_early(dev_priv); err_workqueues: i915_workqueues_cleanup(dev_priv); err_engines: @@ -950,6 +955,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) { intel_irq_fini(dev_priv); + intel_power_domains_cleanup(dev_priv); intel_uc_cleanup_early(dev_priv); i915_gem_cleanup_early(dev_priv); i915_workqueues_cleanup(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 657f46e0cae9..5cedd65326c5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -868,13 +868,9 @@ struct i915_power_well_ops { }; /* Power well structure for haswell */ -struct i915_power_well { +struct i915_power_well_desc { const char *name; bool always_on; - /* power well enable/disable usage count */ - int count; - /* cached hw enabled state */ - bool hw_enabled; u64 domains; /* unique identifier for this power well */ enum i915_power_well_id id; @@ -897,6 +893,14 @@ struct i915_power_well { const struct i915_power_well_ops *ops; }; +struct i915_power_well { + const struct i915_power_well_desc *desc; + /* power well enable/disable usage count */ + int count; + /* cached hw enabled state */ + bool hw_enabled; +}; + struct i915_power_domains { /* * Power wells needed for initialization at driver init and suspend diff --git a/drivers/gpu/drm/i915/intel_display.h 
b/drivers/gpu/drm/i915/intel_display.h index 0a79a46d5805..6a28bac71128 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -329,11 +329,11 @@ struct intel_link_m_n { #define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ for_each_power_well(__dev_priv, __power_well) \ - for_each_if((__power_well)->domains & (__domain_mask)) + for_each_if((__power_well)->desc->domains & (__domain_mask)) #define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \ for_each_power_well_rev(__dev_priv, __power_well) \ - for_each_if((__power_well)->domains & (__domain_mask)) + for_each_if((__power_well)->desc->domains & (__domain_mask)) #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ for ((__i) = 0; \ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3576ab5735b1..0601abb8c71f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1948,6 +1948,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state); /* intel_runtime_pm.c */ int intel_power_domains_init(struct drm_i915_private *); +void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); void intel_power_domains_suspend(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index 0cc6a861bcf8..26e48fc95543 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -57,9 +57,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) /* PG1 (power well #1) needs to be enabled */ for_each_power_well(dev_priv, power_well) { - if (power_well->id == id) { - enabled = power_well->ops->is_enabled(dev_priv, - power_well); + if (power_well->desc->id == id) { + enabled = power_well->desc->ops->is_enabled(dev_priv, + power_well); break; } } diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b2d182cc3319..9f44a2b0113a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -159,17 +159,17 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) static void intel_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - DRM_DEBUG_KMS("enabling %s\n", power_well->name); - power_well->ops->enable(dev_priv, power_well); + DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name); + power_well->desc->ops->enable(dev_priv, power_well); power_well->hw_enabled = true; } static void intel_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - DRM_DEBUG_KMS("disabling %s\n", power_well->name); + DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name); power_well->hw_enabled = false; - power_well->ops->disable(dev_priv, power_well); + power_well->desc->ops->disable(dev_priv, power_well); } static void intel_power_well_get(struct drm_i915_private *dev_priv, @@ -183,7 +183,7 @@ static void intel_power_well_put(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { WARN(!power_well->count, "Use count on power well %s is already zero", - power_well->name); + power_well->desc->name); if (!--power_well->count) intel_power_well_disable(dev_priv, power_well); @@ -213,7 +213,7 @@ bool 
__intel_display_power_is_enabled(struct drm_i915_private *dev_priv, is_enabled = true; for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { - if (power_well->always_on) + if (power_well->desc->always_on) continue; if (!power_well->hw_enabled) { @@ -323,7 +323,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + enum i915_power_well_id id = power_well->desc->id; /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ WARN_ON(intel_wait_for_register(dev_priv, @@ -350,7 +350,7 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + enum i915_power_well_id id = power_well->desc->id; bool disabled; u32 reqs; @@ -370,7 +370,7 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, return; DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", - power_well->name, + power_well->desc->name, !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); } @@ -386,8 +386,8 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, static void hsw_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; - bool wait_fuses = power_well->hsw.has_fuses; + enum i915_power_well_id id = power_well->desc->id; + bool wait_fuses = power_well->desc->hsw.has_fuses; enum skl_power_gate uninitialized_var(pg); u32 val; @@ -421,17 +421,19 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, if (wait_fuses) gen9_wait_for_power_well_fuses(dev_priv, pg); - hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask, - power_well->hsw.has_vga); + hsw_power_well_post_enable(dev_priv, + power_well->desc->hsw.irq_pipe_mask, + power_well->desc->hsw.has_vga); } static void hsw_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + enum i915_power_well_id id = power_well->desc->id; u32 val; - hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask); + hsw_power_well_pre_disable(dev_priv, + power_well->desc->hsw.irq_pipe_mask); val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), @@ -445,7 +447,7 @@ static void icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + enum i915_power_well_id id = power_well->desc->id; enum port port = ICL_AUX_PW_TO_PORT(id); u32 val; @@ -462,7 +464,7 @@ static void icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + enum i915_power_well_id id = power_well->desc->id; enum port port = ICL_AUX_PW_TO_PORT(id); u32 val; @@ -484,7 +486,7 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + enum i915_power_well_id id = power_well->desc->id; u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id); return 
(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask; @@ -723,7 +725,7 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv) static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + enum i915_power_well_id id = power_well->desc->id; u32 mask = HSW_PWR_WELL_CTL_REQ(id); u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)); @@ -740,19 +742,19 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_ddi_phy_init(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); } static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); } static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy); + return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); } static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) @@ -761,16 +763,17 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); if (IS_GEMINILAKE(dev_priv)) { power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C); if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_verify_state(dev_priv, + power_well->desc->bxt.phy); } } @@ -869,7 +872,7 @@ static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, static void vlv_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { - enum i915_power_well_id power_well_id = power_well->id; + enum i915_power_well_id power_well_id = power_well->desc->id; u32 mask; u32 state; u32 ctrl; @@ -917,7 +920,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv, static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id power_well_id = power_well->id; + enum i915_power_well_id power_well_id = power_well->desc->id; bool enabled = false; u32 mask; u32 state; @@ -1107,7 +1110,7 @@ lookup_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well; power_well = &power_domains->power_wells[i]; - if (power_well->id == power_well_id) + if (power_well->desc->id == power_well_id) return power_well; } @@ -1146,7 +1149,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); - if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) { + if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { phy_status |= PHY_POWERGOOD(DPIO_PHY0); /* this assumes override is only used to enable lanes */ @@ -1187,7 +1190,7 @@ static void assert_chv_phy_status(struct drm_i915_private 
*dev_priv) phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); } - if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) { + if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { phy_status |= PHY_POWERGOOD(DPIO_PHY1); /* this assumes override is only used to enable lanes */ @@ -1231,10 +1234,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, enum pipe pipe; uint32_t tmp; - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC && - power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D); + WARN_ON_ONCE(power_well->desc->id != PUNIT_POWER_WELL_DPIO_CMN_BC && + power_well->desc->id != PUNIT_POWER_WELL_DPIO_CMN_D); - if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { pipe = PIPE_A; phy = DPIO_PHY0; } else { @@ -1262,7 +1265,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); - if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); tmp |= DPIO_DYNPWRDOWNEN_CH1; vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); @@ -1293,10 +1296,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, { enum dpio_phy phy; - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC && - power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D); + WARN_ON_ONCE(power_well->desc->id != PUNIT_POWER_WELL_DPIO_CMN_BC && + power_well->desc->id != PUNIT_POWER_WELL_DPIO_CMN_D); - if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { phy = DPIO_PHY0; assert_pll_disabled(dev_priv, PIPE_A); assert_pll_disabled(dev_priv, PIPE_B); @@ -2051,7 +2054,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { .is_enabled = vlv_power_well_enabled, }; -static struct i915_power_well i9xx_always_on_power_well[] = { +static const struct i915_power_well_desc i9xx_always_on_power_well[] = { { .name = "always-on", .always_on = 1, @@ -2068,7 +2071,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = { .is_enabled = i830_pipes_power_well_enabled, }; -static struct i915_power_well i830_power_wells[] = { +static const struct i915_power_well_desc i830_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2105,7 +2108,7 @@ static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { .is_enabled = bxt_dpio_cmn_power_well_enabled, }; -static struct i915_power_well hsw_power_wells[] = { +static const struct i915_power_well_desc hsw_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2124,7 +2127,7 @@ static struct i915_power_well hsw_power_wells[] = { }, }; -static struct i915_power_well bdw_power_wells[] = { +static const struct i915_power_well_desc bdw_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2165,7 +2168,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = { .is_enabled = vlv_power_well_enabled, }; -static struct i915_power_well vlv_power_wells[] = { +static const struct i915_power_well_desc vlv_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2223,7 +2226,7 @@ static struct i915_power_well vlv_power_wells[] = { }, }; -static struct i915_power_well chv_power_wells[] = { +static const struct i915_power_well_desc chv_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2263,12 +2266,12 @@ bool 
intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, bool ret; power_well = lookup_power_well(dev_priv, power_well_id); - ret = power_well->ops->is_enabled(dev_priv, power_well); + ret = power_well->desc->ops->is_enabled(dev_priv, power_well); return ret; } -static struct i915_power_well skl_power_wells[] = { +static const struct i915_power_well_desc skl_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2336,7 +2339,7 @@ static struct i915_power_well skl_power_wells[] = { }, }; -static struct i915_power_well bxt_power_wells[] = { +static const struct i915_power_well_desc bxt_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2390,7 +2393,7 @@ static struct i915_power_well bxt_power_wells[] = { }, }; -static struct i915_power_well glk_power_wells[] = { +static const struct i915_power_well_desc glk_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2490,7 +2493,7 @@ static struct i915_power_well glk_power_wells[] = { }, }; -static struct i915_power_well cnl_power_wells[] = { +static const struct i915_power_well_desc cnl_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2594,7 +2597,7 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { .is_enabled = hsw_power_well_enabled, }; -static struct i915_power_well icl_power_wells[] = { +static const struct i915_power_well_desc icl_power_wells[] = { { .name = "always-on", .always_on = 1, @@ -2805,26 +2808,38 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, return mask; } -static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv) +static int +__set_power_wells(struct i915_power_domains *power_domains, + const struct i915_power_well_desc *power_well_descs, + int power_well_count) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; - u64 power_well_ids; + u64 power_well_ids = 0; int i; - power_well_ids = 0; - for (i = 0; i < power_domains->power_well_count; i++) { - enum i915_power_well_id id = power_domains->power_wells[i].id; + power_domains->power_well_count = power_well_count; + power_domains->power_wells = + kcalloc(power_well_count, + sizeof(*power_domains->power_wells), + GFP_KERNEL); + if (!power_domains->power_wells) + return -ENOMEM; + + for (i = 0; i < power_well_count; i++) { + enum i915_power_well_id id = power_well_descs[i].id; + + power_domains->power_wells[i].desc = &power_well_descs[i]; WARN_ON(id >= sizeof(power_well_ids) * 8); WARN_ON(power_well_ids & BIT_ULL(id)); power_well_ids |= BIT_ULL(id); } + + return 0; } -#define set_power_wells(power_domains, __power_wells) ({ \ - (power_domains)->power_wells = (__power_wells); \ - (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ -}) +#define set_power_wells(power_domains, __power_well_descs) \ + __set_power_wells(power_domains, __power_well_descs, \ + ARRAY_SIZE(__power_well_descs)) /** * intel_power_domains_init - initializes the power domain structures @@ -2836,6 +2851,7 @@ static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv) int intel_power_domains_init(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; + int err; i915_modparams.disable_power_well = sanitize_disable_power_well_option(dev_priv, @@ -2852,15 +2868,15 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * the disabling order is reversed. 
*/ if (IS_ICELAKE(dev_priv)) { - set_power_wells(power_domains, icl_power_wells); + err = set_power_wells(power_domains, icl_power_wells); } else if (IS_HASWELL(dev_priv)) { - set_power_wells(power_domains, hsw_power_wells); + err = set_power_wells(power_domains, hsw_power_wells); } else if (IS_BROADWELL(dev_priv)) { - set_power_wells(power_domains, bdw_power_wells); + err = set_power_wells(power_domains, bdw_power_wells); } else if (IS_GEN9_BC(dev_priv)) { - set_power_wells(power_domains, skl_power_wells); + err = set_power_wells(power_domains, skl_power_wells); } else if (IS_CANNONLAKE(dev_priv)) { - set_power_wells(power_domains, cnl_power_wells); + err = set_power_wells(power_domains, cnl_power_wells); /* * DDI and Aux IO are getting enabled for all ports @@ -2872,22 +2888,31 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) power_domains->power_well_count -= 2; } else if (IS_BROXTON(dev_priv)) { - set_power_wells(power_domains, bxt_power_wells); + err = set_power_wells(power_domains, bxt_power_wells); } else if (IS_GEMINILAKE(dev_priv)) { - set_power_wells(power_domains, glk_power_wells); + err = set_power_wells(power_domains, glk_power_wells); } else if (IS_CHERRYVIEW(dev_priv)) { - set_power_wells(power_domains, chv_power_wells); + err = set_power_wells(power_domains, chv_power_wells); } else if (IS_VALLEYVIEW(dev_priv)) { - set_power_wells(power_domains, vlv_power_wells); + err = set_power_wells(power_domains, vlv_power_wells); } else if (IS_I830(dev_priv)) { - set_power_wells(power_domains, i830_power_wells); + err = set_power_wells(power_domains, i830_power_wells); } else { - set_power_wells(power_domains, i9xx_always_on_power_well); + err = set_power_wells(power_domains, i9xx_always_on_power_well); } - assert_power_well_ids_unique(dev_priv); + return err; +} - return 0; +/** + * intel_power_domains_cleanup - clean up power domains resources + * @dev_priv: i915 device instance + * + * Release any resources acquired by intel_power_domains_init() + */ +void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) +{ + kfree(dev_priv->power_domains.power_wells); } static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) @@ -2897,9 +2922,9 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) mutex_lock(&power_domains->lock); for_each_power_well(dev_priv, power_well) { - power_well->ops->sync_hw(dev_priv, power_well); - power_well->hw_enabled = power_well->ops->is_enabled(dev_priv, - power_well); + power_well->desc->ops->sync_hw(dev_priv, power_well); + power_well->hw_enabled = + power_well->desc->ops->is_enabled(dev_priv, power_well); } mutex_unlock(&power_domains->lock); } @@ -3398,7 +3423,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) * override and set the lane powerdown bits accding to the * current lane status. 
*/ - if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) { + if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { uint32_t status = I915_READ(DPLL(PIPE_A)); unsigned int mask; @@ -3429,7 +3454,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) dev_priv->chv_phy_assert[DPIO_PHY0] = true; } - if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) { + if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { uint32_t status = I915_READ(DPIO_PHY_STATUS); unsigned int mask; @@ -3465,15 +3490,15 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D); /* If the display might be already active skip this */ - if (cmn->ops->is_enabled(dev_priv, cmn) && - disp2d->ops->is_enabled(dev_priv, disp2d) && + if (cmn->desc->ops->is_enabled(dev_priv, cmn) && + disp2d->desc->ops->is_enabled(dev_priv, disp2d) && I915_READ(DPIO_CTL) & DPIO_CMNRST) return; DRM_DEBUG_KMS("toggling display PHY side reset\n"); /* cmnlane needs DPLL registers */ - disp2d->ops->enable(dev_priv, disp2d); + disp2d->desc->ops->enable(dev_priv, disp2d); /* * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: @@ -3482,7 +3507,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) * Simply ungating isn't enough to reset the PHY enough to get * ports and lanes running. */ - cmn->ops->disable(dev_priv, cmn); + cmn->desc->ops->disable(dev_priv, cmn); } /** @@ -3598,9 +3623,9 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) enum intel_display_power_domain domain; DRM_DEBUG_DRIVER("%-25s %d\n", - power_well->name, power_well->count); + power_well->desc->name, power_well->count); - for_each_power_domain(domain, power_well->domains) + for_each_power_domain(domain, power_well->desc->domains) DRM_DEBUG_DRIVER(" %-23s %d\n", intel_display_power_domain_str(domain), power_domains->domain_use_count[domain]); @@ -3636,22 +3661,25 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) * and PW1 power wells) are under FW control, so ignore them, * since their state can change asynchronously. */ - if (!power_well->domains) + if (!power_well->desc->domains) continue; - enabled = power_well->ops->is_enabled(dev_priv, power_well); - if ((power_well->count || power_well->always_on) != enabled) + enabled = power_well->desc->ops->is_enabled(dev_priv, + power_well); + if ((power_well->count || power_well->desc->always_on) != + enabled) DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", - power_well->name, power_well->count, enabled); + power_well->desc->name, + power_well->count, enabled); domains_count = 0; - for_each_power_domain(domain, power_well->domains) + for_each_power_domain(domain, power_well->desc->domains) domains_count += power_domains->domain_use_count[domain]; if (power_well->count != domains_count) { DRM_ERROR("power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", - power_well->name, power_well->count, + power_well->desc->name, power_well->count, domains_count); dump_domain_info = true; } -- GitLab From d13dd05a1f20262e32335a1f1363809185e3d2e1 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:38 +0300 Subject: [PATCH 0075/1692] drm/i915/vlv: Use power well CTL IDX instead of ID Atm, we determine the control/status flag offsets within the PUNIT control/status registers based on the power well's ID. Since the power well ID enum is global across all platforms, the associated macros to get the flag offsets involves some magic. 
This makes checking the register/bit definitions against the specification more difficult than necessary. Also the values in the power well ID enum must stay fixed, making code maintenance of the enum cumbersome. To solve the above define the control/status flag indices right after the corresponding registers and use these to derive the control/status flag values by storing the indices in the i915_power_well_desc struct. Initializing anonymous union fields require the preceding field in the struct to be explicitly initialized - even when using named initializers - and the initialization to be done right before the union initialization, hence the reordering of the .id fields. v2: - Clarify commit log message about anonymous union initializers. (Paulo) Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-6-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 7 ++++ drivers/gpu/drm/i915/i915_reg.h | 22 ++++++++--- drivers/gpu/drm/i915/intel_runtime_pm.c | 52 ++++++++++++++++++------- 3 files changed, 62 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5cedd65326c5..6fa61403d456 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -879,6 +879,13 @@ struct i915_power_well_desc { * well specific. */ union { + struct { + /* + * request/status flag index in the PUNIT power well + * control/status registers. + */ + u8 idx; + } vlv; struct { enum dpio_phy phy; } bxt; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4b656f31fde9..ed30b4f8b948 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1144,11 +1144,23 @@ enum i915_power_well_id { #define PUNIT_REG_PWRGT_CTRL 0x60 #define PUNIT_REG_PWRGT_STATUS 0x61 -#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2)) -#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2)) -#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2)) -#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2)) -#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2)) +#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_PWR_ON(pw_idx) (0 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_CLK_GATE(pw_idx) (1 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_RESET(pw_idx) (2 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_PWR_GATE(pw_idx) (3 << ((pw_idx) * 2)) + +#define PUNIT_PWGT_IDX_RENDER 0 +#define PUNIT_PWGT_IDX_MEDIA 1 +#define PUNIT_PWGT_IDX_DISP2D 3 +#define PUNIT_PWGT_IDX_DPIO_CMN_BC 5 +#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01 6 +#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23 7 +#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01 8 +#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23 9 +#define PUNIT_PWGT_IDX_DPIO_RX0 10 +#define PUNIT_PWGT_IDX_DPIO_RX1 11 +#define PUNIT_PWGT_IDX_DPIO_CMN_D 12 #define PUNIT_REG_GPU_LFM 0xd3 #define PUNIT_REG_GPU_FREQ_REQ 0xd4 diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 9f44a2b0113a..bcdf04847b49 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -872,14 +872,14 @@ static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, static void vlv_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { - enum i915_power_well_id power_well_id = power_well->desc->id; + int 
pw_idx = power_well->desc->vlv.idx; u32 mask; u32 state; u32 ctrl; - mask = PUNIT_PWRGT_MASK(power_well_id); - state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : - PUNIT_PWRGT_PWR_GATE(power_well_id); + mask = PUNIT_PWRGT_MASK(pw_idx); + state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : + PUNIT_PWRGT_PWR_GATE(pw_idx); mutex_lock(&dev_priv->pcu_lock); @@ -920,14 +920,14 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv, static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id power_well_id = power_well->desc->id; + int pw_idx = power_well->desc->vlv.idx; bool enabled = false; u32 mask; u32 state; u32 ctrl; - mask = PUNIT_PWRGT_MASK(power_well_id); - ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); + mask = PUNIT_PWRGT_MASK(pw_idx); + ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); mutex_lock(&dev_priv->pcu_lock); @@ -936,8 +936,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, * We only ever set the power-on and power-gate states, anything * else is unexpected. */ - WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) && - state != PUNIT_PWRGT_PWR_GATE(power_well_id)); + WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) && + state != PUNIT_PWRGT_PWR_GATE(pw_idx)); if (state == ctrl) enabled = true; @@ -2179,8 +2179,11 @@ static const struct i915_power_well_desc vlv_power_wells[] = { { .name = "display", .domains = VLV_DISPLAY_POWER_DOMAINS, - .id = PUNIT_POWER_WELL_DISP2D, .ops = &vlv_display_power_well_ops, + .id = PUNIT_POWER_WELL_DISP2D, + { + .vlv.idx = PUNIT_PWGT_IDX_DISP2D, + }, }, { .name = "dpio-tx-b-01", @@ -2190,6 +2193,9 @@ static const struct i915_power_well_desc vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, + }, }, { .name = "dpio-tx-b-23", @@ -2199,6 +2205,9 @@ static const struct i915_power_well_desc vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, + }, }, { .name = "dpio-tx-c-01", @@ -2208,6 +2217,9 @@ static const struct i915_power_well_desc vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, + }, }, { .name = "dpio-tx-c-23", @@ -2217,12 +2229,18 @@ static const struct i915_power_well_desc vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, + }, }, { .name = "dpio-common", .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, - .id = PUNIT_POWER_WELL_DPIO_CMN_BC, .ops = &vlv_dpio_cmn_power_well_ops, + .id = PUNIT_POWER_WELL_DPIO_CMN_BC, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + }, }, }; @@ -2242,20 +2260,26 @@ static const struct i915_power_well_desc chv_power_wells[] = { * required for any pipe to work. 
*/ .domains = CHV_DISPLAY_POWER_DOMAINS, - .id = CHV_DISP_PW_PIPE_A, .ops = &chv_pipe_power_well_ops, + .id = CHV_DISP_PW_PIPE_A, }, { .name = "dpio-common-bc", .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, - .id = PUNIT_POWER_WELL_DPIO_CMN_BC, .ops = &chv_dpio_cmn_power_well_ops, + .id = PUNIT_POWER_WELL_DPIO_CMN_BC, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + }, }, { .name = "dpio-common-d", .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, - .id = PUNIT_POWER_WELL_DPIO_CMN_D, .ops = &chv_dpio_cmn_power_well_ops, + .id = PUNIT_POWER_WELL_DPIO_CMN_D, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, + }, }, }; -- GitLab From 75e39688f350f63dc916c1b9d01c973a3a7bf5c8 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:39 +0300 Subject: [PATCH 0076/1692] drm/i915/ddi: Use power well CTL IDX instead of ID Similarly to the previous patch use a separate request/status HW flag index defined right after the corresponding control registers instead of depending for this on the power well IDs. Since the set of control/status registers varies among the different power wells (on a single platform), also add a new i915_power_well_registers struct that we populate and assign to each DDI power well as needed. Also clarify a bit the code comment describing the function and layout of the control registers. This also fixes a problem on ICL, where we incorrectly read the KVMR control register in hsw_power_well_requesters() even for DDI and AUX power wells. v2: - Clarify platform range tags in code comments. (Paulo) - Fix line over 80 chars checkpatch warning. Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-7-imre.deak@intel.com --- drivers/gpu/drm/i915/gvt/handlers.c | 31 +-- drivers/gpu/drm/i915/i915_drv.h | 13 + drivers/gpu/drm/i915/i915_reg.h | 128 ++++++---- drivers/gpu/drm/i915/intel_display.c | 5 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 302 ++++++++++++++++++++---- 5 files changed, 361 insertions(+), 118 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 6b50f850dc28..749c704ca304 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1287,12 +1287,13 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, { write_vreg(vgpu, offset, p_data, bytes); - if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL)) + if (vgpu_vreg(vgpu, offset) & + HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL)) vgpu_vreg(vgpu, offset) |= - HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL); + HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL); else vgpu_vreg(vgpu, offset) &= - ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL); + ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL); return 0; } @@ -2443,17 +2444,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL); MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL); MMIO_D(GEN6_PMINTRMSK, D_ALL); - /* - * Use an arbitrary power well controlled by the PWR_WELL_CTL - * register. 
- */ - MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL, - power_well_ctl_mmio_write); - MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL, - power_well_ctl_mmio_write); - MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write); - MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL, - power_well_ctl_mmio_write); + MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write); + MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write); + MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write); + MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write); @@ -2804,13 +2798,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - /* - * Use an arbitrary power well controlled by the PWR_WELL_CTL - * register. - */ - MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS); - MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, - skl_power_well_ctl_write); + MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); + MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write); MMIO_D(_MMIO(0xa210), D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6fa61403d456..0b10a30b7d96 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -867,6 +867,13 @@ struct i915_power_well_ops { struct i915_power_well *power_well); }; +struct i915_power_well_regs { + i915_reg_t bios; + i915_reg_t driver; + i915_reg_t kvmr; + i915_reg_t debug; +}; + /* Power well structure for haswell */ struct i915_power_well_desc { const char *name; @@ -890,6 +897,12 @@ struct i915_power_well_desc { enum dpio_phy phy; } bxt; struct { + const struct i915_power_well_regs *regs; + /* + * request/status flag index in the power well + * constrol/status registers. + */ + u8 idx; /* Mask of pipes whose IRQ logic is backed by the pw */ u8 irq_pipe_mask; /* The pw is backing the VGA functionality */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ed30b4f8b948..4f5eb49934b4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8925,46 +8925,78 @@ enum { #define HSW_AUD_CHICKENBIT _MMIO(0x65f10) #define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15) -/* HSW Power Wells */ -#define _HSW_PWR_WELL_CTL1 0x45400 -#define _HSW_PWR_WELL_CTL2 0x45404 -#define _HSW_PWR_WELL_CTL3 0x45408 -#define _HSW_PWR_WELL_CTL4 0x4540C - -#define _ICL_PWR_WELL_CTL_AUX1 0x45440 -#define _ICL_PWR_WELL_CTL_AUX2 0x45444 -#define _ICL_PWR_WELL_CTL_AUX4 0x4544C - -#define _ICL_PWR_WELL_CTL_DDI1 0x45450 -#define _ICL_PWR_WELL_CTL_DDI2 0x45454 -#define _ICL_PWR_WELL_CTL_DDI4 0x4545C - /* - * Each power well control register contains up to 16 (request, status) HW - * flag tuples. The register index and HW flag shift is determined by the - * power well ID (see i915_power_well_id). There are 4 possible sources of - * power well requests each source having its own set of control registers: - * BIOS, DRIVER, KVMR, DEBUG. 
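/*
 * Editor's aside (illustrative, not part of the upstream patches): with
 * the index-based macros added below, the request/status bits follow
 * directly from the per-register flag index instead of being decoded
 * from a power well ID.  For example, SKL power well 2 uses
 * SKL_PW_CTL_IDX_PW_2 == 15, so in the driver's control register
 * HSW_PWR_WELL_CTL2:
 *
 *	HSW_PWR_WELL_CTL_REQ(15)   == 0x2 << 30 == BIT(31)
 *	HSW_PWR_WELL_CTL_STATE(15) == 0x1 << 30 == BIT(30)
 *
 * which matches the old scheme, where HSW_DISP_PW_GLOBAL and
 * SKL_DISP_PW_2 had to keep the fixed value 15 to land on the same bits.
 */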
+ * HSW - ICL power wells + * + * Platforms have up to 3 power well control register sets, each set + * controlling up to 16 power wells via a request/status HW flag tuple: + * - main (HSW_PWR_WELL_CTL[1-4]) + * - AUX (ICL_PWR_WELL_CTL_AUX[1-4]) + * - DDI (ICL_PWR_WELL_CTL_DDI[1-4]) + * Each control register set consists of up to 4 registers used by different + * sources that can request a power well to be enabled: + * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1) + * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2) + * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set) + * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4) */ -#define _HSW_PW_REG_IDX(pw) ((pw) >> 4) -#define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2) -#define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL1, \ - _ICL_PWR_WELL_CTL_AUX1, \ - _ICL_PWR_WELL_CTL_DDI1)) -#define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL2, \ - _ICL_PWR_WELL_CTL_AUX2, \ - _ICL_PWR_WELL_CTL_DDI2)) -/* KVMR doesn't have a reg for AUX or DDI power well control */ -#define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3) -#define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL4, \ - _ICL_PWR_WELL_CTL_AUX4, \ - _ICL_PWR_WELL_CTL_DDI4)) - -#define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1)) -#define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw)) +#define HSW_PWR_WELL_CTL1 _MMIO(0x45400) +#define HSW_PWR_WELL_CTL2 _MMIO(0x45404) +#define HSW_PWR_WELL_CTL3 _MMIO(0x45408) +#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C) +#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2)) +#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2)) + +/* HSW/BDW power well */ +#define HSW_PW_CTL_IDX_GLOBAL 15 + +/* SKL/BXT/GLK/CNL power wells */ +#define SKL_PW_CTL_IDX_PW_2 15 +#define SKL_PW_CTL_IDX_PW_1 14 +#define CNL_PW_CTL_IDX_AUX_F 12 +#define CNL_PW_CTL_IDX_AUX_D 11 +#define GLK_PW_CTL_IDX_AUX_C 10 +#define GLK_PW_CTL_IDX_AUX_B 9 +#define GLK_PW_CTL_IDX_AUX_A 8 +#define CNL_PW_CTL_IDX_DDI_F 6 +#define SKL_PW_CTL_IDX_DDI_D 4 +#define SKL_PW_CTL_IDX_DDI_C 3 +#define SKL_PW_CTL_IDX_DDI_B 2 +#define SKL_PW_CTL_IDX_DDI_A_E 1 +#define GLK_PW_CTL_IDX_DDI_A 1 +#define SKL_PW_CTL_IDX_MISC_IO 0 + +/* ICL - power wells */ +#define ICL_PW_CTL_IDX_PW_4 3 +#define ICL_PW_CTL_IDX_PW_3 2 +#define ICL_PW_CTL_IDX_PW_2 1 +#define ICL_PW_CTL_IDX_PW_1 0 + +#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440) +#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444) +#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C) +#define ICL_PW_CTL_IDX_AUX_TBT4 11 +#define ICL_PW_CTL_IDX_AUX_TBT3 10 +#define ICL_PW_CTL_IDX_AUX_TBT2 9 +#define ICL_PW_CTL_IDX_AUX_TBT1 8 +#define ICL_PW_CTL_IDX_AUX_F 5 +#define ICL_PW_CTL_IDX_AUX_E 4 +#define ICL_PW_CTL_IDX_AUX_D 3 +#define ICL_PW_CTL_IDX_AUX_C 2 +#define ICL_PW_CTL_IDX_AUX_B 1 +#define ICL_PW_CTL_IDX_AUX_A 0 + +#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450) +#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454) +#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C) +#define ICL_PW_CTL_IDX_DDI_F 5 +#define ICL_PW_CTL_IDX_DDI_E 4 +#define ICL_PW_CTL_IDX_DDI_D 3 +#define ICL_PW_CTL_IDX_DDI_C 2 +#define ICL_PW_CTL_IDX_DDI_B 1 +#define ICL_PW_CTL_IDX_DDI_A 0 + +/* HSW - power well misc debug registers */ #define HSW_PWR_WELL_CTL5 _MMIO(0x45410) #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31) #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20) @@ -8980,18 +9012,26 @@ enum skl_power_gate { #define 
SKL_FUSE_STATUS _MMIO(0x42000) #define SKL_FUSE_DOWNLOAD_STATUS (1 << 31) -/* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */ -#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) -/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */ -#define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + SKL_PG1) +/* + * PG0 is HW controlled, so doesn't have a corresponding power well control knob + * SKL_DISP_PW1_IDX..SKL_DISP_PW2_IDX -> PG1..PG2 + */ +#define SKL_PW_CTL_IDX_TO_PG(pw_idx) \ + ((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1) +/* + * PG0 is HW controlled, so doesn't have a corresponding power well control knob + * ICL_DISP_PW1_IDX..ICL_DISP_PW4_IDX -> PG1..PG4 + */ +#define ICL_PW_CTL_IDX_TO_PG(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) -#define _CNL_AUX_REG_IDX(pw) ((pw) - 9) +#define _CNL_AUX_REG_IDX(pw_idx) ((pw_idx) - GLK_PW_CTL_IDX_AUX_B) #define _CNL_AUX_ANAOVRD1_B 0x162250 #define _CNL_AUX_ANAOVRD1_C 0x162210 #define _CNL_AUX_ANAOVRD1_D 0x1622D0 #define _CNL_AUX_ANAOVRD1_F 0x162A90 -#define CNL_AUX_ANAOVRD1(pw) _MMIO(_PICK(_CNL_AUX_REG_IDX(pw), \ +#define CNL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_CNL_AUX_REG_IDX(pw_idx), \ _CNL_AUX_ANAOVRD1_B, \ _CNL_AUX_ANAOVRD1_C, \ _CNL_AUX_ANAOVRD1_D, \ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 73c6d56ba3ec..53e7a7e75384 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8973,7 +8973,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", pipe_name(crtc->pipe)); - I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)), + I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), "Display power well on\n"); I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); @@ -16129,8 +16129,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) return NULL; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - error->power_well_driver = - I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)); + error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2); for_each_pipe(dev_priv, i) { error->pipe[i].power_domain_on = diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index bcdf04847b49..bba32df770b2 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -323,26 +323,29 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->desc->id; + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. 
*/ WARN_ON(intel_wait_for_register(dev_priv, - HSW_PWR_WELL_CTL_DRIVER(id), - HSW_PWR_WELL_CTL_STATE(id), - HSW_PWR_WELL_CTL_STATE(id), + regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), + HSW_PWR_WELL_CTL_STATE(pw_idx), 1)); } static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, - enum i915_power_well_id id) + const struct i915_power_well_regs *regs, + int pw_idx) { - u32 req_mask = HSW_PWR_WELL_CTL_REQ(id); + u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); u32 ret; - ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0; - ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0; - ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0; - ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0; + ret = I915_READ(regs->bios) & req_mask ? 1 : 0; + ret |= I915_READ(regs->driver) & req_mask ? 2 : 0; + if (regs->kvmr.reg) + ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0; + ret |= I915_READ(regs->debug) & req_mask ? 8 : 0; return ret; } @@ -350,7 +353,8 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->desc->id; + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; bool disabled; u32 reqs; @@ -363,9 +367,9 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, * Skip the wait in case any of the request bits are set and print a * diagnostic message. */ - wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & - HSW_PWR_WELL_CTL_STATE(id))) || - (reqs = hsw_power_well_requesters(dev_priv, id)), 1); + wait_for((disabled = !(I915_READ(regs->driver) & + HSW_PWR_WELL_CTL_STATE(pw_idx))) || + (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); if (disabled) return; @@ -386,14 +390,15 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, static void hsw_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->desc->id; + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; bool wait_fuses = power_well->desc->hsw.has_fuses; enum skl_power_gate uninitialized_var(pg); u32 val; if (wait_fuses) { - pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) : - SKL_PW_TO_PG(id); + pg = INTEL_GEN(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : + SKL_PW_CTL_IDX_TO_PG(pw_idx); /* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse @@ -405,17 +410,17 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); } - val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id)); + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_enable(dev_priv, power_well); /* Display WA #1178: cnl */ if (IS_CANNONLAKE(dev_priv) && - (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C || - id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) { - val = I915_READ(CNL_AUX_ANAOVRD1(id)); + pw_idx >= GLK_PW_CTL_IDX_AUX_B && + pw_idx <= CNL_PW_CTL_IDX_AUX_F) { + val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx)); val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; - I915_WRITE(CNL_AUX_ANAOVRD1(id), val); + I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val); } if (wait_fuses) @@ -429,30 +434,31 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, static void hsw_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->desc->id; + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; u32 val; hsw_power_well_pre_disable(dev_priv, power_well->desc->hsw.irq_pipe_mask); - val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), - val & ~HSW_PWR_WELL_CTL_REQ(id)); + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_disable(dev_priv, power_well); } -#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A) +#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) static void icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->desc->id; - enum port port = ICL_AUX_PW_TO_PORT(id); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + enum port port = ICL_AUX_PW_TO_PORT(pw_idx); u32 val; - val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id)); + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); val = I915_READ(ICL_PORT_CL_DW12(port)); I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); @@ -464,16 +470,16 @@ static void icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->desc->id; - enum port port = ICL_AUX_PW_TO_PORT(id); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + enum port port = ICL_AUX_PW_TO_PORT(pw_idx); u32 val; val = I915_READ(ICL_PORT_CL_DW12(port)); I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); - val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), - val & ~HSW_PWR_WELL_CTL_REQ(id)); + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_disable(dev_priv, power_well); } @@ -486,22 +492,22 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private 
*dev_priv, static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->desc->id; - u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | + HSW_PWR_WELL_CTL_STATE(pw_idx); - return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask; + return (I915_READ(regs->driver) & mask) == mask; } static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) { - enum i915_power_well_id id = SKL_DISP_PW_2; - WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), "DC9 already programmed to be enabled.\n"); WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled to enable DC9.\n"); - WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & - HSW_PWR_WELL_CTL_REQ(id), + WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) & + HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), "Power well 2 on.\n"); WARN_ONCE(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); @@ -725,17 +731,18 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv) static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->desc->id; - u32 mask = HSW_PWR_WELL_CTL_REQ(id); - u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); + u32 bios_req = I915_READ(regs->bios); /* Take over the request bit if set by BIOS. */ if (bios_req & mask) { - u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); + u32 drv_req = I915_READ(regs->driver); if (!(drv_req & mask)) - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask); - I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask); + I915_WRITE(regs->driver, drv_req | mask); + I915_WRITE(regs->bios, bios_req & ~mask); } } @@ -2108,6 +2115,13 @@ static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { .is_enabled = bxt_dpio_cmn_power_well_enabled, }; +static const struct i915_power_well_regs hsw_power_well_regs = { + .bios = HSW_PWR_WELL_CTL1, + .driver = HSW_PWR_WELL_CTL2, + .kvmr = HSW_PWR_WELL_CTL3, + .debug = HSW_PWR_WELL_CTL4, +}; + static const struct i915_power_well_desc hsw_power_wells[] = { { .name = "always-on", @@ -2122,6 +2136,8 @@ static const struct i915_power_well_desc hsw_power_wells[] = { .ops = &hsw_power_well_ops, .id = HSW_DISP_PW_GLOBAL, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, .hsw.has_vga = true, }, }, @@ -2141,6 +2157,8 @@ static const struct i915_power_well_desc bdw_power_wells[] = { .ops = &hsw_power_well_ops, .id = HSW_DISP_PW_GLOBAL, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, }, @@ -2310,6 +2328,8 @@ static const struct i915_power_well_desc skl_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2319,6 +2339,10 @@ static const struct i915_power_well_desc skl_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_MISC_IO, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, + }, }, { .name = "DC off", @@ -2332,6 +2356,8 @@ static const 
struct i915_power_well_desc skl_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2342,24 +2368,40 @@ static const struct i915_power_well_desc skl_power_wells[] = { .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_A_E, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, + }, }, { .name = "DDI B IO power well", .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_B, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, }, { .name = "DDI C IO power well", .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_C, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, }, { .name = "DDI D IO power well", .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_D, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_D, + }, }, }; @@ -2377,6 +2419,8 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2392,6 +2436,8 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2432,6 +2478,8 @@ static const struct i915_power_well_desc glk_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2447,6 +2495,8 @@ static const struct i915_power_well_desc glk_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2484,36 +2534,60 @@ static const struct i915_power_well_desc glk_power_wells[] = { .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = GLK_DISP_PW_AUX_A, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_A, + }, }, { .name = "AUX B", .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = GLK_DISP_PW_AUX_B, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_B, + }, }, { .name = "AUX C", .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = GLK_DISP_PW_AUX_C, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_C, + }, }, { .name = "DDI A IO power well", .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = GLK_DISP_PW_DDI_A, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_DDI_A, + }, }, { .name = "DDI B IO power well", .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_B, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, }, { .name = "DDI C IO power well", .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_C, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = 
SKL_PW_CTL_IDX_DDI_C, + }, }, }; @@ -2532,6 +2606,8 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2540,24 +2616,40 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = CNL_DISP_PW_AUX_A, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_A, + }, }, { .name = "AUX B", .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = CNL_DISP_PW_AUX_B, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_B, + }, }, { .name = "AUX C", .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = CNL_DISP_PW_AUX_C, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_C, + }, }, { .name = "AUX D", .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = CNL_DISP_PW_AUX_D, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_AUX_D, + }, }, { .name = "DC off", @@ -2571,6 +2663,8 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2581,36 +2675,60 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = CNL_DISP_PW_DDI_A, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_DDI_A, + }, }, { .name = "DDI B IO power well", .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_B, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, }, { .name = "DDI C IO power well", .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_C, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, }, { .name = "DDI D IO power well", .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_DDI_D, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_D, + }, }, { .name = "DDI F IO power well", .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = CNL_DISP_PW_DDI_F, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_DDI_F, + }, }, { .name = "AUX F", .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = CNL_DISP_PW_AUX_F, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_AUX_F, + }, }, }; @@ -2621,6 +2739,18 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { .is_enabled = hsw_power_well_enabled, }; +static const struct i915_power_well_regs icl_aux_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_AUX1, + .driver = ICL_PWR_WELL_CTL_AUX2, + .debug = ICL_PWR_WELL_CTL_AUX4, +}; + +static const struct i915_power_well_regs icl_ddi_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_DDI1, + .driver = ICL_PWR_WELL_CTL_DDI2, + .debug = ICL_PWR_WELL_CTL_DDI4, +}; + static const struct i915_power_well_desc icl_power_wells[] = { { .name = "always-on", @@ -2636,6 +2766,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = 
ICL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2645,6 +2777,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, .hsw.has_fuses = true, }, }, @@ -2660,6 +2794,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_3, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, .hsw.irq_pipe_mask = BIT(PIPE_B), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2670,96 +2806,160 @@ static const struct i915_power_well_desc icl_power_wells[] = { .domains = ICL_DDI_IO_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_DDI_A, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + }, }, { .name = "DDI B IO", .domains = ICL_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_DDI_B, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + }, }, { .name = "DDI C IO", .domains = ICL_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_DDI_C, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + }, }, { .name = "DDI D IO", .domains = ICL_DDI_IO_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_DDI_D, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_D, + }, }, { .name = "DDI E IO", .domains = ICL_DDI_IO_E_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_DDI_E, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_E, + }, }, { .name = "DDI F IO", .domains = ICL_DDI_IO_F_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_DDI_F, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_F, + }, }, { .name = "AUX A", .domains = ICL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, .id = ICL_DISP_PW_AUX_A, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, }, { .name = "AUX B", .domains = ICL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, .id = ICL_DISP_PW_AUX_B, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, }, { .name = "AUX C", .domains = ICL_AUX_C_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_AUX_C, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + }, }, { .name = "AUX D", .domains = ICL_AUX_D_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_AUX_D, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_D, + }, }, { .name = "AUX E", .domains = ICL_AUX_E_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_AUX_E, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_E, + }, }, { .name = "AUX F", .domains = ICL_AUX_F_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_AUX_F, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_F, + }, }, { .name = "AUX TBT1", .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_AUX_TBT1, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, + }, }, { .name = "AUX TBT2", .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_AUX_TBT2, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, + }, }, { .name = "AUX TBT3", .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, .ops = 
&hsw_power_well_ops, .id = ICL_DISP_PW_AUX_TBT3, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, + }, }, { .name = "AUX TBT4", .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_AUX_TBT4, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, + }, }, { .name = "power well 4", @@ -2767,6 +2967,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { .ops = &hsw_power_well_ops, .id = ICL_DISP_PW_4, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_4, .hsw.has_fuses = true, .hsw.irq_pipe_mask = BIT(PIPE_C), }, -- GitLab From 4739a9d2438bbc89e9aeea33b43680aeae2882e9 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:40 +0300 Subject: [PATCH 0077/1692] drm/i915: Remove redundant power well IDs Now that we removed dependence on the power well IDs to determine the control register and request/status flag offsets the only purpose of power well IDs is to look up power wells directly bypassing the power domains framework. However this direct lookup isn't needed for most of the exisiting power wells and hopefully won't be needed for any new power wells in the future. To make maintenance of the power well ID enum easier, don't require a unique ID for each power well, only if it's necessary. Remove the IDs becoming redundant this way and assign to all the corresponding power wells a new DISP_PW_ID_NONE ID. After the previous two patches the IDs don't need to have a fixed value, so remove the explicit initializers and adjust the enum's code comment accordingly. v2: - Keep required ID assignments for HSW_DISP_PW_GLOBAL and ICL_DISP_PW_2. (Paulo) Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-8-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 118 +++-------------------- drivers/gpu/drm/i915/intel_runtime_pm.c | 123 ++++++++++++------------ 2 files changed, 76 insertions(+), 165 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4f5eb49934b4..4568adc8369a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1029,117 +1029,25 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* * i915_power_well_id: * - * Platform specific IDs used to look up power wells and - except for custom - * power wells - to define request/status register flag bit positions. As such - * the set of IDs on a given platform must be unique and except for custom - * power wells their value must stay fixed. + * IDs used to look up power wells. Power wells accessed directly bypassing + * the power domains framework must be assigned a unique ID. The rest of power + * wells must be assigned DISP_PW_ID_NONE. 
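/*
 * Editor's aside (illustrative, not part of the upstream patches): after
 * this change only power wells that are still looked up directly, e.g.
 *
 *	cmn_bc = lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
 *
 * keep a unique ID.  Everything else is assigned DISP_PW_ID_NONE, which
 * the uniqueness bookkeeping in __set_power_wells() simply skips before
 * its WARN_ON(power_well_ids & BIT_ULL(id)) check.
 */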
*/ enum i915_power_well_id { - /* - * I830 - * - custom power well - */ - I830_DISP_PW_PIPES = 0, - - /* - * VLV/CHV - * - PUNIT_REG_PWRGT_CTRL (bit: id*2), - * PUNIT_REG_PWRGT_STATUS (bit: id*2) (PUNIT HAS v0.8) - */ - PUNIT_POWER_WELL_RENDER = 0, - PUNIT_POWER_WELL_MEDIA = 1, - PUNIT_POWER_WELL_DISP2D = 3, - PUNIT_POWER_WELL_DPIO_CMN_BC = 5, - PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6, - PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7, - PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8, - PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, - PUNIT_POWER_WELL_DPIO_RX0 = 10, - PUNIT_POWER_WELL_DPIO_RX1 = 11, - PUNIT_POWER_WELL_DPIO_CMN_D = 12, - /* - custom power well */ - CHV_DISP_PW_PIPE_A, /* 13 */ - - /* - * HSW/BDW - * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) - */ - HSW_DISP_PW_GLOBAL = 15, - - /* - * GEN9+ - * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) - */ - SKL_DISP_PW_MISC_IO = 0, - SKL_DISP_PW_DDI_A_E, - GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E, - CNL_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E, - SKL_DISP_PW_DDI_B, - SKL_DISP_PW_DDI_C, - SKL_DISP_PW_DDI_D, - CNL_DISP_PW_DDI_F = 6, - - GLK_DISP_PW_AUX_A = 8, - GLK_DISP_PW_AUX_B, - GLK_DISP_PW_AUX_C, - CNL_DISP_PW_AUX_A = GLK_DISP_PW_AUX_A, - CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B, - CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C, - CNL_DISP_PW_AUX_D, - CNL_DISP_PW_AUX_F, - - SKL_DISP_PW_1 = 14, + DISP_PW_ID_NONE, + + PUNIT_POWER_WELL_DISP2D, + PUNIT_POWER_WELL_DPIO_CMN_BC, + PUNIT_POWER_WELL_DPIO_CMN_D, + HSW_DISP_PW_GLOBAL, + SKL_DISP_PW_MISC_IO, + SKL_DISP_PW_1, SKL_DISP_PW_2, - - /* - custom power wells */ BXT_DPIO_CMN_A, BXT_DPIO_CMN_BC, - GLK_DPIO_CMN_C, /* 18 */ - - /* - * GEN11+ - * - _HSW_PWR_WELL_CTL1-4 - * (status bit: (id&15)*2, req bit:(id&15)*2+1) - */ - ICL_DISP_PW_1 = 0, + GLK_DPIO_CMN_C, + ICL_DISP_PW_1, ICL_DISP_PW_2, - ICL_DISP_PW_3, - ICL_DISP_PW_4, - - /* - * - _HSW_PWR_WELL_CTL_AUX1/2/4 - * (status bit: (id&15)*2, req bit:(id&15)*2+1) - */ - ICL_DISP_PW_AUX_A = 16, - ICL_DISP_PW_AUX_B, - ICL_DISP_PW_AUX_C, - ICL_DISP_PW_AUX_D, - ICL_DISP_PW_AUX_E, - ICL_DISP_PW_AUX_F, - - ICL_DISP_PW_AUX_TBT1 = 24, - ICL_DISP_PW_AUX_TBT2, - ICL_DISP_PW_AUX_TBT3, - ICL_DISP_PW_AUX_TBT4, - - /* - * - _HSW_PWR_WELL_CTL_DDI1/2/4 - * (status bit: (id&15)*2, req bit:(id&15)*2+1) - */ - ICL_DISP_PW_DDI_A = 32, - ICL_DISP_PW_DDI_B, - ICL_DISP_PW_DDI_C, - ICL_DISP_PW_DDI_D, - ICL_DISP_PW_DDI_E, - ICL_DISP_PW_DDI_F, /* 37 */ - - /* - * Multiple platforms. - * Must start following the highest ID of any platform. 
- * - custom power wells - */ - SKL_DISP_PW_DC_OFF = 38, - I915_DISP_PW_ALWAYS_ON, }; #define PUNIT_REG_PWRGT_CTRL 0x60 diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index bba32df770b2..d98f19e02580 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2067,7 +2067,7 @@ static const struct i915_power_well_desc i9xx_always_on_power_well[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, }; @@ -2084,13 +2084,13 @@ static const struct i915_power_well_desc i830_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "pipes", .domains = I830_PIPES_POWER_DOMAINS, .ops = &i830_pipes_power_well_ops, - .id = I830_DISP_PW_PIPES, + .id = DISP_PW_ID_NONE, }, }; @@ -2128,7 +2128,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "display", @@ -2149,7 +2149,7 @@ static const struct i915_power_well_desc bdw_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "display", @@ -2192,7 +2192,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "display", @@ -2210,7 +2210,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, + .id = DISP_PW_ID_NONE, { .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, }, @@ -2222,7 +2222,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, + .id = DISP_PW_ID_NONE, { .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, }, @@ -2234,7 +2234,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, + .id = DISP_PW_ID_NONE, { .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, }, @@ -2246,7 +2246,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, + .id = DISP_PW_ID_NONE, { .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, }, @@ -2268,7 +2268,7 @@ static const struct i915_power_well_desc chv_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "display", @@ -2279,7 +2279,7 @@ static const struct i915_power_well_desc chv_power_wells[] = { */ .domains = CHV_DISPLAY_POWER_DOMAINS, .ops = &chv_pipe_power_well_ops, - .id = CHV_DISP_PW_PIPE_A, + .id = DISP_PW_ID_NONE, }, { .name = "dpio-common-bc", @@ -2319,7 +2319,7 @@ static const struct i915_power_well_desc 
skl_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2348,7 +2348,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .name = "DC off", .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 2", @@ -2367,7 +2367,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .name = "DDI A/E IO power well", .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_A_E, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, @@ -2377,7 +2377,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .name = "DDI B IO power well", .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_B, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_B, @@ -2387,7 +2387,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .name = "DDI C IO power well", .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_C, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_C, @@ -2397,7 +2397,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .name = "DDI D IO power well", .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_D, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_D, @@ -2411,7 +2411,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2428,7 +2428,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .name = "DC off", .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 2", @@ -2469,7 +2469,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2487,7 +2487,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "DC off", .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 2", @@ -2533,7 +2533,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "AUX A", .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = GLK_DISP_PW_AUX_A, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = GLK_PW_CTL_IDX_AUX_A, @@ -2543,7 +2543,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "AUX B", .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = GLK_DISP_PW_AUX_B, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = GLK_PW_CTL_IDX_AUX_B, @@ -2553,7 +2553,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "AUX C", .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = GLK_DISP_PW_AUX_C, + 
.id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = GLK_PW_CTL_IDX_AUX_C, @@ -2563,7 +2563,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "DDI A IO power well", .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = GLK_DISP_PW_DDI_A, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = GLK_PW_CTL_IDX_DDI_A, @@ -2573,7 +2573,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "DDI B IO power well", .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_B, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_B, @@ -2583,7 +2583,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "DDI C IO power well", .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_C, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_C, @@ -2597,7 +2597,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2615,7 +2615,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "AUX A", .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_A, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = GLK_PW_CTL_IDX_AUX_A, @@ -2625,7 +2625,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "AUX B", .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_B, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = GLK_PW_CTL_IDX_AUX_B, @@ -2635,7 +2635,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "AUX C", .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_C, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = GLK_PW_CTL_IDX_AUX_C, @@ -2645,7 +2645,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "AUX D", .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_D, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = CNL_PW_CTL_IDX_AUX_D, @@ -2655,7 +2655,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "DC off", .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 2", @@ -2674,7 +2674,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "DDI A IO power well", .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_DDI_A, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = GLK_PW_CTL_IDX_DDI_A, @@ -2684,7 +2684,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "DDI B IO power well", .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_B, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_B, @@ -2694,7 +2694,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "DDI C IO power well", .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = 
SKL_DISP_PW_DDI_C, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_C, @@ -2704,7 +2704,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "DDI D IO power well", .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_D, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = SKL_PW_CTL_IDX_DDI_D, @@ -2714,7 +2714,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "DDI F IO power well", .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_DDI_F, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = CNL_PW_CTL_IDX_DDI_F, @@ -2724,7 +2724,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "AUX F", .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_F, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = CNL_PW_CTL_IDX_AUX_F, @@ -2757,7 +2757,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2786,13 +2786,13 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "DC off", .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 3", .domains = ICL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_3, @@ -2805,7 +2805,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "DDI A IO", .domains = ICL_DDI_IO_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_A, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_ddi_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_DDI_A, @@ -2815,7 +2815,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "DDI B IO", .domains = ICL_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_B, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_ddi_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_DDI_B, @@ -2825,7 +2825,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "DDI C IO", .domains = ICL_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_C, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_ddi_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_DDI_C, @@ -2835,7 +2835,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "DDI D IO", .domains = ICL_DDI_IO_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_D, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_ddi_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_DDI_D, @@ -2845,7 +2845,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "DDI E IO", .domains = ICL_DDI_IO_E_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_E, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_ddi_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_DDI_E, @@ -2855,7 +2855,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "DDI F IO", .domains = ICL_DDI_IO_F_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_F, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_ddi_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_DDI_F, @@ -2865,7 +2865,7 @@ static const 
struct i915_power_well_desc icl_power_wells[] = { .name = "AUX A", .domains = ICL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, - .id = ICL_DISP_PW_AUX_A, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_A, @@ -2875,7 +2875,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX B", .domains = ICL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, - .id = ICL_DISP_PW_AUX_B, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_B, @@ -2885,7 +2885,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX C", .domains = ICL_AUX_C_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_C, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_C, @@ -2895,7 +2895,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX D", .domains = ICL_AUX_D_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_D, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_D, @@ -2905,7 +2905,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX E", .domains = ICL_AUX_E_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_E, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_E, @@ -2915,7 +2915,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX F", .domains = ICL_AUX_F_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_F, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_F, @@ -2925,7 +2925,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX TBT1", .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_TBT1, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, @@ -2935,7 +2935,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX TBT2", .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_TBT2, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, @@ -2945,7 +2945,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX TBT3", .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_TBT3, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, @@ -2955,7 +2955,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "AUX TBT4", .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_TBT4, + .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, @@ -2965,7 +2965,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "power well 4", .domains = ICL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_4, + .id = DISP_PW_ID_NONE, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_4, @@ -3055,6 +3055,9 @@ __set_power_wells(struct i915_power_domains *power_domains, power_domains->power_wells[i].desc = &power_well_descs[i]; + if (id == DISP_PW_ID_NONE) + continue; + WARN_ON(id >= sizeof(power_well_ids) * 8); WARN_ON(power_well_ids & BIT_ULL(id)); power_well_ids |= 
BIT_ULL(id); -- GitLab From 2183b49933fce40eaf406e0ccfb57a3d4c50d9b8 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:41 +0300 Subject: [PATCH 0078/1692] drm/i915: Make power well ID names more uniform The format for the ID names is _DISP_PW_* so rename the IDs not following this accordingly. Leave BXT_DPIO_CMN_BC as-is since we'll change that to use another existing ID in the next patch. v2: - Fix line over 80 chars checkpatch warning. Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-9-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 10 +++--- drivers/gpu/drm/i915/intel_runtime_pm.c | 45 +++++++++++++------------ 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4568adc8369a..ef1fa5054e88 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1036,16 +1036,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) enum i915_power_well_id { DISP_PW_ID_NONE, - PUNIT_POWER_WELL_DISP2D, - PUNIT_POWER_WELL_DPIO_CMN_BC, - PUNIT_POWER_WELL_DPIO_CMN_D, + VLV_DISP_PW_DISP2D, + BXT_DISP_PW_DPIO_CMN_A, + VLV_DISP_PW_DPIO_CMN_BC, + GLK_DISP_PW_DPIO_CMN_C, + CHV_DISP_PW_DPIO_CMN_D, HSW_DISP_PW_GLOBAL, SKL_DISP_PW_MISC_IO, SKL_DISP_PW_1, SKL_DISP_PW_2, - BXT_DPIO_CMN_A, BXT_DPIO_CMN_BC, - GLK_DPIO_CMN_C, ICL_DISP_PW_1, ICL_DISP_PW_2, }; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index d98f19e02580..9df912bf50f4 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -768,7 +768,7 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) { struct i915_power_well *power_well; - power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); + power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); if (power_well->count > 0) bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); @@ -777,7 +777,8 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); if (IS_GEMINILAKE(dev_priv)) { - power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C); + power_well = lookup_power_well(dev_priv, + GLK_DISP_PW_DPIO_CMN_C); if (power_well->count > 0) bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); @@ -1129,9 +1130,9 @@ lookup_power_well(struct drm_i915_private *dev_priv, static void assert_chv_phy_status(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn_bc = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D); + lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); u32 phy_control = dev_priv->chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; @@ -1241,10 +1242,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, enum pipe pipe; uint32_t tmp; - WARN_ON_ONCE(power_well->desc->id != PUNIT_POWER_WELL_DPIO_CMN_BC && - power_well->desc->id != PUNIT_POWER_WELL_DPIO_CMN_D); + WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); - if (power_well->desc->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { pipe 
= PIPE_A; phy = DPIO_PHY0; } else { @@ -1272,7 +1273,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); - if (power_well->desc->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); tmp |= DPIO_DYNPWRDOWNEN_CH1; vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); @@ -1303,10 +1304,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, { enum dpio_phy phy; - WARN_ON_ONCE(power_well->desc->id != PUNIT_POWER_WELL_DPIO_CMN_BC && - power_well->desc->id != PUNIT_POWER_WELL_DPIO_CMN_D); + WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); - if (power_well->desc->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { phy = DPIO_PHY0; assert_pll_disabled(dev_priv, PIPE_A); assert_pll_disabled(dev_priv, PIPE_B); @@ -2198,7 +2199,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { .name = "display", .domains = VLV_DISPLAY_POWER_DOMAINS, .ops = &vlv_display_power_well_ops, - .id = PUNIT_POWER_WELL_DISP2D, + .id = VLV_DISP_PW_DISP2D, { .vlv.idx = PUNIT_PWGT_IDX_DISP2D, }, @@ -2255,7 +2256,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { .name = "dpio-common", .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, .ops = &vlv_dpio_cmn_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC, { .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, }, @@ -2285,7 +2286,7 @@ static const struct i915_power_well_desc chv_power_wells[] = { .name = "dpio-common-bc", .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, .ops = &chv_dpio_cmn_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC, { .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, }, @@ -2294,7 +2295,7 @@ static const struct i915_power_well_desc chv_power_wells[] = { .name = "dpio-common-d", .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, .ops = &chv_dpio_cmn_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_CMN_D, + .id = CHV_DISP_PW_DPIO_CMN_D, { .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, }, @@ -2447,7 +2448,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .name = "dpio-common-a", .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DPIO_CMN_A, + .id = BXT_DISP_PW_DPIO_CMN_A, { .bxt.phy = DPIO_PHY1, }, @@ -2506,7 +2507,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "dpio-common-a", .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DPIO_CMN_A, + .id = BXT_DISP_PW_DPIO_CMN_A, { .bxt.phy = DPIO_PHY1, }, @@ -2524,7 +2525,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "dpio-common-c", .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = GLK_DPIO_CMN_C, + .id = GLK_DISP_PW_DPIO_CMN_C, { .bxt.phy = DPIO_PHY2, }, @@ -3627,9 +3628,9 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) static void chv_phy_control_init(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn_bc = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D); + lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); /* * DISPLAY_PHY_CONTROL can get 
corrupted if read. As a @@ -3714,9 +3715,9 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *disp2d = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D); + lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); /* If the display might be already active skip this */ if (cmn->desc->ops->is_enabled(dev_priv, cmn) && -- GitLab From d9fcdc8d1f8e7b12111bb410abc4c27c411aa5d9 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:42 +0300 Subject: [PATCH 0079/1692] drm/i915: Use existing power well IDs where possible There is no need for separate IDs for power wells on a new platform with the same functionality as an other power well on a previous platform, we can just reuse the ID from the previous platform. This is only possible after the previous patches where we removed dependence on the actual enum values. This also fixes a problem on ICL where in assert_can_enable_dc5/9() we would've failed to look up the PW#2 power well. v2: - Keep an ID assigned for the ICL PW#2 power well too. (Paulo) Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak [Added comment about the ICL PW#2 fix to the commit log] Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-10-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 3 --- drivers/gpu/drm/i915/intel_runtime_pm.c | 14 +++++++------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ef1fa5054e88..77b031874ee3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1045,9 +1045,6 @@ enum i915_power_well_id { SKL_DISP_PW_MISC_IO, SKL_DISP_PW_1, SKL_DISP_PW_2, - BXT_DPIO_CMN_BC, - ICL_DISP_PW_1, - ICL_DISP_PW_2, }; #define PUNIT_REG_PWRGT_CTRL 0x60 diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 9df912bf50f4..e209edbc561d 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -772,7 +772,7 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) if (power_well->count > 0) bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); - power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); + power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); if (power_well->count > 0) bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); @@ -2457,7 +2457,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .name = "dpio-common-bc", .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC, { .bxt.phy = DPIO_PHY0, }, @@ -2516,7 +2516,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "dpio-common-b", .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC, { .bxt.phy = DPIO_PHY0, }, @@ -2765,7 +2765,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_1, + .id = SKL_DISP_PW_1, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_1, @@ -2776,7 +2776,7 @@ static const 
struct i915_power_well_desc icl_power_wells[] = { .name = "power well 2", .domains = ICL_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_2, + .id = SKL_DISP_PW_2, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_2, @@ -3576,7 +3576,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, * The AUX IO power wells will be enabled on demand. */ mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, ICL_DISP_PW_1); + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_enable(dev_priv, well); mutex_unlock(&power_domains->lock); @@ -3613,7 +3613,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) * disabled at this point. */ mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, ICL_DISP_PW_1); + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); -- GitLab From 1a260e1117a4ba5c39224fd00adcac78c84411b1 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 6 Aug 2018 12:58:43 +0300 Subject: [PATCH 0080/1692] drm/i915/icl: Add missing power gate enums On ICL there are 5 fused power gates, so add the two missing ones for clarity. Cc: Ville Syrjala Cc: Paulo Zanoni Cc: Jani Nikula Signed-off-by: Imre Deak Reviewed-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180806095843.13294-11-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 77b031874ee3..17575cfc22b5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8913,6 +8913,8 @@ enum skl_power_gate { SKL_PG0, SKL_PG1, SKL_PG2, + ICL_PG3, + ICL_PG4, }; #define SKL_FUSE_STATUS _MMIO(0x42000) -- GitLab From c1e63f6df3d3e9e4d0da67f6c8aabdfbe592371f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Aug 2018 11:50:59 +0100 Subject: [PATCH 0081/1692] drm/i915: Warn if we hit the timeout for wait-for-idle Hitting the timeout and finding that all engines are actually idle is indicative of an interrupt delivery problem. This problem is an issue that we need to fix, so make sure we log it and provide the GEM trace. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180808105101.913-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 460f256114f7..71502512ac1f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3823,6 +3823,12 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, if (timeout < 0) return timeout; } + if (GEM_SHOW_DEBUG() && !timeout) { + /* Presume that timeout was non-zero to begin with! 
*/ + dev_warn(&i915->drm.pdev->dev, + "Missed idle-completion interrupt!\n"); + GEM_TRACE_DUMP(); + } err = wait_for_engines(i915); if (err) -- GitLab From a4a717010f4e8cacaa3f0cae8a22f25c39ae1d41 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Aug 2018 11:51:00 +0100 Subject: [PATCH 0082/1692] drm/i915: Unmask user interrupts writes into HWSP on snb/ivb/vlv/hsw MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An oddity occurs on Sandybridge, Ivybridge and Haswell (and presumably Valleyview) in that for the period following the GPU restart after a reset, there are no GT interrupts received. From Ville's notes, bit 0 in the HWSTAM corresponds to the render interrupt, and if we unmask it we do see immediate resumption of GT interrupt delivery (via the master irq handler) after the reset. v2: Limit the w/a to the render interrupt from rcs Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107500 Fixes: c5498089463b ("drm/i915: Mask everything in ring HWSTAM on gen6+ in ringbuffer mode") References: d420a50c21ef ("drm/i915: Clean up the HWSTAM mess") Testcase: igt/gem_eio/reset-stress Signed-off-by: Chris Wilson Cc: Ville Syrjälä Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180808105101.913-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_ringbuffer.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8003cef767ba..d40f55a8dc34 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -387,8 +387,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = RING_HWS_PGA(engine->mmio_base); } - if (INTEL_GEN(dev_priv) >= 6) - I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); + if (INTEL_GEN(dev_priv) >= 6) { + u32 mask = ~0u; + + /* + * Keep the render interrupt unmasked as this papers over + * lost interrupts following a reset. + */ + if (engine->id == RCS) + mask &= ~BIT(0); + + I915_WRITE(RING_HWSTAM(engine->mmio_base), mask); + } I915_WRITE(mmio, engine->status_page.ggtt_offset); POSTING_READ(mmio); -- GitLab From a69ab52b0358d630bfa31183a45903263b46eaf2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Aug 2018 11:51:01 +0100 Subject: [PATCH 0083/1692] drm/i915: Remove extra waiter kick on legacy resets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now with a more efficacious workaround for the lost interrupts after reset, we can remove the hack of kicking the waiters after reset. The issue was that the kick only worked for the immediate window after the reset (those seqno that would complete in the time it took for the waiter thread to perform its check) but miss any seqno that lacked an interrupt afterwards. 
References: 39f3be162c46 ("drm/i915: Kick waiters on resetting legacy rings") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Ville Syrjälä Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180808105101.913-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d40f55a8dc34..b65cf7832b39 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -537,8 +537,6 @@ static int init_ring_common(struct intel_engine_cs *engine) if (INTEL_GEN(dev_priv) > 2) I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); - /* Papering over lost _interrupts_ immediately following the restart */ - intel_engine_wakeup(engine); out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); -- GitLab From d0e062ebb3a44b56a7e672da568334c76f763552 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Fri, 3 Aug 2018 16:27:21 -0700 Subject: [PATCH 0084/1692] drm/i915/cfl: Add a new CFL PCI ID. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit One more CFL ID added to spec. Cc: José Roberto de Souza Signed-off-by: Rodrigo Vivi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20180803232721.20038-1-rodrigo.vivi@intel.com --- include/drm/i915_pciids.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index fbf5cfc9b352..fd965ffbb92e 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -386,6 +386,7 @@ INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ /* CFL H */ -- GitLab From d60996ab430c8a6033a0944c068edc5ec5becb9b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Aug 2018 22:08:42 +0100 Subject: [PATCH 0085/1692] drm/i915: Restore user forcewake domains across suspend On suspend, we cancel the automatic forcewake and clear all other sources of forcewake so the machine can sleep before we do suspend. However, we expose the forcewake to userspace (only via debugfs, but nevertheless we do) and want to restore that upon resume or else our accounting will be off and we may not acquire the forcewake before we use it. So record which domains we cleared on suspend and reacquire them early on resume. 
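Condensed from the diff below (the PUNIT/PMIC bus acquire/release and the
notifier handling around these calls are elided), the suspend/resume pairing
now looks roughly like this:

    /* Suspend: reset forcewake, remembering which user domains were held. */
    void intel_uncore_suspend(struct drm_i915_private *dev_priv)
    {
            dev_priv->uncore.fw_domains_saved =
                    intel_uncore_forcewake_reset(dev_priv);
    }

    /* Resume: consume the saved mask and re-acquire those domains early,
     * before anything else touches the hardware.
     */
    void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
    {
            unsigned int restore_forcewake;

            restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
            __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
    }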
v2: Hold the spinlock to appease our sanitychecks v3: s/fw_domains_user/fw_domains_saved/ to convey intent more clearly Reported-by: Imre Deak Fixes: b8473050805f ("drm/i915: Fix forcewake active domain tracking") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Mika Kuoppala Cc: Imre Deak Reviewed-by: Imre Deak Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180808210842.3555-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.c | 46 +++++++++++-------- drivers/gpu/drm/i915/intel_uncore.h | 1 + drivers/gpu/drm/i915/selftests/intel_uncore.c | 2 +- 3 files changed, 28 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 284be151f645..c2fcb51fc58a 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -369,8 +369,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) } /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */ -static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, - bool restore) +static unsigned int +intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) { unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; @@ -422,20 +422,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains); - - if (restore) { /* If reset with a user forcewake, try to restore */ - if (fw) - dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); - - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) - dev_priv->uncore.fifo_count = - fifo_free_entries(dev_priv); - } - - if (!restore) - assert_forcewakes_inactive(dev_priv); + assert_forcewakes_inactive(dev_priv); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + + return fw; /* track the lost user forcewake domains */ } static u64 gen9_edram_size(struct drm_i915_private *dev_priv) @@ -544,7 +535,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) } static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, - bool restore_forcewake) + unsigned int restore_forcewake) { /* clear out unclaimed reg detection bit */ if (check_for_unclaimed_mmio(dev_priv)) @@ -559,7 +550,17 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, } iosf_mbi_punit_acquire(); - intel_uncore_forcewake_reset(dev_priv, restore_forcewake); + intel_uncore_forcewake_reset(dev_priv); + if (restore_forcewake) { + spin_lock_irq(&dev_priv->uncore.lock); + dev_priv->uncore.funcs.force_wake_get(dev_priv, + restore_forcewake); + + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) + dev_priv->uncore.fifo_count = + fifo_free_entries(dev_priv); + spin_unlock_irq(&dev_priv->uncore.lock); + } iosf_mbi_punit_release(); } @@ -568,13 +569,18 @@ void intel_uncore_suspend(struct drm_i915_private *dev_priv) iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( &dev_priv->uncore.pmic_bus_access_nb); - intel_uncore_forcewake_reset(dev_priv, false); + dev_priv->uncore.fw_domains_saved = + intel_uncore_forcewake_reset(dev_priv); iosf_mbi_punit_release(); } void intel_uncore_resume_early(struct drm_i915_private *dev_priv) { - __intel_uncore_early_sanitize(dev_priv, true); + unsigned int restore_forcewake; + + restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved); + __intel_uncore_early_sanitize(dev_priv, restore_forcewake); + 
iosf_mbi_register_pmic_bus_access_notifier( &dev_priv->uncore.pmic_bus_access_nb); i915_check_and_clear_faults(dev_priv); @@ -1555,7 +1561,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv) intel_uncore_edram_detect(dev_priv); intel_uncore_fw_domains_init(dev_priv); - __intel_uncore_early_sanitize(dev_priv, false); + __intel_uncore_early_sanitize(dev_priv, 0); dev_priv->uncore.unclaimed_mmio_check = 1; dev_priv->uncore.pmic_bus_access_nb.notifier_call = @@ -1642,7 +1648,7 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv) iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( &dev_priv->uncore.pmic_bus_access_nb); - intel_uncore_forcewake_reset(dev_priv, false); + intel_uncore_forcewake_reset(dev_priv); iosf_mbi_punit_release(); } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 2fbe93178fb2..e5e157d288de 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -104,6 +104,7 @@ struct intel_uncore { enum forcewake_domains fw_domains; enum forcewake_domains fw_domains_active; + enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */ u32 fw_set; u32 fw_clear; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 47bc5b2ddb56..81d9d31042a9 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -160,7 +160,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri i915_reg_t reg = { offset }; iosf_mbi_punit_acquire(); - intel_uncore_forcewake_reset(dev_priv, false); + intel_uncore_forcewake_reset(dev_priv); iosf_mbi_punit_release(); check_for_unclaimed_mmio(dev_priv); -- GitLab From 7b5ee80a5da3ea44c5abff48e3621135ae9d8177 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 9 Aug 2018 07:34:49 +0100 Subject: [PATCH 0086/1692] drm/i915/selftests: Hold rpm for unparking The call to i915_gem_unpark() checks that we hold a rpm wakeref before taking a long term wakeref for i915->gt.awake. We should therefore make sure we do hold the wakeref when directly calling unpark to disable the retire worker. 
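In one piece, the helper this adds reads as follows (condensed from the diff
below); the wakeref is only needed around the unpark itself, as that is where
the rpm assertion fires:

    static void disable_retire_worker(struct drm_i915_private *i915)
    {
            mutex_lock(&i915->drm.struct_mutex);
            if (!i915->gt.active_requests++) {
                    /* i915_gem_unpark() asserts we hold a runtime-pm wakeref */
                    intel_runtime_pm_get(i915);
                    i915_gem_unpark(i915);
                    intel_runtime_pm_put(i915);
            }
            mutex_unlock(&i915->drm.struct_mutex);

            cancel_delayed_work_sync(&i915->gt.retire_work);
            cancel_delayed_work_sync(&i915->gt.idle_work);
    }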
Fixes: 932cac10c8fb ("drm/i915/selftests: Prevent background reaping of active objects") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Matthew Auld Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180809063449.4474-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/selftests/i915_gem_object.c | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index d9eca1b02aee..6d3516d5bff9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -499,6 +499,19 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, return err == expected; } +static void disable_retire_worker(struct drm_i915_private *i915) +{ + mutex_lock(&i915->drm.struct_mutex); + if (!i915->gt.active_requests++) { + intel_runtime_pm_get(i915); + i915_gem_unpark(i915); + intel_runtime_pm_put(i915); + } + mutex_unlock(&i915->drm.struct_mutex); + cancel_delayed_work_sync(&i915->gt.retire_work); + cancel_delayed_work_sync(&i915->gt.idle_work); +} + static int igt_mmap_offset_exhaustion(void *arg) { struct drm_i915_private *i915 = arg; @@ -509,12 +522,7 @@ static int igt_mmap_offset_exhaustion(void *arg) int loop, err; /* Disable background reaper */ - mutex_lock(&i915->drm.struct_mutex); - if (!i915->gt.active_requests++) - i915_gem_unpark(i915); - mutex_unlock(&i915->drm.struct_mutex); - cancel_delayed_work_sync(&i915->gt.retire_work); - cancel_delayed_work_sync(&i915->gt.idle_work); + disable_retire_worker(i915); GEM_BUG_ON(!i915->gt.awake); /* Trim the device mmap space to only a page */ -- GitLab From c44301fce614644bbd608347881bf1aab940b436 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 9 Aug 2018 16:21:01 +0200 Subject: [PATCH 0087/1692] drm/i915: Allow control of PSR at runtime through debugfs, v6 Currently tests modify i915.enable_psr and then do a modeset cycle to change PSR. We can write a value to i915_edp_psr_debug to force a certain PSR mode without a modeset. To retain compatibility with older userspace, we also still allow the override through the module parameter, and add some tracking to check whether a debugfs mode is specified. Changes since v1: - Rename dev_priv->psr.enabled to .dp, and .hw_configured to .enabled. - Fix i915_psr_debugfs_mode to match the writes to debugfs. - Rename __i915_edp_psr_write to intel_psr_set_debugfs_mode, simplify it and move it to intel_psr.c. This keeps all internals in intel_psr.c - Perform an interruptible wait for hw completion outside of the psr lock, instead of being forced to trywait and return -EBUSY. Changes since v2: - Rebase on top of intel_psr changes. Changes since v3: - Assign psr.dp during init. (dhnkrn) - Add prepared bool, which should be used instead of relying on psr.dp. (dhnkrn) - Fix -EDEADLK handling in debugfs. (dhnkrn) - Clean up waiting for idle in intel_psr_set_debugfs_mode. - Print PSR mode when trying to enable PSR. (dhnkrn) - Move changing psr debug setting to i915_edp_psr_debug_set. (dhnkrn) Changes since v4: - Return error in _set() function. - Change flag values to make them easier to remember. (dhnkrn) - Only assign psr.dp once. (dhnkrn) - Only set crtc_state->has_psr on the crtc with psr.dp. - Fix typo. (dhnkrn) Changes since v5: - Only wait for PSR idle on the PSR connector correctly. (dhnkrn) - Reinstate WARN_ON(drrs.dp) in intel_psr_enable. (dhnkrn) - Remove stray comment. 
(dhnkrn) - Be silent in intel_psr_compute_config on wrong connector. (dhnkrn) Signed-off-by: Maarten Lankhorst Cc: Rodrigo Vivi Cc: Dhinakaran Pandiyan Link: https://patchwork.freedesktop.org/patch/msgid/20180809142101.26155-1-maarten.lankhorst@linux.intel.com Reviewed-by: Dhinakaran Pandiyan --- drivers/gpu/drm/i915/i915_debugfs.c | 23 ++++- drivers/gpu/drm/i915/i915_drv.h | 12 ++- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/intel_drv.h | 3 + drivers/gpu/drm/i915/intel_psr.c | 134 +++++++++++++++++++++++----- 5 files changed, 146 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 23f38bc257a2..26b7e5276b15 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2708,7 +2708,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->psr.lock); - seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); + seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled)); seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", dev_priv->psr.busy_frontbuffer_bits); @@ -2750,17 +2750,32 @@ static int i915_edp_psr_debug_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; + struct drm_modeset_acquire_ctx ctx; + int ret; if (!CAN_PSR(dev_priv)) return -ENODEV; - DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val)); + DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); intel_runtime_pm_get(dev_priv); - intel_psr_irq_control(dev_priv, !!val); + + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + +retry: + ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val); + if (ret == -EDEADLK) { + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + intel_runtime_pm_put(dev_priv); - return 0; + return ret; } static int diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0b10a30b7d96..495021cb3b74 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -611,8 +611,17 @@ struct i915_drrs { struct i915_psr { struct mutex lock; + +#define I915_PSR_DEBUG_MODE_MASK 0x0f +#define I915_PSR_DEBUG_DEFAULT 0x00 +#define I915_PSR_DEBUG_DISABLE 0x01 +#define I915_PSR_DEBUG_ENABLE 0x02 +#define I915_PSR_DEBUG_IRQ 0x10 + + u32 debug; bool sink_support; - struct intel_dp *enabled; + bool prepared, enabled; + struct intel_dp *dp; bool active; struct work_struct work; unsigned busy_frontbuffer_bits; @@ -622,7 +631,6 @@ struct i915_psr { bool alpm; bool psr2_enabled; u8 sink_sync_latency; - bool debug; ktime_t last_entry_attempt; ktime_t last_exit; }; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8084e35b25c5..b2c9838442bc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -4048,7 +4048,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) if (IS_HASWELL(dev_priv)) { gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); + intel_psr_irq_control(dev_priv, dev_priv->psr.debug & I915_PSR_DEBUG_IRQ); display_mask |= DE_EDP_PSR_INT_HSW; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0601abb8c71f..1295bd8bcd7d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1932,6 +1932,9 @@ void intel_psr_enable(struct intel_dp *intel_dp, const struct intel_crtc_state 
*crtc_state); void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); +int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, + struct drm_modeset_acquire_ctx *ctx, + u64 value); void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 4bd5768731ee..e9ca410e18c4 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -56,6 +56,18 @@ #include "intel_drv.h" #include "i915_drv.h" +static bool psr_global_enabled(u32 debug) +{ + switch (debug & I915_PSR_DEBUG_MODE_MASK) { + case I915_PSR_DEBUG_DEFAULT: + return i915_modparams.enable_psr; + case I915_PSR_DEBUG_DISABLE: + return false; + default: + return true; + } +} + void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) { u32 debug_mask, mask; @@ -80,7 +92,6 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) if (debug) mask |= debug_mask; - WRITE_ONCE(dev_priv->psr.debug, debug); I915_WRITE(EDP_PSR_IMR, ~mask); } @@ -213,6 +224,9 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) dev_priv->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); + WARN_ON(dev_priv->psr.dp); + dev_priv->psr.dp = intel_dp; + if (INTEL_GEN(dev_priv) >= 9 && (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { bool y_req = intel_dp->psr_dpcd[1] & @@ -471,10 +485,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, if (!CAN_PSR(dev_priv)) return; - if (!i915_modparams.enable_psr) { - DRM_DEBUG_KMS("PSR disable by flag\n"); + if (intel_dp != dev_priv->psr.dp) return; - } /* * HSW spec explicitly says PSR is tied to port A. @@ -517,7 +529,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, crtc_state->has_psr = true; crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); - DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : ""); } static void intel_psr_activate(struct intel_dp *intel_dp) @@ -589,6 +600,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, } } +static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state) +{ + struct intel_dp *intel_dp = dev_priv->psr.dp; + + if (dev_priv->psr.enabled) + return; + + DRM_DEBUG_KMS("Enabling PSR%s\n", + dev_priv->psr.psr2_enabled ? 
"2" : "1"); + intel_psr_setup_vsc(intel_dp, crtc_state); + intel_psr_enable_sink(intel_dp); + intel_psr_enable_source(intel_dp, crtc_state); + dev_priv->psr.enabled = true; + + intel_psr_activate(intel_dp); +} + /** * intel_psr_enable - Enable PSR * @intel_dp: Intel DP @@ -610,21 +639,21 @@ void intel_psr_enable(struct intel_dp *intel_dp, return; WARN_ON(dev_priv->drrs.dp); + mutex_lock(&dev_priv->psr.lock); - if (dev_priv->psr.enabled) { + if (dev_priv->psr.prepared) { DRM_DEBUG_KMS("PSR already in use\n"); goto unlock; } dev_priv->psr.psr2_enabled = crtc_state->has_psr2; dev_priv->psr.busy_frontbuffer_bits = 0; + dev_priv->psr.prepared = true; - intel_psr_setup_vsc(intel_dp, crtc_state); - intel_psr_enable_sink(intel_dp); - intel_psr_enable_source(intel_dp, crtc_state); - dev_priv->psr.enabled = intel_dp; - - intel_psr_activate(intel_dp); + if (psr_global_enabled(dev_priv->psr.debug)) + intel_psr_enable_locked(dev_priv, crtc_state); + else + DRM_DEBUG_KMS("PSR disabled by flag\n"); unlock: mutex_unlock(&dev_priv->psr.lock); @@ -683,12 +712,14 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) if (!dev_priv->psr.enabled) return; + DRM_DEBUG_KMS("Disabling PSR%s\n", + dev_priv->psr.psr2_enabled ? "2" : "1"); intel_psr_disable_source(intel_dp); /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); - dev_priv->psr.enabled = NULL; + dev_priv->psr.enabled = false; } /** @@ -712,7 +743,14 @@ void intel_psr_disable(struct intel_dp *intel_dp, return; mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.prepared) { + mutex_unlock(&dev_priv->psr.lock); + return; + } + intel_psr_disable_locked(intel_dp); + + dev_priv->psr.prepared = false; mutex_unlock(&dev_priv->psr.lock); cancel_work_sync(&dev_priv->psr.work); } @@ -724,7 +762,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) i915_reg_t reg; u32 mask; - if (!new_crtc_state->has_psr) + if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) return 0; /* @@ -756,13 +794,11 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) { - struct intel_dp *intel_dp; i915_reg_t reg; u32 mask; int err; - intel_dp = dev_priv->psr.enabled; - if (!intel_dp) + if (!dev_priv->psr.enabled) return false; if (dev_priv->psr.psr2_enabled) { @@ -784,6 +820,62 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) return err == 0 && dev_priv->psr.enabled; } +int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, + struct drm_modeset_acquire_ctx *ctx, + u64 val) +{ + struct drm_device *dev = &dev_priv->drm; + struct drm_connector_state *conn_state; + struct drm_crtc *crtc; + struct intel_dp *dp; + int ret; + bool enable; + + if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || + (val & I915_PSR_DEBUG_MODE_MASK) > I915_PSR_DEBUG_ENABLE) { + DRM_DEBUG_KMS("Invalid debug mask %llx\n", val); + return -EINVAL; + } + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); + if (ret) + return ret; + + /* dev_priv->psr.dp should be set once and then never touched again. 
*/ + dp = READ_ONCE(dev_priv->psr.dp); + conn_state = dp->attached_connector->base.state; + crtc = conn_state->crtc; + if (crtc) { + ret = drm_modeset_lock(&crtc->mutex, ctx); + if (ret) + return ret; + + ret = wait_for_completion_interruptible(&crtc->state->commit->hw_done); + } else + ret = wait_for_completion_interruptible(&conn_state->commit->hw_done); + + if (ret) + return ret; + + ret = mutex_lock_interruptible(&dev_priv->psr.lock); + if (ret) + return ret; + + enable = psr_global_enabled(val); + + if (!enable) + intel_psr_disable_locked(dev_priv->psr.dp); + + dev_priv->psr.debug = val; + intel_psr_irq_control(dev_priv, dev_priv->psr.debug & I915_PSR_DEBUG_IRQ); + + if (dev_priv->psr.prepared && enable) + intel_psr_enable_locked(dev_priv, to_intel_crtc_state(crtc->state)); + + mutex_unlock(&dev_priv->psr.lock); + return ret; +} + static void intel_psr_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -811,7 +903,7 @@ static void intel_psr_work(struct work_struct *work) if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) goto unlock; - intel_psr_activate(dev_priv->psr.enabled); + intel_psr_activate(dev_priv->psr.dp); unlock: mutex_unlock(&dev_priv->psr.lock); } @@ -866,7 +958,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, return; } - crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; + crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); @@ -909,7 +1001,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, return; } - crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; + crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); @@ -991,7 +1083,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) mutex_lock(&psr->lock); - if (psr->enabled != intel_dp) + if (!psr->enabled || psr->dp != intel_dp) goto exit; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) { -- GitLab From 2ac45bdd92e0289ee2d1310f1e07b719c037c6b7 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 8 Aug 2018 16:19:11 +0200 Subject: [PATCH 0088/1692] drm/i915/psr: Add debugfs support to force a downgrade to PSR1 mode. This will make it easier to test PSR1 on PSR2 capable eDP machines. Changes since v1: - Remove I915_PSR_DEBUG_FORCE_PSR2, it did nothing, not sure forcing PSR2 would even work. - Handle NULL crtc in intel_psr_set_debugfs_mode. 
(dhnkrn) Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20180808141911.7647-2-maarten.lankhorst@linux.intel.com Reviewed-by: Dhinakaran Pandiyan --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_psr.c | 44 ++++++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 495021cb3b74..5fa13887b911 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -616,6 +616,7 @@ struct i915_psr { #define I915_PSR_DEBUG_DEFAULT 0x00 #define I915_PSR_DEBUG_DISABLE 0x01 #define I915_PSR_DEBUG_ENABLE 0x02 +#define I915_PSR_DEBUG_FORCE_PSR1 0x03 #define I915_PSR_DEBUG_IRQ 0x10 u32 debug; diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index e9ca410e18c4..7560c65f50ad 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -68,6 +68,17 @@ static bool psr_global_enabled(u32 debug) } } +static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state) +{ + switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { + case I915_PSR_DEBUG_FORCE_PSR1: + return false; + default: + return crtc_state->has_psr2; + } +} + void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) { u32 debug_mask, mask; @@ -646,7 +657,7 @@ void intel_psr_enable(struct intel_dp *intel_dp, goto unlock; } - dev_priv->psr.psr2_enabled = crtc_state->has_psr2; + dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); dev_priv->psr.busy_frontbuffer_bits = 0; dev_priv->psr.prepared = true; @@ -820,19 +831,38 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) return err == 0 && dev_priv->psr.enabled; } +static bool switching_psr(struct drm_i915_private *dev_priv, + struct intel_crtc_state *crtc_state, + u32 mode) +{ + /* Can't switch psr state anyway if PSR2 is not supported. 
*/ + if (!crtc_state || !crtc_state->has_psr2) + return false; + + if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1) + return true; + + if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1) + return true; + + return false; +} + int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, struct drm_modeset_acquire_ctx *ctx, u64 val) { struct drm_device *dev = &dev_priv->drm; struct drm_connector_state *conn_state; + struct intel_crtc_state *crtc_state = NULL; struct drm_crtc *crtc; struct intel_dp *dp; int ret; bool enable; + u32 mode = val & I915_PSR_DEBUG_MODE_MASK; if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || - (val & I915_PSR_DEBUG_MODE_MASK) > I915_PSR_DEBUG_ENABLE) { + mode > I915_PSR_DEBUG_FORCE_PSR1) { DRM_DEBUG_KMS("Invalid debug mask %llx\n", val); return -EINVAL; } @@ -850,7 +880,8 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, if (ret) return ret; - ret = wait_for_completion_interruptible(&crtc->state->commit->hw_done); + crtc_state = to_intel_crtc_state(crtc->state); + ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done); } else ret = wait_for_completion_interruptible(&conn_state->commit->hw_done); @@ -863,14 +894,17 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, enable = psr_global_enabled(val); - if (!enable) + if (!enable || switching_psr(dev_priv, crtc_state, mode)) intel_psr_disable_locked(dev_priv->psr.dp); dev_priv->psr.debug = val; + if (crtc) + dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); + intel_psr_irq_control(dev_priv, dev_priv->psr.debug & I915_PSR_DEBUG_IRQ); if (dev_priv->psr.prepared && enable) - intel_psr_enable_locked(dev_priv, to_intel_crtc_state(crtc->state)); + intel_psr_enable_locked(dev_priv, crtc_state); mutex_unlock(&dev_priv->psr.lock); return ret; -- GitLab From ee435831ec83344dba5ccddd4ffcc6ca95d1cf77 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Thu, 9 Aug 2018 16:58:52 -0700 Subject: [PATCH 0089/1692] drm/i915/icl: account for context save/restore removed bits The RS_CTX_ENABLE and CTX_SAVE_INHIBIT bits are not present on ICL anymore, but we still try to set them and then check them with GEM_BUG_ON, resulting in a BUG() call. The bug can be reproduced by igt/drv_selftest/live_hangcheck/others-priority and our CI was able to catch it. It is worth noticing that commit 05f0addd9b10 ("drm/i915/icl: Enhanced execution list support") already tried to avoid the save bits on ICL, but only inside populate_lr_context(). 
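Condensed from the diff below, the resulting register setup only programs the
bits that still exist on the running gen:

    CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
            _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
            _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
    if (INTEL_GEN(dev_priv) < 11) {
            /* RS_CTX_ENABLE and CTX_SAVE_INHIBIT were removed on gen11 */
            regs[CTX_CONTEXT_CONTROL + 1] |=
                    _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
                                        CTX_CTRL_RS_CTX_ENABLE);
    }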
Cc: Chris Wilson Cc: Mika Kuoppala Testcase: igt/drv_selftest/live_hangcheck/others-priority Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107399 References: 05f0addd9b10 ("drm/i915/icl: Enhanced execution list support") Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180809235852.24516-1-paulo.r.zanoni@intel.com Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lrc.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index e5385dbfcdda..3f90c74038ef 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -541,11 +541,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine) GEM_BUG_ON(execlists->preempt_complete_status != upper_32_bits(ce->lrc_desc)); - GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] & - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) != - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)); /* * Switch to our empty preempt context so @@ -2582,10 +2577,13 @@ static void execlists_init_reg_state(u32 *regs, MI_LRI_FORCE_POSTED; CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine), - _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | - CTX_CTRL_RS_CTX_ENABLE) | + _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH)); + if (INTEL_GEN(dev_priv) < 11) { + regs[CTX_CONTEXT_CONTROL + 1] |= + _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | + CTX_CTRL_RS_CTX_ENABLE); + } CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0); CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0); CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0); -- GitLab From 41db645a33e775855aeeec1a437d5c1e24ff6c88 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Jul 2018 12:57:29 +0100 Subject: [PATCH 0090/1692] drm/i915: Bump priority of clean up work We require that we keep the list of outstanding work short so that we do not "leak" memory while pageflipping under stress. However that system stress may delay kernel workers virtually indefinitely, which incurs the pageflips stall and eventually hit a timeout waiting for the cleanup. Try to combat CPU starvation of our short-lived cleanup workers by switching to a high priority workqueue. Testcase: igt/kms_cursor_legacy/all-pipes-torture-move References: https://bugs.freedesktop.org/show_bug.cgi?id=107122 Signed-off-by: Chris Wilson Cc: Daniel Vetter Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180712115729.3506-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 53e7a7e75384..366ff66e9279 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12738,7 +12738,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * down. 
*/ INIT_WORK(&state->commit_work, intel_atomic_cleanup_work); - schedule_work(&state->commit_work); + queue_work(system_highpri_wq, &state->commit_work); } static void intel_atomic_commit_work(struct work_struct *work) -- GitLab From e02e65001e7b436f3590ec6acff259ec54689df5 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 10 Aug 2018 17:00:35 +0300 Subject: [PATCH 0091/1692] drm/i915: Expose retry count to per gen reset logic There is a possibility for per gen reset logic to be more nasty if the softer approach on resetting does not bear fruit. Expose retry count to per gen reset logic if it wants to take such tough measures. Cc: Chris Wilson Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180810140036.24240-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 40 +++++++++++++++++++---------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c2fcb51fc58a..027d14574bfa 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1739,7 +1739,7 @@ static void gen3_stop_engine(struct intel_engine_cs *engine) } static void i915_stop_engines(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -1759,7 +1759,9 @@ static bool i915_in_reset(struct pci_dev *pdev) return gdrst & GRDOM_RESET_STATUS; } -static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) +static int i915_do_reset(struct drm_i915_private *dev_priv, + unsigned int engine_mask, + unsigned int retry) { struct pci_dev *pdev = dev_priv->drm.pdev; int err; @@ -1786,7 +1788,9 @@ static bool g4x_reset_complete(struct pci_dev *pdev) return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) +static int g33_do_reset(struct drm_i915_private *dev_priv, + unsigned int engine_mask, + unsigned int retry) { struct pci_dev *pdev = dev_priv->drm.pdev; @@ -1794,7 +1798,9 @@ static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) return wait_for(g4x_reset_complete(pdev), 500); } -static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) +static int g4x_do_reset(struct drm_i915_private *dev_priv, + unsigned int engine_mask, + unsigned int retry) { struct pci_dev *pdev = dev_priv->drm.pdev; int ret; @@ -1831,7 +1837,8 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) } static int ironlake_do_reset(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask, + unsigned int retry) { int ret; @@ -1887,6 +1894,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * gen6_reset_engines - reset individual engines * @dev_priv: i915 device * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset + * @retry: the count of of previous attempts to reset. * * This function will reset the individual engines that are set in engine_mask. * If you provide ALL_ENGINES as mask, full global domain reset will be issued. @@ -1897,7 +1905,8 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * Returns 0 on success, nonzero on error. 
*/ static int gen6_reset_engines(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask, + unsigned int retry) { struct intel_engine_cs *engine; const u32 hw_engine_mask[I915_NUM_ENGINES] = { @@ -1936,7 +1945,7 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv, * Returns 0 on success, nonzero on error. */ static int gen11_reset_engines(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask) { struct intel_engine_cs *engine; const u32 hw_engine_mask[I915_NUM_ENGINES] = { @@ -2105,7 +2114,8 @@ static void gen8_reset_engine_cancel(struct intel_engine_cs *engine) } static int gen8_reset_engines(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask, + unsigned int retry) { struct intel_engine_cs *engine; unsigned int tmp; @@ -2121,7 +2131,7 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) >= 11) ret = gen11_reset_engines(dev_priv, engine_mask); else - ret = gen6_reset_engines(dev_priv, engine_mask); + ret = gen6_reset_engines(dev_priv, engine_mask, retry); not_ready: for_each_engine_masked(engine, dev_priv, engine_mask, tmp) @@ -2130,7 +2140,8 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv, return ret; } -typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); +typedef int (*reset_func)(struct drm_i915_private *, + unsigned int engine_mask, unsigned int retry); static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { @@ -2153,10 +2164,10 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) return NULL; } -int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) +int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned int engine_mask) { reset_func reset = intel_get_gpu_reset(dev_priv); - int retry; + unsigned int retry; int ret; /* @@ -2200,8 +2211,9 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) ret = -ENODEV; if (reset) { - GEM_TRACE("engine_mask=%x\n", engine_mask); - ret = reset(dev_priv, engine_mask); + ret = reset(dev_priv, engine_mask, retry); + GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n", + engine_mask, ret, retry); } if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES) break; -- GitLab From f4e60c5cfbf217cc9faa3aeb63742860154fcfef Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Mon, 13 Aug 2018 16:01:16 +0300 Subject: [PATCH 0092/1692] drm/i915: Force reset on unready engine If engine reports that it is not ready for reset, we give up. Evidence shows that forcing a per engine reset on an engine which is not reporting to be ready for reset, can bring it back into a working order. There is risk that we corrupt the context image currently executing on that engine. But that is a risk worth taking as if we unblock the engine, we prevent a whole device wedging in a case of full gpu reset. Reset individual engine even if it reports that it is not prepared for reset, but only if we aim for full gpu reset and not on first reset attempt. 
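Condensed from the diff below, the gating is a single flag derived from the
attempt count: the first attempt still backs off when an engine is not ready,
later attempts proceed and accept the risk of context corruption:

    static int gen8_reset_engines(struct drm_i915_private *dev_priv,
                                  unsigned int engine_mask,
                                  unsigned int retry)
    {
            struct intel_engine_cs *engine;
            const bool reset_non_ready = retry >= 1;
            unsigned int tmp;
            int ret;

            for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
                    ret = gen8_engine_reset_prepare(engine);
                    if (ret && !reset_non_ready)
                            goto skip_reset;
                    /* otherwise reset anyway, despite the engine not being
                     * ready, rather than wedge the whole device */
            }

            ret = reset_engines(dev_priv, engine_mask, retry);

    skip_reset:
            for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                    gen8_engine_reset_cancel(engine);

            return ret;
    }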
v2: force reset only on later attempts, readability (Chris) v3: simplify with adequate caffeine levels (Chris) v4: comment about risks and migitations (Chris) Cc: Chris Wilson Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180813130116.7250-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 50 +++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 027d14574bfa..20f2f5ad9c3f 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -2085,7 +2085,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv, return ret; } -static int gen8_reset_engine_start(struct intel_engine_cs *engine) +static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; int ret; @@ -2105,7 +2105,7 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine) return ret; } -static void gen8_reset_engine_cancel(struct intel_engine_cs *engine) +static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -2113,29 +2113,50 @@ static void gen8_reset_engine_cancel(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); } +static int reset_engines(struct drm_i915_private *i915, + unsigned int engine_mask, + unsigned int retry) +{ + if (INTEL_GEN(i915) >= 11) + return gen11_reset_engines(i915, engine_mask); + else + return gen6_reset_engines(i915, engine_mask, retry); +} + static int gen8_reset_engines(struct drm_i915_private *dev_priv, unsigned int engine_mask, unsigned int retry) { struct intel_engine_cs *engine; + const bool reset_non_ready = retry >= 1; unsigned int tmp; int ret; for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { - if (gen8_reset_engine_start(engine)) { - ret = -EIO; - goto not_ready; - } + ret = gen8_engine_reset_prepare(engine); + if (ret && !reset_non_ready) + goto skip_reset; + + /* + * If this is not the first failed attempt to prepare, + * we decide to proceed anyway. + * + * By doing so we risk context corruption and with + * some gens (kbl), possible system hang if reset + * happens during active bb execution. + * + * We rather take context corruption instead of + * failed reset with a wedged driver/gpu. And + * active bb execution case should be covered by + * i915_stop_engines we have before the reset. + */ } - if (INTEL_GEN(dev_priv) >= 11) - ret = gen11_reset_engines(dev_priv, engine_mask); - else - ret = gen6_reset_engines(dev_priv, engine_mask, retry); + ret = reset_engines(dev_priv, engine_mask, retry); -not_ready: +skip_reset: for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - gen8_reset_engine_cancel(engine); + gen8_engine_reset_cancel(engine); return ret; } @@ -2164,12 +2185,15 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) return NULL; } -int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned int engine_mask) +int intel_gpu_reset(struct drm_i915_private *dev_priv, + const unsigned int engine_mask) { reset_func reset = intel_get_gpu_reset(dev_priv); unsigned int retry; int ret; + GEM_BUG_ON(!engine_mask); + /* * We want to perform per-engine reset from atomic context (e.g. * softirq), which imposes the constraint that we cannot sleep. 
-- GitLab From 30b710840e4b9c9699d3d4b33fb19ad8880d4614 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Aug 2018 23:36:29 +0100 Subject: [PATCH 0093/1692] drm/i915: Cleanup gt powerstate from gem Since the gt powerstate is allocated by i915_gem_init, clean it from i915_gem_fini for symmetry and to correct the imbalance on error. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180812223642.24865-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 3 +++ drivers/gpu/drm/i915/intel_display.c | 4 ---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 71502512ac1f..0453eb42a1a3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -5634,6 +5634,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) void i915_gem_fini(struct drm_i915_private *dev_priv) { i915_gem_suspend_late(dev_priv); + intel_disable_gt_powersave(dev_priv); /* Flush any outstanding unpin_work. */ i915_gem_drain_workqueue(dev_priv); @@ -5645,6 +5646,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) i915_gem_contexts_fini(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); + intel_cleanup_gt_powersave(dev_priv); + intel_uc_fini_misc(dev_priv); i915_gem_cleanup_userptr(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 366ff66e9279..5138a921e0b5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15991,8 +15991,6 @@ void intel_modeset_cleanup(struct drm_device *dev) flush_work(&dev_priv->atomic_helper.free_work); WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); - intel_disable_gt_powersave(dev_priv); - /* * Interrupts and polling as the first thing to avoid creating havoc. * Too much stuff here (turning of connectors, ...) would @@ -16020,8 +16018,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_cleanup_overlay(dev_priv); - intel_cleanup_gt_powersave(dev_priv); - intel_teardown_gmbus(dev_priv); destroy_workqueue(dev_priv->modeset_wq); -- GitLab From 61e1e376bb25095f741d3949e51eb557cc432dc2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Aug 2018 23:36:30 +0100 Subject: [PATCH 0094/1692] drm/i915: Restrict gen6_reset_rps_interrupts to gen6+ Do not call gen6_reset_rps_interrupts() when we know the registers do not exist. 
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180812223642.24865-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_pm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 03654f5f68c3..9a01560c5bd1 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -8260,7 +8260,7 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 11) gen11_reset_rps_interrupts(dev_priv); - else + else if (INTEL_GEN(dev_priv) >= 6) gen6_reset_rps_interrupts(dev_priv); } -- GitLab From d6fee0dee09317d5e83e9b855316cb779dd679cf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Aug 2018 11:40:56 +0100 Subject: [PATCH 0095/1692] drm/i915: Kick waiters on resetting legacy rings This reapplies commit 39f3be162c46 ("drm/i915: Kick waiters on resetting legacy rings") after the improved gem_eio was run across all machines we found that gen3 and early gen4 still lost the immediate interrupt following reset, and the HWSTAM w/a applied to gen6+ is inadequate. Unlike the later gen, on gen3/4 the principle (and only tests to fail so far) are the wait vs reset test cases, whereas the reset stress case works fine (which was the predominantly failing case for gen6+). That is enough to suggest the underlying issue is sufficiently different to support the difference in HWSTAM efficacy. Testcase: igt/gem_eio/wait-10ms References: 39f3be162c46 ("drm/i915: Kick waiters on resetting legacy rings") References: a69ab52b0358 ("drm/i915: Remove extra waiter kick on legacy resets") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180814104056.27001-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b65cf7832b39..d40f55a8dc34 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -537,6 +537,8 @@ static int init_ring_common(struct intel_engine_cs *engine) if (INTEL_GEN(dev_priv) > 2) I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); + /* Papering over lost _interrupts_ immediately following the restart */ + intel_engine_wakeup(engine); out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); -- GitLab From f623f75ae443d0c771635d51cc986b9d389bf631 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Aug 2018 00:35:24 +0200 Subject: [PATCH 0096/1692] rfkill-gpio: include linux/mod_devicetable.h One more driver is apparently broken by the recent change to linux/platform_device.h: net/rfkill/rfkill-gpio.c: In function 'rfkill_gpio_acpi_probe': net/rfkill/rfkill-gpio.c:82:29: error: dereferencing pointer to incomplete type 'const struct acpi_device_id' Include linux/mod_devicetable.h to get the definition of the acpi_device_id structure. 
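A stand-alone illustration of the failure mode, using hypothetical type names
rather than the kernel headers: a forward declaration is enough to pass
pointers around, but the compiler rejects any dereference until the full
definition, here standing in for linux/mod_devicetable.h, is in scope.

    struct device_id;                          /* forward declaration only;
                                                * id->data would not compile here */

    struct device_id { unsigned long data; };  /* the "included" definition */

    unsigned long get_data(const struct device_id *id)
    {
            return id->data;                   /* fine once the layout is known */
    }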
Fixes: ac3167257b9f ("headers: separate linux/mod_devicetable.h from linux/platform_device.h") Signed-off-by: Arnd Bergmann Signed-off-by: Johannes Berg --- net/rfkill/rfkill-gpio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 00192a996be0..0f8465852254 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include -- GitLab From 77cfaf52eca5cac30ed029507e0cab065f888995 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Mon, 13 Aug 2018 14:16:25 +0200 Subject: [PATCH 0097/1692] mac80211: Run TXQ teardown code before de-registering interfaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The TXQ teardown code can reference the vif data structures that are stored in the netdev private memory area if there are still packets on the queue when it is being freed. Since the TXQ teardown code is run after the netdevs are freed, this can lead to a use-after-free. Fix this by moving the TXQ teardown code to earlier in ieee80211_unregister_hw(). Reported-by: Ben Greear Tested-by: Ben Greear Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Johannes Berg --- net/mac80211/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index fb73451ed85e..0358f20b675f 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1182,6 +1182,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&local->ifa6_notifier); #endif + ieee80211_txq_teardown_flows(local); rtnl_lock(); @@ -1210,7 +1211,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) skb_queue_purge(&local->skb_queue); skb_queue_purge(&local->skb_queue_unreliable); skb_queue_purge(&local->skb_queue_tdls_chsw); - ieee80211_txq_teardown_flows(local); destroy_workqueue(local->workqueue); wiphy_unregister(local->hw.wiphy); -- GitLab From dc5977da99ea28094b8fa4e9bacbd29bedc41de5 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 14 Aug 2018 09:00:01 +0300 Subject: [PATCH 0098/1692] drm/i915: set DP Main Stream Attribute for color range on DDI platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since Haswell we have no color range indication either in the pipe or port registers for DP. Instead, there's a separate register for setting the DP Main Stream Attributes (MSA) directly. The MSA register definition makes no references to colorimetry, just a vague reference to the DP spec. The connection to the color range was lost. Apparently we've failed to set the proper MSA bit for limited, or CEA, range ever since the first DDI platforms. We've started setting other MSA parameters since commit dae847991a43 ("drm/i915: add intel_ddi_set_pipe_settings"). Without the crucial bit of information, the DP sink has no way of knowing the source is actually transmitting limited range RGB, leading to "washed out" colors. With the colorimetry information, compliant sinks should be able to handle the limited range properly. Native (i.e. non-LSPCON) HDMI was not affected because we do pass the color range via AVI infoframes. 
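In code terms the one-bit fix, shown in context in the diff at the end of this
message, amounts to:

    temp = TRANS_MSA_SYNC_CLK;

    /* Tell the sink we are transmitting CEA (limited) range RGB. */
    if (crtc_state->limited_color_range)
            temp |= TRANS_MSA_CEA_RANGE;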
Though not the root cause, the problem was made worse for DDI platforms with commit 55bc60db5988 ("drm/i915: Add "Automatic" mode for the "Broadcast RGB" property"), which selects limited range RGB automatically based on the mode, as per the DP, HDMI and CEA specs. After all these years, the fix boils down to flipping one bit. [Per testing reports, this fixes DP sinks, but not the LSPCON. My educated guess is that the LSPCON fails to turn the CEA range MSA into AVI infoframes for HDMI.] Reported-by: Michał Kopeć Reported-by: N. W. Reported-by: Nicholas Stommel Reported-by: Tom Yan Tested-by: Nicholas Stommel References: https://bugs.freedesktop.org/show_bug.cgi?id=100023 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107476 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=94921 Cc: Paulo Zanoni Cc: Rodrigo Vivi Cc: Ville Syrjälä Cc: # v3.9+ Reviewed-by: Rodrigo Vivi Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180814060001.18224-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_ddi.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 17575cfc22b5..0c9f03dda569 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9246,6 +9246,7 @@ enum skl_power_gate { #define TRANS_MSA_10_BPC (2 << 5) #define TRANS_MSA_12_BPC (3 << 5) #define TRANS_MSA_16_BPC (4 << 5) +#define TRANS_MSA_CEA_RANGE (1 << 3) /* LCPLL Control */ #define LCPLL_CTL _MMIO(0x130040) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 0adc043529f2..6f7be066c8f2 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1685,6 +1685,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) WARN_ON(transcoder_is_dsi(cpu_transcoder)); temp = TRANS_MSA_SYNC_CLK; + + if (crtc_state->limited_color_range) + temp |= TRANS_MSA_CEA_RANGE; + switch (crtc_state->pipe_bpp) { case 18: temp |= TRANS_MSA_6_BPC; -- GitLab From 260c48b7ec26dfaf70d9230c3639f420e304e781 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 12 Aug 2018 13:42:06 +0200 Subject: [PATCH 0099/1692] ASoC: Intel: bytcr_rt5640: Add quirks for 2 more devices Add quirks to select the right input-map, jack-detect pin, etc. 
for: Linx Linx7 tablet Onda V975w tablet Signed-off-by: Hans de Goede Acked-by: Pierre-Louis Bossart Signed-off-by: Mark Brown --- sound/soc/intel/boards/bytcr_rt5640.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index d32844f94d74..b6dc524830b2 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_MONO_SPEAKER | BYT_RT5640_MCLK_EN), }, + { /* Linx Linx7 tablet */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_JD_NOT_INV | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* MSI S100 tablet */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), @@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* Onda v975w */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* The above are too generic, also match BIOS info */ + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"), + DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"), + }, + .driver_data = (void *)(BYT_RT5640_IN1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_MCLK_EN), + }, { /* Pipo W4 */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), -- GitLab From 5e4cfadaf5b73a0801b2fa7fb007f98400ebfe6e Mon Sep 17 00:00:00 2001 From: Marcel Ziswiler Date: Tue, 14 Aug 2018 00:35:56 +0200 Subject: [PATCH 0100/1692] ASoC: wm9712: fix replace codec to component Since commit 143b44845d87 ("ASoC: wm9712: replace codec to component") "wm9712-codec" got renamed to "wm9712-component", however, this change never got propagated down to the actual board/platform drivers. E.g. on Colibri T20 this lead to the following spew upon boot with sound/touch being broken: [ 2.214121] tegra-snd-wm9712 sound: ASoC: CODEC DAI wm9712-hifi not registered [ 2.222137] tegra-snd-wm9712 sound: snd_soc_register_card failed (-517) ... [ 2.344384] tegra-snd-wm9712 sound: ASoC: CODEC DAI wm9712-hifi not registered [ 2.351885] tegra-snd-wm9712 sound: snd_soc_register_card failed (-517) ... [ 2.668339] tegra-snd-wm9712 sound: ASoC: CODEC DAI wm9712-hifi not registered [ 2.675811] tegra-snd-wm9712 sound: snd_soc_register_card failed (-517) ... [ 3.208408] tegra-snd-wm9712 sound: ASoC: CODEC DAI wm9712-hifi not registered [ 3.216312] tegra-snd-wm9712 sound: snd_soc_register_card failed (-517) ... [ 3.235397] tegra-snd-wm9712 sound: ASoC: CODEC DAI wm9712-hifi not registered [ 3.248938] tegra-snd-wm9712 sound: snd_soc_register_card failed (-517) ... [ 14.970443] ALSA device list: [ 14.996628] No soundcards found. This commit finally fixes this again. 
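For context, a brief sketch of why the driver name string matters (the board code below is hypothetical, only the name is the point): the platform bus matches a registered device against platform_driver.driver.name, so the rename to "wm9712-component" left boards that instantiate "wm9712-codec" without a matching driver.

/* Hypothetical board code: creates the codec device by name. */
static struct platform_device wm9712_codec_dev = {
	.name	= "wm9712-codec",	/* must equal the driver's .driver.name */
	.id	= -1,
};

/* platform_device_register(&wm9712_codec_dev) only binds if the wm9712
 * platform_driver still registers under the same string. */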
Signed-off-by: Marcel Ziswiler Acked-by: Charles Keepax Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- sound/soc/codecs/wm9712.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 953d94d50586..ade34c26ad2f 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev) static struct platform_driver wm9712_component_driver = { .driver = { - .name = "wm9712-component", + .name = "wm9712-codec", }, .probe = wm9712_probe, -- GitLab From fb504caae7ef85be159743bd4b08ecde269ba55f Mon Sep 17 00:00:00 2001 From: "Dmitry V. Levin" Date: Mon, 13 Aug 2018 18:50:02 +0300 Subject: [PATCH 0101/1692] ASoC: uapi: fix sound/skl-tplg-interface.h userspace compilation errors Include and consistently use types it provides to fix the following sound/skl-tplg-interface.h userspace compilation errors: /usr/include/sound/skl-tplg-interface.h:146:2: error: unknown type name 'u32' u32 set_params:2; /usr/include/sound/skl-tplg-interface.h:147:2: error: unknown type name 'u32' u32 rsvd:30; /usr/include/sound/skl-tplg-interface.h:148:2: error: unknown type name 'u32' u32 param_id; /usr/include/sound/skl-tplg-interface.h:149:2: error: unknown type name 'u32' u32 max; /usr/include/sound/skl-tplg-interface.h:166:2: error: unknown type name 'u16' u16 module_id; /usr/include/sound/skl-tplg-interface.h:167:2: error: unknown type name 'u16' u16 instance_id; /usr/include/sound/skl-tplg-interface.h:171:2: error: unknown type name 'u32' u32 channels; /usr/include/sound/skl-tplg-interface.h:172:2: error: unknown type name 'u32' u32 freq; /usr/include/sound/skl-tplg-interface.h:173:2: error: unknown type name 'u32' u32 bit_depth; /usr/include/sound/skl-tplg-interface.h:174:2: error: unknown type name 'u32' u32 valid_bit_depth; /usr/include/sound/skl-tplg-interface.h:175:2: error: unknown type name 'u32' u32 ch_cfg; /usr/include/sound/skl-tplg-interface.h:176:2: error: unknown type name 'u32' u32 interleaving_style; /usr/include/sound/skl-tplg-interface.h:177:2: error: unknown type name 'u32' u32 sample_type; /usr/include/sound/skl-tplg-interface.h:178:2: error: unknown type name 'u32' u32 ch_map; /usr/include/sound/skl-tplg-interface.h:182:2: error: unknown type name 'u32' u32 set_params:2; /usr/include/sound/skl-tplg-interface.h:183:2: error: unknown type name 'u32' u32 rsvd:30; /usr/include/sound/skl-tplg-interface.h:184:2: error: unknown type name 'u32' u32 param_id; /usr/include/sound/skl-tplg-interface.h:185:2: error: unknown type name 'u32' u32 caps_size; /usr/include/sound/skl-tplg-interface.h:186:2: error: unknown type name 'u32' u32 caps[HDA_SST_CFG_MAX]; /usr/include/sound/skl-tplg-interface.h:190:2: error: unknown type name 'u8' u8 pipe_id; /usr/include/sound/skl-tplg-interface.h:191:2: error: unknown type name 'u8' u8 pipe_priority; /usr/include/sound/skl-tplg-interface.h:192:2: error: unknown type name 'u16' u16 conn_type:4; /usr/include/sound/skl-tplg-interface.h:193:2: error: unknown type name 'u16' u16 rsvd:4; /usr/include/sound/skl-tplg-interface.h:194:2: error: unknown type name 'u16' u16 memory_pages:8; /usr/include/sound/skl-tplg-interface.h:200:2: error: unknown type name 'u16' u16 module_id; /usr/include/sound/skl-tplg-interface.h:201:2: error: unknown type name 'u16' u16 instance_id; /usr/include/sound/skl-tplg-interface.h:202:2: error: unknown type name 'u32' u32 max_mcps; /usr/include/sound/skl-tplg-interface.h:203:2: error: unknown type name 
'u32' u32 mem_pages; /usr/include/sound/skl-tplg-interface.h:204:2: error: unknown type name 'u32' u32 obs; /usr/include/sound/skl-tplg-interface.h:205:2: error: unknown type name 'u32' u32 ibs; /usr/include/sound/skl-tplg-interface.h:206:2: error: unknown type name 'u32' u32 vbus_id; /usr/include/sound/skl-tplg-interface.h:208:2: error: unknown type name 'u32' u32 max_in_queue:8; /usr/include/sound/skl-tplg-interface.h:209:2: error: unknown type name 'u32' u32 max_out_queue:8; /usr/include/sound/skl-tplg-interface.h:210:2: error: unknown type name 'u32' u32 time_slot:8; /usr/include/sound/skl-tplg-interface.h:211:2: error: unknown type name 'u32' u32 core_id:4; /usr/include/sound/skl-tplg-interface.h:212:2: error: unknown type name 'u32' u32 rsvd1:4; /usr/include/sound/skl-tplg-interface.h:214:2: error: unknown type name 'u32' u32 module_type:8; /usr/include/sound/skl-tplg-interface.h:215:2: error: unknown type name 'u32' u32 conn_type:4; /usr/include/sound/skl-tplg-interface.h:216:2: error: unknown type name 'u32' u32 dev_type:4; /usr/include/sound/skl-tplg-interface.h:217:2: error: unknown type name 'u32' u32 hw_conn_type:4; /usr/include/sound/skl-tplg-interface.h:218:2: error: unknown type name 'u32' u32 rsvd2:12; /usr/include/sound/skl-tplg-interface.h:220:2: error: unknown type name 'u32' u32 params_fixup:8; /usr/include/sound/skl-tplg-interface.h:221:2: error: unknown type name 'u32' u32 converter:8; /usr/include/sound/skl-tplg-interface.h:222:2: error: unknown type name 'u32' u32 input_pin_type:1; /usr/include/sound/skl-tplg-interface.h:223:2: error: unknown type name 'u32' u32 output_pin_type:1; /usr/include/sound/skl-tplg-interface.h:224:2: error: unknown type name 'u32' u32 is_dynamic_in_pin:1; /usr/include/sound/skl-tplg-interface.h:225:2: error: unknown type name 'u32' u32 is_dynamic_out_pin:1; /usr/include/sound/skl-tplg-interface.h:226:2: error: unknown type name 'u32' u32 is_loadable:1; /usr/include/sound/skl-tplg-interface.h:227:2: error: unknown type name 'u32' u32 rsvd3:11; Fixes: 0c24fdc00244 ("ASoC: topology: Move skl-tplg-interface.h to uapi") Signed-off-by: Dmitry V. Levin Reviewed-by: Guenter Roeck Signed-off-by: Mark Brown Cc: # v4.18 --- include/uapi/sound/skl-tplg-interface.h | 106 ++++++++++++------------ 1 file changed, 54 insertions(+), 52 deletions(-) diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index f58cafa42f18..f39352cef382 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -10,6 +10,8 @@ #ifndef __HDA_TPLG_INTERFACE_H__ #define __HDA_TPLG_INTERFACE_H__ +#include + /* * Default types range from 0~12. 
type can range from 0 to 0xff * SST types start at higher to avoid any overlapping in future @@ -143,10 +145,10 @@ enum skl_module_param_type { }; struct skl_dfw_algo_data { - u32 set_params:2; - u32 rsvd:30; - u32 param_id; - u32 max; + __u32 set_params:2; + __u32 rsvd:30; + __u32 param_id; + __u32 max; char params[0]; } __packed; @@ -163,68 +165,68 @@ enum skl_tuple_type { /* v4 configuration data */ struct skl_dfw_v4_module_pin { - u16 module_id; - u16 instance_id; + __u16 module_id; + __u16 instance_id; } __packed; struct skl_dfw_v4_module_fmt { - u32 channels; - u32 freq; - u32 bit_depth; - u32 valid_bit_depth; - u32 ch_cfg; - u32 interleaving_style; - u32 sample_type; - u32 ch_map; + __u32 channels; + __u32 freq; + __u32 bit_depth; + __u32 valid_bit_depth; + __u32 ch_cfg; + __u32 interleaving_style; + __u32 sample_type; + __u32 ch_map; } __packed; struct skl_dfw_v4_module_caps { - u32 set_params:2; - u32 rsvd:30; - u32 param_id; - u32 caps_size; - u32 caps[HDA_SST_CFG_MAX]; + __u32 set_params:2; + __u32 rsvd:30; + __u32 param_id; + __u32 caps_size; + __u32 caps[HDA_SST_CFG_MAX]; } __packed; struct skl_dfw_v4_pipe { - u8 pipe_id; - u8 pipe_priority; - u16 conn_type:4; - u16 rsvd:4; - u16 memory_pages:8; + __u8 pipe_id; + __u8 pipe_priority; + __u16 conn_type:4; + __u16 rsvd:4; + __u16 memory_pages:8; } __packed; struct skl_dfw_v4_module { char uuid[SKL_UUID_STR_SZ]; - u16 module_id; - u16 instance_id; - u32 max_mcps; - u32 mem_pages; - u32 obs; - u32 ibs; - u32 vbus_id; - - u32 max_in_queue:8; - u32 max_out_queue:8; - u32 time_slot:8; - u32 core_id:4; - u32 rsvd1:4; - - u32 module_type:8; - u32 conn_type:4; - u32 dev_type:4; - u32 hw_conn_type:4; - u32 rsvd2:12; - - u32 params_fixup:8; - u32 converter:8; - u32 input_pin_type:1; - u32 output_pin_type:1; - u32 is_dynamic_in_pin:1; - u32 is_dynamic_out_pin:1; - u32 is_loadable:1; - u32 rsvd3:11; + __u16 module_id; + __u16 instance_id; + __u32 max_mcps; + __u32 mem_pages; + __u32 obs; + __u32 ibs; + __u32 vbus_id; + + __u32 max_in_queue:8; + __u32 max_out_queue:8; + __u32 time_slot:8; + __u32 core_id:4; + __u32 rsvd1:4; + + __u32 module_type:8; + __u32 conn_type:4; + __u32 dev_type:4; + __u32 hw_conn_type:4; + __u32 rsvd2:12; + + __u32 params_fixup:8; + __u32 converter:8; + __u32 input_pin_type:1; + __u32 output_pin_type:1; + __u32 is_dynamic_in_pin:1; + __u32 is_dynamic_out_pin:1; + __u32 is_loadable:1; + __u32 rsvd3:11; struct skl_dfw_v4_pipe pipe; struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE]; -- GitLab From 08ea70a417baa1c0e5faa580070cd2ffd04e6285 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 12 Aug 2018 23:36:31 +0100 Subject: [PATCH 0102/1692] drm/i915: Disable runtime-pm using lowlevel functions if !HAS_RC6 If we cannot setup rc6, we cannot let the GPU suspend itself as it cannot save its state (to a powercontext). As such, we must disable runtime-pm, but we should do so using the low-level pm-runtime function which leaves our own debugging functions intact (and continue to detect errors in our runtime-pm handling should we ever be able to enable rc6). 
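A simplified sketch (not the real i915 helpers) of the distinction the commit leans on: the driver's own wrappers bump a private wakeref counter that the runtime-PM asserts check, while the raw pm_runtime_* calls only touch the PCI device's usage count, so the extra reference taken here stays invisible to that bookkeeping.

/* Roughly what the tracked wrapper does (simplified illustration). */
static void tracked_get(struct drm_i915_private *i915)
{
	pm_runtime_get_sync(&i915->drm.pdev->dev);
	atomic_inc(&i915->runtime_pm.wakeref_count);	/* seen by rpm asserts */
}

/* What the patch uses for the no-RC6 case: device pinned awake, driver
 * bookkeeping untouched, so the debug asserts keep working. */
static void untracked_get(struct drm_i915_private *i915)
{
	pm_runtime_get(&i915->drm.pdev->dev);
}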
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180812223642.24865-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_pm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 9a01560c5bd1..d99e5fabe93c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -26,6 +26,7 @@ */ #include +#include #include #include "i915_drv.h" #include "intel_drv.h" @@ -8181,7 +8182,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) */ if (!sanitize_rc6(dev_priv)) { DRM_INFO("RC6 disabled, disabling runtime PM support\n"); - intel_runtime_pm_get(dev_priv); + pm_runtime_get(&dev_priv->drm.pdev->dev); } mutex_lock(&dev_priv->pcu_lock); @@ -8233,7 +8234,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) valleyview_cleanup_gt_powersave(dev_priv); if (!HAS_RC6(dev_priv)) - intel_runtime_pm_put(dev_priv); + pm_runtime_put(&dev_priv->drm.pdev->dev); } /** -- GitLab From 12eeeb4f4733bbc4481d01df35933fc15beb8b19 Mon Sep 17 00:00:00 2001 From: Yong Zhi Date: Mon, 13 Aug 2018 18:15:14 -0500 Subject: [PATCH 0103/1692] ASoC: Intel: Skylake: Acquire irq after RIRB allocation Cold reboot stress test found that the hda irq could access rirb ring buffer before its memory gets allocated which resulting in null pointer dereference inside snd_hdac_bus_update_rirb(). Fix it by moving the skl_acquire_irq after ring buffer allocation. While here, also change err return from -EBUSY to actual error code. Signed-off-by: Yong Zhi Acked-by: Pierre-Louis Bossart Signed-off-by: Mark Brown --- sound/soc/intel/skylake/skl.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index dce649485649..cf09721ca13e 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -838,11 +838,7 @@ static int skl_first_init(struct hdac_bus *bus) snd_hdac_bus_parse_capabilities(bus); - if (skl_acquire_irq(bus, 0) < 0) - return -EBUSY; - pci_set_master(pci); - synchronize_irq(bus->irq); gcap = snd_hdac_chip_readw(bus, GCAP); dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap); @@ -875,6 +871,12 @@ static int skl_first_init(struct hdac_bus *bus) if (err < 0) return err; + err = skl_acquire_irq(bus, 0); + if (err < 0) + return err; + + synchronize_irq(bus->irq); + /* initialize chip */ skl_init_pci(skl); -- GitLab From c066fafc595eef5ae3c83ae3a8305956b8c3ef15 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 14 Aug 2018 20:37:45 +1000 Subject: [PATCH 0104/1692] KVM: PPC: Book3S HV: Use correct pagesize in kvm_unmap_radix() Since commit e641a317830b ("KVM: PPC: Book3S HV: Unify dirty page map between HPT and radix", 2017-10-26), kvm_unmap_radix() computes the number of PAGE_SIZEd pages being unmapped and passes it to kvmppc_update_dirty_map(), which expects to be passed the page size instead. Consequently it will only mark one system page dirty even when a large page (for example a THP page) is being unmapped. The consequence of this is that part of the THP page might not get copied during live migration, resulting in memory corruption for the guest. This fixes it by computing and passing the page size in kvm_unmap_radix(). 
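A short worked example of the unit mix-up, assuming 4 KiB base pages (PAGE_SHIFT == 12) and a 2 MiB THP mapping:

/* Illustration only; values assumed as stated above. */
static void dirty_map_example(void)
{
	unsigned int shift = 21;			/* 2 MiB mapping           */
	unsigned long npages = 1ul << (shift - 12);	/* 512: the old argument   */
	unsigned long psize  = 1ul << shift;		/* 2 MiB: the new argument */

	/* kvmppc_update_dirty_map() expects a byte size, so the old call was
	 * read as a 512-byte range and marked a single base page dirty rather
	 * than the 512 pages actually covered by the THP. */
	(void)npages;
	(void)psize;
}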
Cc: stable@vger.kernel.org # v4.15+ Fixes: e641a317830b (KVM: PPC: Book3S HV: Unify dirty page map between HPT and radix) Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 176f911ee983..7efc42538ccf 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -738,10 +738,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, gpa, shift); kvmppc_radix_tlbie_page(kvm, gpa, shift); if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { - unsigned long npages = 1; + unsigned long psize = PAGE_SIZE; if (shift) - npages = 1ul << (shift - PAGE_SHIFT); - kvmppc_update_dirty_map(memslot, gfn, npages); + psize = 1ul << shift; + kvmppc_update_dirty_map(memslot, gfn, psize); } } return 0; -- GitLab From a99b32a6fff7e482a267c72e565c8c410ce793d7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Aug 2018 18:18:57 +0100 Subject: [PATCH 0105/1692] drm/i915: Clear stop-engine for a pardoned reset If we pardon a per-engine reset, we may leave the STOP_RING bit asserted in RING_MI_MODE resulting in the engine hanging. Unconditionally clear it on the per-engine exit path as we know that either we skipped the reset and so need the cancellation, or the reset was successful and the cancellation is a no-op, or there was an error and we will follow up with a full-reset or wedging (both of which will stop the engines again as required). Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107188 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=106560 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180814171857.24673-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/intel_engine_cs.c | 10 ++++++++++ drivers/gpu/drm/i915/intel_ringbuffer.h | 1 + 3 files changed, 12 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9dce55182c3a..41111f2a9c39 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2079,6 +2079,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) goto out; out: + intel_engine_cancel_stop_cs(engine); i915_gem_reset_finish_engine(engine); return ret; } diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 99d5a24219c1..8628567d8f6e 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -788,6 +788,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) return err; } +void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + GEM_TRACE("%s\n", engine->name); + + I915_WRITE_FW(RING_MI_MODE(engine->mmio_base), + _MASKED_BIT_DISABLE(STOP_RING)); +} + const char *i915_cache_level_str(struct drm_i915_private *i915, int type) { switch (type) { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 9090885d57de..3f6920dd7880 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -906,6 +906,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine); int intel_engine_stop_cs(struct intel_engine_cs *engine); +void 
intel_engine_cancel_stop_cs(struct intel_engine_cs *engine); u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); -- GitLab From ad3c776b171078a10ace07616a34ed6266beb0e7 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 15 Aug 2018 16:10:38 +0300 Subject: [PATCH 0106/1692] drm/i915: Fix PM refcounting w/o DMC firmware The case where the firmware isn't specified for a platform (although runtime PM works only with DMC on this platform) is the same case where the firmware is specified but can't be loaded for some reason. Hence we need to get a display init power domain ref in the first case too to keep the refcount bookkeeping in balance. Also convert the related log message to be a debug one, since it's a valid scenario for a new platform, where we need to have dev_info->has_csr=1 set, but add support for actually loading the firmware only later. v2: - In addition to the debug log, WARN on non-alpha support platforms, since then the first case isn't valid scenario. (Chris) References: https://bugs.freedesktop.org/show_bug.cgi?id=107382 Cc: Chris Wilson Cc: Anusha Srivatsa Signed-off-by: Imre Deak Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180815131038.24446-1-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_csr.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index cf9b600cca79..1ec4f09c61f6 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -468,12 +468,6 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) csr->fw_path = I915_CSR_SKL; else if (IS_BROXTON(dev_priv)) csr->fw_path = I915_CSR_BXT; - else { - DRM_ERROR("Unexpected: no known CSR firmware for platform\n"); - return; - } - - DRM_DEBUG_KMS("Loading %s\n", csr->fw_path); /* * Obtain a runtime pm reference, until CSR is loaded, @@ -481,6 +475,14 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) */ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + if (csr->fw_path == NULL) { + DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n"); + WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv))); + + return; + } + + DRM_DEBUG_KMS("Loading %s\n", csr->fw_path); schedule_work(&dev_priv->csr.work); } -- GitLab From 1a4327fbf4554d5b78d75b19a13d40d6de220159 Mon Sep 17 00:00:00 2001 From: Kirill Kapranov Date: Mon, 13 Aug 2018 19:48:10 +0300 Subject: [PATCH 0107/1692] spi: fix IDR collision on systems with both fixed and dynamic SPI bus numbers On systems where some controllers get a dynamic ID assigned and some have a fixed number (e.g. from ACPI tables), the current implementation might run into an IDR collision: in case of a fixed bus number is gotten by a driver (but not marked busy in IDR tree) and a driver with dynamic bus number gets the same ID and predictably fails. Fix this by means of checking-in fixed IDsin IDR as far as dynamic ones at the moment of the controller registration. 
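For reference, a sketch of the idr_alloc() contract the fix builds on (the function and variable names below are made up): requesting the one-element range [want, want + 1) either reserves exactly that ID or reports that it is already occupied.

/* Hypothetical helper showing the fixed-ID claim. */
static int claim_fixed_id(struct idr *idr, void *owner, int want)
{
	int id = idr_alloc(idr, owner, want, want + 1, GFP_KERNEL);

	/* -ENOSPC means nothing in [want, want + 1) was free, i.e. the number
	 * is taken by another controller; the patch surfaces that as -EBUSY. */
	return id == -ENOSPC ? -EBUSY : id;
}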
Fixes: 9b61e302210e (spi: Pick spi bus number from Linux idr or spi alias) Signed-off-by: Kirill Kapranov Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- drivers/spi/spi.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ec395a6baf9c..a00d006d4c3a 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2170,6 +2170,15 @@ int spi_register_controller(struct spi_controller *ctlr) if (WARN(id < 0, "couldn't get idr")) return id; ctlr->bus_num = id; + } else { + /* devices with a fixed bus num must check-in with the num */ + mutex_lock(&board_lock); + id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, + ctlr->bus_num + 1, GFP_KERNEL); + mutex_unlock(&board_lock); + if (WARN(id < 0, "couldn't get idr")) + return id == -ENOSPC ? -EBUSY : id; + ctlr->bus_num = id; } INIT_LIST_HEAD(&ctlr->queue); spin_lock_init(&ctlr->queue_lock); -- GitLab From 249dc49576fc953a7378b916c6a6d47ea81e4da2 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Wed, 15 Aug 2018 13:11:35 +0100 Subject: [PATCH 0108/1692] ASoC: dapm: Fix NULL pointer deference on CODEC to CODEC DAIs Commit a655de808cbde ("ASoC: core: Allow topology to override machine driver FE DAI link config.") caused soc_dai_hw_params to be come dependent on the substream private_data being set with a pointer to the snd_soc_pcm_runtime. Currently, CODEC to CODEC links don't set this, which causes a NULL pointer dereference: [<4069de54>] (soc_dai_hw_params) from [<40694b68>] (snd_soc_dai_link_event+0x1a0/0x380) Since the ASoC core in general assumes that the substream private_data will be set to a pointer to the snd_soc_pcm_runtime, update the CODEC to CODEC links to respect this. Signed-off-by: Charles Keepax Signed-off-by: Mark Brown --- include/sound/soc-dapm.h | 1 + sound/soc/soc-core.c | 4 ++-- sound/soc/soc-dapm.c | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index af9ef16cc34d..fdaaafdc7a00 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -407,6 +407,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card); void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card); int snd_soc_dapm_new_pcm(struct snd_soc_card *card, + struct snd_soc_pcm_runtime *rtd, const struct snd_soc_pcm_stream *params, unsigned int num_params, struct snd_soc_dapm_widget *source, diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 9cfe10d8040c..473eefe8658e 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1447,7 +1447,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card, sink = codec_dai->playback_widget; source = cpu_dai->capture_widget; if (sink && source) { - ret = snd_soc_dapm_new_pcm(card, dai_link->params, + ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params, dai_link->num_params, source, sink); if (ret != 0) { @@ -1460,7 +1460,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card, sink = cpu_dai->playback_widget; source = codec_dai->capture_widget; if (sink && source) { - ret = snd_soc_dapm_new_pcm(card, dai_link->params, + ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params, dai_link->num_params, source, sink); if (ret != 0) { diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 7e96793050c9..461d951917c0 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -3652,6 +3652,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, { 
struct snd_soc_dapm_path *source_p, *sink_p; struct snd_soc_dai *source, *sink; + struct snd_soc_pcm_runtime *rtd = w->priv; const struct snd_soc_pcm_stream *config = w->params + w->params_select; struct snd_pcm_substream substream; struct snd_pcm_hw_params *params = NULL; @@ -3711,6 +3712,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, goto out; } substream.runtime = runtime; + substream.private_data = rtd; switch (event) { case SND_SOC_DAPM_PRE_PMU: @@ -3895,6 +3897,7 @@ snd_soc_dapm_alloc_kcontrol(struct snd_soc_card *card, } int snd_soc_dapm_new_pcm(struct snd_soc_card *card, + struct snd_soc_pcm_runtime *rtd, const struct snd_soc_pcm_stream *params, unsigned int num_params, struct snd_soc_dapm_widget *source, @@ -3963,6 +3966,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card, w->params = params; w->num_params = num_params; + w->priv = rtd; ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL); if (ret) -- GitLab From 484004339d4514fde425f6e8a9f6a6cc979bb0c3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 15 Aug 2018 18:17:03 +0200 Subject: [PATCH 0109/1692] mac80211_hwsim: require at least one channel Syzbot continues to try to create mac80211_hwsim radios, and manages to pass parameters that are later checked with WARN_ON in cfg80211 - catch another one in hwsim directly. Reported-by: syzbot+2a12f11c306afe871c1f@syzkaller.appspotmail.com Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 18e819d964f1..fe1b0108f06d 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3194,6 +3194,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_CHANNELS]) param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); + if (param.channels < 1) { + GENL_SET_ERR_MSG(info, "must have at least one channel"); + return -EINVAL; + } + if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { GENL_SET_ERR_MSG(info, "too many channels specified"); return -EINVAL; -- GitLab From fc0c5a9d1dabba39058e91987766ec24988ae1fa Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Aug 2018 21:12:07 +0100 Subject: [PATCH 0110/1692] drm/i915: Only skip connector output for disable_display We want to add no connectors, encoders or crtcs if the display is disabled, but we still need to hook up any existing HW so that we can power it down. 
Signed-off-by: Chris Wilson Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20180815201207.2203-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_display.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5138a921e0b5..3b41a247943a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -14128,6 +14128,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_pps_init(dev_priv); + if (INTEL_INFO(dev_priv)->num_pipes == 0) + return; + /* * intel_edp_init_connector() depends on this completing first, to * prevent the registeration of both eDP and LVDS and the incorrect @@ -15206,9 +15209,6 @@ int intel_modeset_init(struct drm_device *dev) intel_init_pm(dev_priv); - if (INTEL_INFO(dev_priv)->num_pipes == 0) - return 0; - /* * There may be no VBT; and if the BIOS enabled SSC we can * just keep using it to avoid unnecessary flicker. Whereas if the -- GitLab From 805615dae0572087d2def1625496a72b8d6dbd25 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Aug 2018 19:42:51 +0100 Subject: [PATCH 0111/1692] drm/i915: Remove useless error return from intel_init_mocs_engine() As the only error is for a programming error in constructing the static tables describing the register values, replace the error code propagation with an assert. Signed-off-by: Chris Wilson Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180815184251.5850-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_lrc.c | 6 +----- drivers/gpu/drm/i915/intel_mocs.c | 11 +++-------- drivers/gpu/drm/i915/intel_mocs.h | 2 +- 3 files changed, 5 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 3f90c74038ef..841895cfb05f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1769,11 +1769,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine) { - int ret; - - ret = intel_mocs_init_engine(engine); - if (ret) - return ret; + intel_mocs_init_engine(engine); intel_engine_reset_breadcrumbs(engine); diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 9f0bd6a4cb79..77e9871a8c9a 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -232,20 +232,17 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) * * This function simply emits a MI_LOAD_REGISTER_IMM command for the * given table starting at the given address. - * - * Return: 0 on success, otherwise the error status. 
*/ -int intel_mocs_init_engine(struct intel_engine_cs *engine) +void intel_mocs_init_engine(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_mocs_table table; unsigned int index; if (!get_mocs_settings(dev_priv, &table)) - return 0; + return; - if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES)) - return -ENODEV; + GEM_BUG_ON(table.size > GEN9_NUM_MOCS_ENTRIES); for (index = 0; index < table.size; index++) I915_WRITE(mocs_register(engine->id, index), @@ -262,8 +259,6 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine) for (; index < GEN9_NUM_MOCS_ENTRIES; index++) I915_WRITE(mocs_register(engine->id, index), table.table[0].control_value); - - return 0; } /** diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h index d1751f91c1a4..d89080d75b80 100644 --- a/drivers/gpu/drm/i915/intel_mocs.h +++ b/drivers/gpu/drm/i915/intel_mocs.h @@ -54,6 +54,6 @@ int intel_rcs_context_init_mocs(struct i915_request *rq); void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv); -int intel_mocs_init_engine(struct intel_engine_cs *engine); +void intel_mocs_init_engine(struct intel_engine_cs *engine); #endif -- GitLab From 0d22825255f25adb6a609f130b42c752d3fd0f5d Mon Sep 17 00:00:00 2001 From: Ryan Lee Date: Wed, 15 Aug 2018 18:53:38 -0700 Subject: [PATCH 0112/1692] ASoC: max98373: Added speaker FS gain cotnrol register to volatile. Signed-off-by: Ryan Lee Signed-off-by: Mark Brown --- sound/soc/codecs/max98373.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c index 92b7125ea169..2764fae69333 100644 --- a/sound/soc/codecs/max98373.c +++ b/sound/soc/codecs/max98373.c @@ -520,6 +520,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3: + case MAX98373_R203E_AMP_PATH_GAIN: case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK: case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK: case MAX98373_R20B6_BDE_CUR_STATE_READBACK: -- GitLab From 6f0a256253f48095ba2e5bcdfbed41f21643c105 Mon Sep 17 00:00:00 2001 From: Oder Chiou Date: Wed, 15 Aug 2018 14:47:49 +0800 Subject: [PATCH 0113/1692] ASoC: rt5514: Fix the issue of the delay volume applied again After our evaluation, we need to modify the default values to make sure the volume applied immediately. 
Signed-off-by: Oder Chiou Signed-off-by: Mark Brown --- sound/soc/codecs/rt5514.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c index dca82dd6e3bf..32fe76c3134a 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c @@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = { {RT5514_ANA_CTRL_LDO10, 0x00028604}, {RT5514_ANA_CTRL_ADCFED, 0x00000800}, {RT5514_ASRC_IN_CTRL1, 0x00000003}, - {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, - {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, + {RT5514_DOWNFILTER0_CTRL3, 0x10000342}, + {RT5514_DOWNFILTER1_CTRL3, 0x10000342}, }; static const struct reg_default rt5514_reg[] = { @@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = { {RT5514_ASRC_IN_CTRL1, 0x00000003}, {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, - {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, + {RT5514_DOWNFILTER0_CTRL3, 0x10000342}, {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f}, {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f}, - {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, + {RT5514_DOWNFILTER1_CTRL3, 0x10000342}, {RT5514_ANA_CTRL_LDO10, 0x00028604}, {RT5514_ANA_CTRL_LDO18_16, 0x02000345}, {RT5514_ANA_CTRL_ADC12, 0x0000a2a8}, -- GitLab From a4417b7b419a68540ad7945ac4efbb39d19afa63 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 16 Aug 2018 08:34:46 +0100 Subject: [PATCH 0114/1692] drm/i915: Stop holding a ref to the ppgtt from each vma The context owns both the ppgtt and the vma within it, and our activity tracking on the context ensures that we do not release active ppgtt. As the context fulfils our obligations for active memory tracking, we can relinquish the reference from the vma. This fixes a silly transient refleak from closed vma being kept alive until the entire system was idle, keeping all vm alive as well. Reported-by: Paulo Zanoni Testcase: igt/gem_ctx_create/files Fixes: 3365e2268b6b ("drm/i915: Lazily unbind vma on close") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Paulo Zanoni Reviewed-by: Mika Kuoppala Tested-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180816073448.19396-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_vma.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 274fd2a7bcb6..31efc971a3a8 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, vma->flags |= I915_VMA_GGTT; list_add(&vma->obj_link, &obj->vma_list); } else { - i915_ppgtt_get(i915_vm_to_ppgtt(vm)); list_add_tail(&vma->obj_link, &obj->vma_list); } @@ -810,9 +809,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) if (vma->obj) rb_erase(&vma->obj_node, &vma->obj->vma_tree); - if (!i915_vma_is_ggtt(vma)) - i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); - rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { GEM_BUG_ON(i915_gem_active_isset(&iter->base)); kfree(iter); -- GitLab From 07d805721938a35e695d9f89218a4b02f6a4b2c4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 16 Aug 2018 15:37:56 +0300 Subject: [PATCH 0115/1692] drm/i915: Introduce intel_runtime_pm_disable to pair intel_runtime_pm_enable Currently, we cancel the extra wakeref we have for !runtime-pm devices inside power_wells_fini_hw. 
However, this is not strictly paired with the acquisition of that wakeref in runtime_pm_enable (as the fini_hw may be called on errors paths before we even call runtime_pm_enable). Make the symmetry more explicit and include a check that we do release all of our rpm wakerefs. v2: Fixup transfer of ownership back to core whilst keeping our wakeref count balanced. Signed-off-by: Chris Wilson Cc: Imre Deak Reviewed-by: Imre Deak Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20180816123757.3286-1-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 21 ++++-------- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_runtime_pm.c | 43 +++++++++++++++++-------- 3 files changed, 37 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 41111f2a9c39..021304e252eb 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1281,6 +1281,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) */ if (INTEL_INFO(dev_priv)->num_pipes) drm_kms_helper_poll_init(dev); + + intel_runtime_pm_enable(dev_priv); } /** @@ -1289,6 +1291,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) */ static void i915_driver_unregister(struct drm_i915_private *dev_priv) { + intel_runtime_pm_disable(dev_priv); + intel_fbdev_unregister(dev_priv); intel_audio_deinit(dev_priv); @@ -1366,16 +1370,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_fini; pci_set_drvdata(pdev, &dev_priv->drm); - /* - * Disable the system suspend direct complete optimization, which can - * leave the device suspended skipping the driver's suspend handlers - * if the device was already runtime suspended. This is needed due to - * the difference in our runtime and system suspend sequence and - * becaue the HDA driver may require us to enable the audio power - * domain during system suspend. 
- */ - dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); - ret = i915_driver_init_early(dev_priv, ent); if (ret < 0) goto out_pci_disable; @@ -1408,8 +1402,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) i915_driver_register(dev_priv); - intel_runtime_pm_enable(dev_priv); - intel_init_ipc(dev_priv); intel_runtime_pm_put(dev_priv); @@ -1441,13 +1433,13 @@ void i915_driver_unload(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + i915_driver_unregister(dev_priv); if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - drm_atomic_helper_shutdown(dev); intel_gvt_cleanup(dev_priv); @@ -1474,6 +1466,7 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_cleanup_mmio(dev_priv); intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count)); } static void i915_driver_release(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1295bd8bcd7d..364fc2504fa4 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1959,6 +1959,7 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); void bxt_display_core_uninit(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); +void intel_runtime_pm_disable(struct drm_i915_private *dev_priv); const char * intel_display_power_domain_str(enum intel_display_power_domain domain); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index e209edbc561d..c0983f0e46ac 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -3793,29 +3793,19 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) */ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) { - struct device *kdev = &dev_priv->drm.pdev->dev; - /* * The i915.ko module is still not prepared to be loaded when * the power well is not enabled, so just enable it in case * we're going to unload/reload. - * The following also reacquires the RPM reference the core passed - * to the driver during loading, which is dropped in - * intel_runtime_pm_enable(). We have to hand back the control of the - * device to the core with this reference held. */ - intel_display_set_init_power(dev_priv, true); + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + + /* Keep the power well enabled, but cancel its rpm wakeref. */ + intel_runtime_pm_put(dev_priv); /* Remove the refcount we took to keep power well support disabled. */ if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - - /* - * Remove the refcount we took in intel_runtime_pm_enable() in case - * the platform doesn't support runtime PM. - */ - if (!HAS_RUNTIME_PM(dev_priv)) - pm_runtime_put(kdev); } /** @@ -4048,6 +4038,16 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) struct pci_dev *pdev = dev_priv->drm.pdev; struct device *kdev = &pdev->dev; + /* + * Disable the system suspend direct complete optimization, which can + * leave the device suspended skipping the driver's suspend handlers + * if the device was already runtime suspended. 
This is needed due to + * the difference in our runtime and system suspend sequence and + * becaue the HDA driver may require us to enable the audio power + * domain during system suspend. + */ + dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP); + pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */ pm_runtime_mark_last_busy(kdev); @@ -4074,3 +4074,18 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) */ pm_runtime_put_autosuspend(kdev); } + +void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + struct device *kdev = &pdev->dev; + + /* Transfer rpm ownership back to core */ + WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0, + "Failed to pass rpm ownership back to core\n"); + + pm_runtime_dont_use_autosuspend(kdev); + + if (!HAS_RUNTIME_PM(dev_priv)) + pm_runtime_put(kdev); +} -- GitLab From 2cd9a689e97b460489348aee89d72a812c3c1066 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 16 Aug 2018 15:37:57 +0300 Subject: [PATCH 0116/1692] drm/i915: Refactor intel_display_set_init_power() logic The device global init_power_on flag is somewhat arbitrary and makes debugging power refcounting problems difficult. Instead arrange things so that all display power domain get has a corresponding put call. After this change we have the following sequences: driver loading: intel_power_domains_init_hw(); intel_power_domains_enable(); driver unloading: intel_power_domains_disable(); intel_power_domains_fini_hw(); system suspend: intel_power_domains_disable(); intel_power_domains_suspend(); system resume: intel_power_domains_resume(); intel_power_domains_enable(); at other times while the driver is loaded: intel_display_power_get(); ... intel_display_power_put(); Suggested-by: Chris Wilson Cc: Chris Wilson Signed-off-by: Imre Deak Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180816123757.3286-2-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 65 ++++++------ drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/intel_display.c | 5 +- drivers/gpu/drm/i915/intel_drv.h | 15 ++- drivers/gpu/drm/i915/intel_runtime_pm.c | 133 +++++++++++++++++------- 5 files changed, 142 insertions(+), 78 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 021304e252eb..35a012ffc03b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1282,6 +1282,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) if (INTEL_INFO(dev_priv)->num_pipes) drm_kms_helper_poll_init(dev); + intel_power_domains_enable(dev_priv); intel_runtime_pm_enable(dev_priv); } @@ -1292,6 +1293,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) static void i915_driver_unregister(struct drm_i915_private *dev_priv) { intel_runtime_pm_disable(dev_priv); + intel_power_domains_disable(dev_priv); intel_fbdev_unregister(dev_priv); intel_audio_deinit(dev_priv); @@ -1374,7 +1376,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto out_pci_disable; - intel_runtime_pm_get(dev_priv); + disable_rpm_wakeref_asserts(dev_priv); ret = i915_driver_init_mmio(dev_priv); if (ret < 0) @@ -1404,7 +1406,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) intel_init_ipc(dev_priv); - intel_runtime_pm_put(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); i915_welcome_messages(dev_priv); @@ -1415,7 +1417,7 @@ int i915_driver_load(struct pci_dev *pdev, const 
struct pci_device_id *ent) out_cleanup_mmio: i915_driver_cleanup_mmio(dev_priv); out_runtime_pm_put: - intel_runtime_pm_put(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); i915_driver_cleanup_early(dev_priv); out_pci_disable: pci_disable_device(pdev); @@ -1433,7 +1435,7 @@ void i915_driver_unload(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + disable_rpm_wakeref_asserts(dev_priv); i915_driver_unregister(dev_priv); @@ -1465,7 +1467,8 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_cleanup_hw(dev_priv); i915_driver_cleanup_mmio(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + enable_rpm_wakeref_asserts(dev_priv); + WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count)); } @@ -1575,7 +1578,7 @@ static int i915_drm_suspend(struct drm_device *dev) /* We do a lot of poking in a lot of registers, make sure they work * properly. */ - intel_display_set_init_power(dev_priv, true); + intel_power_domains_disable(dev_priv); drm_kms_helper_poll_disable(dev); @@ -1612,6 +1615,18 @@ static int i915_drm_suspend(struct drm_device *dev) return 0; } +static enum i915_drm_suspend_mode +get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate) +{ + if (hibernate) + return I915_DRM_SUSPEND_HIBERNATE; + + if (suspend_to_idle(dev_priv)) + return I915_DRM_SUSPEND_IDLE; + + return I915_DRM_SUSPEND_MEM; +} + static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -1622,21 +1637,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) i915_gem_suspend_late(dev_priv); - intel_display_set_init_power(dev_priv, false); intel_uncore_suspend(dev_priv); - /* - * In case of firmware assisted context save/restore don't manually - * deinit the power domains. This also means the CSR/DMC firmware will - * stay active, it will power down any HW resources as required and - * also enable deeper system power states that would be blocked if the - * firmware was inactive. - */ - if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) || - dev_priv->csr.dmc_payload == NULL) { - intel_power_domains_suspend(dev_priv); - dev_priv->power_domains_suspended = true; - } + intel_power_domains_suspend(dev_priv, + get_suspend_mode(dev_priv, hibernation)); ret = 0; if (IS_GEN9_LP(dev_priv)) @@ -1648,10 +1652,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) if (ret) { DRM_ERROR("Suspend complete failed: %d\n", ret); - if (dev_priv->power_domains_suspended) { - intel_power_domains_init_hw(dev_priv, true); - dev_priv->power_domains_suspended = false; - } + intel_power_domains_resume(dev_priv); goto out; } @@ -1768,6 +1769,8 @@ static int i915_drm_resume(struct drm_device *dev) intel_opregion_notify_adapter(dev_priv, PCI_D0); + intel_power_domains_enable(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); return 0; @@ -1802,7 +1805,7 @@ static int i915_drm_resume_early(struct drm_device *dev) ret = pci_set_power_state(pdev, PCI_D0); if (ret) { DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); - goto out; + return ret; } /* @@ -1818,10 +1821,8 @@ static int i915_drm_resume_early(struct drm_device *dev) * depend on the device enable refcount we can't anyway depend on them * disabling/enabling the device. 
*/ - if (pci_enable_device(pdev)) { - ret = -EIO; - goto out; - } + if (pci_enable_device(pdev)) + return -EIO; pci_set_master(pdev); @@ -1844,18 +1845,12 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_uncore_sanitize(dev_priv); - if (dev_priv->power_domains_suspended) - intel_power_domains_init_hw(dev_priv, true); - else - intel_display_set_init_power(dev_priv, true); + intel_power_domains_resume(dev_priv); intel_engines_sanitize(dev_priv); enable_rpm_wakeref_asserts(dev_priv); -out: - dev_priv->power_domains_suspended = false; - return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5fa13887b911..74482753a04e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -935,8 +935,8 @@ struct i915_power_domains { * Power wells needed for initialization at driver init and suspend * time are on. They are kept on until after the first modeset. */ - bool init_power_on; bool initializing; + bool display_core_suspended; int power_well_count; struct mutex lock; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3b41a247943a..592b847db88e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15846,6 +15846,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct intel_encoder *encoder; int i; + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_early_display_was(dev_priv); intel_modeset_readout_hw_state(dev); @@ -15900,7 +15902,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, if (WARN_ON(put_domains)) modeset_put_power_domains(dev_priv, put_domains); } - intel_display_set_init_power(dev_priv, false); + + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_verify_state(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 364fc2504fa4..b2ce343b6027 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1954,7 +1954,18 @@ int intel_power_domains_init(struct drm_i915_private *); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); -void intel_power_domains_suspend(struct drm_i915_private *dev_priv); +void intel_power_domains_enable(struct drm_i915_private *dev_priv); +void intel_power_domains_disable(struct drm_i915_private *dev_priv); + +enum i915_drm_suspend_mode { + I915_DRM_SUSPEND_IDLE, + I915_DRM_SUSPEND_MEM, + I915_DRM_SUSPEND_HIBERNATE, +}; + +void intel_power_domains_suspend(struct drm_i915_private *dev_priv, + enum i915_drm_suspend_mode); +void intel_power_domains_resume(struct drm_i915_private *dev_priv); void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); void bxt_display_core_uninit(struct drm_i915_private *dev_priv); @@ -2037,8 +2048,6 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv); void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); void intel_runtime_pm_put(struct drm_i915_private *dev_priv); -void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); - void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask); bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, diff --git 
a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index c0983f0e46ac..6153d5be5cf6 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -257,30 +257,6 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, return ret; } -/** - * intel_display_set_init_power - set the initial power domain state - * @dev_priv: i915 device instance - * @enable: whether to enable or disable the initial power domain state - * - * For simplicity our driver load/unload and system suspend/resume code assumes - * that all power domains are always enabled. This functions controls the state - * of this little hack. While the initial power domain state is enabled runtime - * pm is effectively disabled. - */ -void intel_display_set_init_power(struct drm_i915_private *dev_priv, - bool enable) -{ - if (dev_priv->power_domains.init_power_on == enable) - return; - - if (enable) - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - else - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - - dev_priv->power_domains.init_power_on = enable; -} - /* * Starting with Haswell, we have a "Power Down Well" that can be turned off * when not needed anymore. We have 4 registers that can request the power well @@ -3750,6 +3726,10 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) * domains (and not in the INIT domain) are referenced or disabled during the * modeset state HW readout. After that the reference count of each power well * must match its HW enabled state, see intel_power_domains_verify_state(). + * + * It will return with power domains disabled (to be enabled later by + * intel_power_domains_enable()) and must be paired with + * intel_power_domains_fini_hw(). */ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) { @@ -3775,8 +3755,13 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) mutex_unlock(&power_domains->lock); } - /* For now, we need the power well to be always enabled. */ - intel_display_set_init_power(dev_priv, true); + /* + * Keep all power wells enabled for any dependent HW access during + * initialization and to make sure we keep BIOS enabled display HW + * resources powered until display HW readout is complete. We drop + * this reference in intel_power_domains_enable(). + */ + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); /* Disable power support if the user asked so. */ if (!i915_modparams.disable_power_well) intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); @@ -3790,16 +3775,13 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) * * De-initializes the display power domain HW state. It also ensures that the * device stays powered up so that the driver can be reloaded. + * + * It must be called with power domains already disabled (after a call to + * intel_power_domains_disable()) and must be paired with + * intel_power_domains_init_hw(). */ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) { - /* - * The i915.ko module is still not prepared to be loaded when - * the power well is not enabled, so just enable it in case - * we're going to unload/reload. - */ - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - /* Keep the power well enabled, but cancel its rpm wakeref. 
*/ intel_runtime_pm_put(dev_priv); @@ -3808,18 +3790,67 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); } +/** + * intel_power_domains_enable - enable toggling of display power wells + * @dev_priv: i915 device instance + * + * Enable the ondemand enabling/disabling of the display power wells. Note that + * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled + * only at specific points of the display modeset sequence, thus they are not + * affected by the intel_power_domains_enable()/disable() calls. The purpose + * of these function is to keep the rest of power wells enabled until the end + * of display HW readout (which will acquire the power references reflecting + * the current HW state). + */ +void intel_power_domains_enable(struct drm_i915_private *dev_priv) +{ + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); +} + +/** + * intel_power_domains_disable - disable toggling of display power wells + * @dev_priv: i915 device instance + * + * Disable the ondemand enabling/disabling of the display power wells. See + * intel_power_domains_enable() for which power wells this call controls. + */ +void intel_power_domains_disable(struct drm_i915_private *dev_priv) +{ + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); +} + /** * intel_power_domains_suspend - suspend power domain state * @dev_priv: i915 device instance + * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) * * This function prepares the hardware power domain state before entering - * system suspend. It must be paired with intel_power_domains_init_hw(). + * system suspend. + * + * It must be called with power domains already disabled (after a call to + * intel_power_domains_disable()) and paired with intel_power_domains_resume(). */ -void intel_power_domains_suspend(struct drm_i915_private *dev_priv) +void intel_power_domains_suspend(struct drm_i915_private *dev_priv, + enum i915_drm_suspend_mode suspend_mode) { + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + /* + * In case of firmware assisted context save/restore don't manually + * deinit the power domains. This also means the CSR/DMC firmware will + * stay active, it will power down any HW resources as required and + * also enable deeper system power states that would be blocked if the + * firmware was inactive. + */ + if (!IS_GEN9_LP(dev_priv) && suspend_mode == I915_DRM_SUSPEND_IDLE && + dev_priv->csr.dmc_payload != NULL) + return; + /* * Even if power well support was disabled we still want to disable - * power wells while we are system suspended. + * power wells if power domains must be deinitialized for suspend. */ if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); @@ -3832,6 +3863,32 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) skl_display_core_uninit(dev_priv); else if (IS_GEN9_LP(dev_priv)) bxt_display_core_uninit(dev_priv); + + power_domains->display_core_suspended = true; +} + +/** + * intel_power_domains_resume - resume power domain state + * @dev_priv: i915 device instance + * + * This function resume the hardware power domain state during system resume. + * + * It will return with power domain support disabled (to be enabled later by + * intel_power_domains_enable()) and must be paired with + * intel_power_domains_suspend(). 
+ */ +void intel_power_domains_resume(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + if (power_domains->display_core_suspended) { + intel_power_domains_init_hw(dev_priv, true); + power_domains->display_core_suspended = false; + + return; + } + + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); } static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) @@ -4030,8 +4087,8 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) * This function enables runtime pm at the end of the driver load sequence. * * Note that this function does currently not enable runtime pm for the - * subordinate display power domains. That is only done on the first modeset - * using intel_display_set_init_power(). + * subordinate display power domains. That is done by + * intel_power_domains_enable(). */ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) { -- GitLab From f5133cca38f5cad3e8eff5f75e321cb592c3b4b0 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 27 Jul 2018 12:36:45 -0700 Subject: [PATCH 0117/1692] drm/i915: make PCH_GMBUS* definitions private to gvt This is the only place that they are being used - the others use the GMBUS* macros that rely on dev_priv being already properly initialized. Cc: intel-gvt-dev@lists.freedesktop.org Cc: Zhenyu Wang Signed-off-by: Lucas De Marchi Reviewed-by: Zhenyu Wang Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180727193647.8639-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gvt/reg.h | 7 +++++++ drivers/gpu/drm/i915/i915_reg.h | 7 ------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index d4f7ce6dc1d7..fd5fd25d0a0f 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -77,4 +77,11 @@ #define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \ I915_GTT_PAGE_SIZE) +#define PCH_GMBUS0 _MMIO(0xc5100) +#define PCH_GMBUS1 _MMIO(0xc5104) +#define PCH_GMBUS2 _MMIO(0xc5108) +#define PCH_GMBUS3 _MMIO(0xc510c) +#define PCH_GMBUS4 _MMIO(0xc5110) +#define PCH_GMBUS5 _MMIO(0xc5120) + #endif diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0c9f03dda569..14b47f431a23 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7790,13 +7790,6 @@ enum { #define PCH_GPIOE _MMIO(0xc5020) #define PCH_GPIOF _MMIO(0xc5024) -#define PCH_GMBUS0 _MMIO(0xc5100) -#define PCH_GMBUS1 _MMIO(0xc5104) -#define PCH_GMBUS2 _MMIO(0xc5108) -#define PCH_GMBUS3 _MMIO(0xc510c) -#define PCH_GMBUS4 _MMIO(0xc5110) -#define PCH_GMBUS5 _MMIO(0xc5120) - #define _PCH_DPLL_A 0xc6014 #define _PCH_DPLL_B 0xc6018 #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) -- GitLab From 336662e5e3c90e2b6d4b2c2a773f87218baa8a61 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 27 Jul 2018 12:36:46 -0700 Subject: [PATCH 0118/1692] drm/i915/gvt: use its own define for gpio The definition on i915_reg.h is going to change to depend on dev_priv->gpio_mmio_base being properly initialized. Define our own macros since init_generic_mmio_info() is called before than gpio_mmio_base being set. 
Cc: intel-gvt-dev@lists.freedesktop.org Cc: Zhenyu Wang Signed-off-by: Lucas De Marchi Reviewed-by: Zhenyu Wang Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180727193647.8639-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/gvt/reg.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 749c704ca304..c455d7e71a5b 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2119,7 +2119,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read, gmbus_mmio_write); - MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); + MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index fd5fd25d0a0f..c9d6cf6cc623 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -77,6 +77,8 @@ #define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \ I915_GTT_PAGE_SIZE) +#define PCH_GPIO_BASE _MMIO(0xc5010) + #define PCH_GMBUS0 _MMIO(0xc5100) #define PCH_GMBUS1 _MMIO(0xc5104) #define PCH_GMBUS2 _MMIO(0xc5108) -- GitLab From dce888798d3ed1c7fea2d45f5f757a749a9e2584 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 27 Jul 2018 12:36:47 -0700 Subject: [PATCH 0119/1692] drm/i915: remove confusing GPIO vs PCH_GPIO Instead of defining all registers twice, define just a PCH_GPIO_BASE that has the same address as PCH_GPIO_A and use that to calculate all the others. This also brings VLV and !HAS_GMCH_DISPLAY in line, doing the same thing. v2: Fix GMBUS registers to be relative to gpio base; create GPIO() macro to return a particular gpio address and move the enum out of i915_reg.h (suggested by Jani) v3: Move base offset inside the GPIO() macro so the GMBUS defines don't actually need to be changed (suggested by Daniel/Ville) v4: Move definition of i915_gpio to intel_display.h and remove GMBUS/GPIO handling from gvt since now they have their own defines. Signed-off-by: Lucas De Marchi Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180727193647.8639-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 3 ++- drivers/gpu/drm/i915/i915_reg.h | 24 +++++------------------- drivers/gpu/drm/i915/intel_display.h | 16 ++++++++++++++++ drivers/gpu/drm/i915/intel_i2c.c | 16 ++++++++-------- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 74482753a04e..e5b9d3c77139 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1643,7 +1643,8 @@ struct drm_i915_private { struct mutex gmbus_mutex; /** - * Base address of the gmbus and gpio block. + * Base address of where the gmbus and gpio blocks are located (either + * on PCH or on SoC for platforms without PCH). 
*/ uint32_t gpio_mmio_base; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 14b47f431a23..5121b9f072c6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3082,18 +3082,9 @@ enum i915_power_well_id { /* * GPIO regs */ -#define GPIOA _MMIO(0x5010) -#define GPIOB _MMIO(0x5014) -#define GPIOC _MMIO(0x5018) -#define GPIOD _MMIO(0x501c) -#define GPIOE _MMIO(0x5020) -#define GPIOF _MMIO(0x5024) -#define GPIOG _MMIO(0x5028) -#define GPIOH _MMIO(0x502c) -#define GPIOJ _MMIO(0x5034) -#define GPIOK _MMIO(0x5038) -#define GPIOL _MMIO(0x503C) -#define GPIOM _MMIO(0x5040) +#define GPIO(gpio) _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \ + 4 * (gpio)) + # define GPIO_CLOCK_DIR_MASK (1 << 0) # define GPIO_CLOCK_DIR_IN (0 << 1) # define GPIO_CLOCK_DIR_OUT (1 << 1) @@ -7489,6 +7480,8 @@ enum { /* PCH */ +#define PCH_DISPLAY_BASE 0xc0000u + /* south display engine interrupt: IBX */ #define SDE_AUDIO_POWER_D (1 << 27) #define SDE_AUDIO_POWER_C (1 << 26) @@ -7783,13 +7776,6 @@ enum { #define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) #define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) -#define PCH_GPIOA _MMIO(0xc5010) -#define PCH_GPIOB _MMIO(0xc5014) -#define PCH_GPIOC _MMIO(0xc5018) -#define PCH_GPIOD _MMIO(0xc501c) -#define PCH_GPIOE _MMIO(0xc5020) -#define PCH_GPIOF _MMIO(0xc5024) - #define _PCH_DPLL_A 0xc6014 #define _PCH_DPLL_B 0xc6018 #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 6a28bac71128..a04c5a495a2b 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -25,6 +25,22 @@ #ifndef _INTEL_DISPLAY_H_ #define _INTEL_DISPLAY_H_ +enum i915_gpio { + GPIOA, + GPIOB, + GPIOC, + GPIOD, + GPIOE, + GPIOF, + GPIOG, + GPIOH, + __GPIOI_UNUSED, + GPIOJ, + GPIOK, + GPIOL, + GPIOM, +}; + enum pipe { INVALID_PIPE = -1, diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index bef32b7c248e..33d87ab93fdd 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -37,7 +37,7 @@ struct gmbus_pin { const char *name; - i915_reg_t reg; + enum i915_gpio gpio; }; /* Map gmbus pin pairs to names and registers. */ @@ -121,8 +121,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, else size = ARRAY_SIZE(gmbus_pins); - return pin < size && - i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg); + return pin < size && get_gmbus_pin(dev_priv, pin)->name; } /* Intel GPIO access functions */ @@ -292,8 +291,7 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin) algo = &bus->bit_algo; - bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base + - i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg)); + bus->gpio_reg = GPIO(get_gmbus_pin(dev_priv, pin)->gpio); bus->adapter.algo_data = algo; algo->setsda = set_data; algo->setscl = set_clock; @@ -825,9 +823,11 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; else if (!HAS_GMCH_DISPLAY(dev_priv)) - dev_priv->gpio_mmio_base = - i915_mmio_reg_offset(PCH_GPIOA) - - i915_mmio_reg_offset(GPIOA); + /* + * Broxton uses the same PCH offsets for South Display Engine, + * even though it doesn't have a PCH. 
+ */ + dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE; mutex_init(&dev_priv->gmbus_mutex); init_waitqueue_head(&dev_priv->gmbus_wait_queue); -- GitLab From 66fc82960c5c68eecdcf4568e5907d3702e4fcdc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Aug 2018 14:58:27 +0100 Subject: [PATCH 0120/1692] drm/i915/execlists: Include reset depth in traces Show the reset depth (the tasklet disable count) in the GEM_TRACE to indicate when we might not expect tasklets to be flushed. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180815135827.25869-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_lrc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 841895cfb05f..36050f085071 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1828,7 +1828,8 @@ execlists_reset_prepare(struct intel_engine_cs *engine) struct i915_request *request, *active; unsigned long flags; - GEM_TRACE("%s\n", engine->name); + GEM_TRACE("%s: depth<-%d\n", engine->name, + atomic_read(&execlists->tasklet.count)); /* * Prevent request submission to the hardware until we have @@ -1976,7 +1977,8 @@ static void execlists_reset_finish(struct intel_engine_cs *engine) */ __tasklet_enable_sync_once(&execlists->tasklet); - GEM_TRACE("%s\n", engine->name); + GEM_TRACE("%s: depth->%d\n", engine->name, + atomic_read(&execlists->tasklet.count)); } static int intel_logical_ring_emit_pdps(struct i915_request *rq) -- GitLab From 4769c003e0fcff0ee001a9102e2605bdaa5880f0 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 8 Aug 2018 01:07:03 -0700 Subject: [PATCH 0121/1692] ARM: OMAP2+: Fix null hwmod for ti-sysc debug We may call omap_hwmod_parse_module_range() with no hwmod allocated yet and may have debug enabled. Let's fix this by checking for hwmod before trying to use it's name. Fixes: 6c72b3550672 ("ARM: OMAP2+: Parse module IO range from dts for legacy Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 2ceffd85dd3d..7f759abcf49c 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2220,7 +2220,7 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh, size = be32_to_cpup(ranges); pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", - oh->name, np->name, base, size); + oh ? oh->name : "", np->name, base, size); res->start = base; res->end = base + size - 1; -- GitLab From 1dbcb97c656eed1a244c960b8b3a469c3d20ce7b Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 8 Aug 2018 01:07:04 -0700 Subject: [PATCH 0122/1692] ARM: OMAP2+: Fix module address for modules using mpu_rt_idx If we use device tree data for a module interconnect target we want to map the control registers from the module start. Legacy hwmod platform data however is using child IP offsets for cpsw module with mpu_rt_idx. In cases where we have the interconnect target module already using device tree data with legacy hwmod platform data still around, the sysc register area is not adjusted for mpu_rt_idx causing wrong registers being accessed. Let's fix the issue for mixed dts and platform data mode by ioremapping the module registers using child IP offset if mpu_rt_idx is set. For device tree only data there's no reason to use mpu_rt_idx. 
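The lookup relied on here can be sketched as a small standalone helper, shown purely as an illustration under the assumption of a single child IP node (the real fix, omap_hwmod_fix_mpu_rt_idx(), is in the diff that follows): take the first child of the module's device node and translate its reg entry at index mpu_rt_idx into a struct resource.

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>

/* Illustrative only: map the idx-th "reg" entry of np's first child. */
static int child_reg_to_resource(struct device_node *np, int idx,
				 struct resource *res)
{
	struct device_node *child;
	int ret;

	child = of_get_next_child(np, NULL);
	if (!child)
		return -ENODEV;

	ret = of_address_to_resource(child, idx, res);
	of_node_put(child);

	return ret;
}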
Fixes: 6c72b3550672 ("ARM: OMAP2+: Parse module IO range from dts for legacy "ti,hwmods" support") Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod.c | 37 ++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 7f759abcf49c..cd65ea4e9c54 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2160,6 +2160,37 @@ static int of_dev_hwmod_lookup(struct device_node *np, return -ENODEV; } +/** + * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets + * + * @oh: struct omap_hwmod * + * @np: struct device_node * + * + * Fix up module register offsets for modules with mpu_rt_idx. + * Only needed for cpsw with interconnect target module defined + * in device tree while still using legacy hwmod platform data + * for rev, sysc and syss registers. + * + * Can be removed when all cpsw hwmod platform data has been + * dropped. + */ +static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh, + struct device_node *np, + struct resource *res) +{ + struct device_node *child = NULL; + int error; + + child = of_get_next_child(np, child); + if (!child) + return; + + error = of_address_to_resource(child, oh->mpu_rt_idx, res); + if (error) + pr_err("%s: error mapping mpu_rt_idx: %i\n", + __func__, error); +} + /** * omap_hwmod_parse_module_range - map module IO range from device tree * @oh: struct omap_hwmod * @@ -2222,6 +2253,12 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh, pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", oh ? oh->name : "", np->name, base, size); + if (oh && oh->mpu_rt_idx) { + omap_hwmod_fix_mpu_rt_idx(oh, np, res); + + return 0; + } + res->start = base; res->end = base + size - 1; res->flags = IORESOURCE_MEM; -- GitLab From 0ef8e3bb974af56346b34393e643d491d9141c66 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 8 Aug 2018 01:07:05 -0700 Subject: [PATCH 0123/1692] bus: ti-sysc: Fix module register ioremap for larger offsets We can have the interconnect target module control registers pretty much anywhere within the module range. The current code attempts an incomplete optimization of the ioremap size but does it wrong and it only works for registers at the beginning of the module. Let's just use the largest control register to calculate the ioremap size. The ioremapped range is for most part cached anyways so there is no need for size optimization. Let's also update the comments accordingly. Fixes: 0eecc636e5a2 ("bus: ti-sysc: Add minimal TI sysc interconnect target driver") Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 80d60f43db56..b31bf03ea497 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -490,32 +490,29 @@ static int sysc_check_registers(struct sysc *ddata) /** * syc_ioremap - ioremap register space for the interconnect target module - * @ddata: deviec driver data + * @ddata: device driver data * * Note that the interconnect target module registers can be anywhere - * within the first child device address space. For example, SGX has - * them at offset 0x1fc00 in the 32MB module address space. We just - * what we need around the interconnect target module registers. + * within the interconnect target module range. For example, SGX has + * them at offset 0x1fc00 in the 32MB module address space. 
And cpsw + * has them at offset 0x1200 in the CPSW_WR child. Usually the + * the interconnect target module registers are at the beginning of + * the module range though. */ static int sysc_ioremap(struct sysc *ddata) { - u32 size = 0; - - if (ddata->offsets[SYSC_SYSSTATUS] >= 0) - size = ddata->offsets[SYSC_SYSSTATUS]; - else if (ddata->offsets[SYSC_SYSCONFIG] >= 0) - size = ddata->offsets[SYSC_SYSCONFIG]; - else if (ddata->offsets[SYSC_REVISION] >= 0) - size = ddata->offsets[SYSC_REVISION]; - else - return -EINVAL; + int size; - size &= 0xfff00; - size += SZ_256; + size = max3(ddata->offsets[SYSC_REVISION], + ddata->offsets[SYSC_SYSCONFIG], + ddata->offsets[SYSC_SYSSTATUS]); + + if (size < 0 || (size + sizeof(u32)) > ddata->module_size) + return -EINVAL; ddata->module_va = devm_ioremap(ddata->dev, ddata->module_pa, - size); + size + sizeof(u32)); if (!ddata->module_va) return -EIO; -- GitLab From 8ecebf4d767e2307a946c8905278d6358eda35c3 Mon Sep 17 00:00:00 2001 From: Robbie Ko Date: Mon, 6 Aug 2018 10:30:30 +0800 Subject: [PATCH 0124/1692] Btrfs: fix unexpected failure of nocow buffered writes after snapshotting when low on space Commit e9894fd3e3b3 ("Btrfs: fix snapshot vs nocow writting") forced nocow writes to fallback to COW, during writeback, when a snapshot is created. This resulted in writes made before creating the snapshot to unexpectedly fail with ENOSPC during writeback when success (0) was returned to user space through the write system call. The steps leading to this problem are: 1. When it's not possible to allocate data space for a write, the buffered write path checks if a NOCOW write is possible. If it is, it will not reserve space and success (0) is returned to user space. 2. Then when a snapshot is created, the root's will_be_snapshotted atomic is incremented and writeback is triggered for all inode's that belong to the root being snapshotted. Incrementing that atomic forces all previous writes to fallback to COW during writeback (running delalloc). 3. This results in the writeback for the inodes to fail and therefore setting the ENOSPC error in their mappings, so that a subsequent fsync on them will report the error to user space. So it's not a completely silent data loss (since fsync will report ENOSPC) but it's a very unexpected and undesirable behaviour, because if a clean shutdown/unmount of the filesystem happens without previous calls to fsync, it is expected to have the data present in the files after mounting the filesystem again. So fix this by adding a new atomic named snapshot_force_cow to the root structure which prevents this behaviour and works the following way: 1. It is incremented when we start to create a snapshot after triggering writeback and before waiting for writeback to finish. 2. This new atomic is now what is used by writeback (running delalloc) to decide whether we need to fallback to COW or not. Because we incremented this new atomic after triggering writeback in the snapshot creation ioctl, we ensure that all buffered writes that happened before snapshot creation will succeed and not fallback to COW (which would make them fail with ENOSPC). 3. The existing atomic, will_be_snapshotted, is kept because it is used to force new buffered writes, that start after we started snapshotting, to reserve data space even when NOCOW is possible. This makes these writes fail early with ENOSPC when there's no available space to allocate, preventing the unexpected behaviour of writeback later failing with ENOSPC due to a fallback to COW mode. 
Fixes: e9894fd3e3b3 ("Btrfs: fix snapshot vs nocow writting") Signed-off-by: Robbie Ko Reviewed-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 1 + fs/btrfs/inode.c | 25 ++++--------------------- fs/btrfs/ioctl.c | 16 ++++++++++++++++ 4 files changed, 22 insertions(+), 21 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 318be7864072..a67cc190a84b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1280,6 +1280,7 @@ struct btrfs_root { int send_in_progress; struct btrfs_subvolume_writers *subv_writers; atomic_t will_be_snapshotted; + atomic_t snapshot_force_cow; /* For qgroup metadata reserved space */ spinlock_t qgroup_meta_rsv_lock; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5124c15705ce..05dc3c17cb62 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1187,6 +1187,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, atomic_set(&root->log_batch, 0); refcount_set(&root->refs, 1); atomic_set(&root->will_be_snapshotted, 0); + atomic_set(&root->snapshot_force_cow, 0); root->log_transid = 0; root->log_transid_committed = -1; root->last_log_commit = 0; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3f51ddc18f98..c6d8c5d19ff0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1271,7 +1271,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, u64 disk_num_bytes; u64 ram_bytes; int extent_type; - int ret, err; + int ret; int type; int nocow; int check_prev = 1; @@ -1403,11 +1403,8 @@ static noinline int run_delalloc_nocow(struct inode *inode, * if there are pending snapshots for this root, * we fall into common COW way. */ - if (!nolock) { - err = btrfs_start_write_no_snapshotting(root); - if (!err) - goto out_check; - } + if (!nolock && atomic_read(&root->snapshot_force_cow)) + goto out_check; /* * force cow if csum exists in the range. * this ensure that csum for a given extent are @@ -1416,9 +1413,6 @@ static noinline int run_delalloc_nocow(struct inode *inode, ret = csum_exist_in_range(fs_info, disk_bytenr, num_bytes); if (ret) { - if (!nolock) - btrfs_end_write_no_snapshotting(root); - /* * ret could be -EIO if the above fails to read * metadata. 
@@ -1431,11 +1425,8 @@ static noinline int run_delalloc_nocow(struct inode *inode, WARN_ON_ONCE(nolock); goto out_check; } - if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) { - if (!nolock) - btrfs_end_write_no_snapshotting(root); + if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) goto out_check; - } nocow = 1; } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { extent_end = found_key.offset + @@ -1448,8 +1439,6 @@ static noinline int run_delalloc_nocow(struct inode *inode, out_check: if (extent_end <= start) { path->slots[0]++; - if (!nolock && nocow) - btrfs_end_write_no_snapshotting(root); if (nocow) btrfs_dec_nocow_writers(fs_info, disk_bytenr); goto next_slot; @@ -1471,8 +1460,6 @@ static noinline int run_delalloc_nocow(struct inode *inode, end, page_started, nr_written, 1, NULL); if (ret) { - if (!nolock && nocow) - btrfs_end_write_no_snapshotting(root); if (nocow) btrfs_dec_nocow_writers(fs_info, disk_bytenr); @@ -1492,8 +1479,6 @@ static noinline int run_delalloc_nocow(struct inode *inode, ram_bytes, BTRFS_COMPRESS_NONE, BTRFS_ORDERED_PREALLOC); if (IS_ERR(em)) { - if (!nolock && nocow) - btrfs_end_write_no_snapshotting(root); if (nocow) btrfs_dec_nocow_writers(fs_info, disk_bytenr); @@ -1532,8 +1517,6 @@ static noinline int run_delalloc_nocow(struct inode *inode, EXTENT_CLEAR_DATA_RESV, PAGE_UNLOCK | PAGE_SET_PRIVATE2); - if (!nolock && nocow) - btrfs_end_write_no_snapshotting(root); cur_offset = extent_end; /* diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d3a5d2a41e5f..85c4284bb2cf 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -747,6 +747,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, struct btrfs_pending_snapshot *pending_snapshot; struct btrfs_trans_handle *trans; int ret; + bool snapshot_force_cow = false; if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) return -EINVAL; @@ -763,6 +764,11 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, goto free_pending; } + /* + * Force new buffered writes to reserve space even when NOCOW is + * possible. This is to avoid later writeback (running dealloc) to + * fallback to COW mode and unexpectedly fail with ENOSPC. + */ atomic_inc(&root->will_be_snapshotted); smp_mb__after_atomic(); /* wait for no snapshot writes */ @@ -773,6 +779,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, if (ret) goto dec_and_free; + /* + * All previous writes have started writeback in NOCOW mode, so now + * we force future writes to fallback to COW mode during snapshot + * creation. + */ + atomic_inc(&root->snapshot_force_cow); + snapshot_force_cow = true; + btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); btrfs_init_block_rsv(&pending_snapshot->block_rsv, @@ -837,6 +851,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, fail: btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); dec_and_free: + if (snapshot_force_cow) + atomic_dec(&root->snapshot_force_cow); if (atomic_dec_and_test(&root->will_be_snapshotted)) wake_up_var(&root->will_be_snapshotted); free_pending: -- GitLab From da4468a1aa75457e6134127b19761b7ba62ce945 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Fri, 17 Aug 2018 10:33:30 -0700 Subject: [PATCH 0125/1692] drm/i915: Do not redefine the has_csr parameter. Let us reuse the already defined has_csr check and not redefine it. The main difference is that in effect this will flip .has_csr to 1 (via GEN9_FEATURES which GEN11_FEATURES pulls in). 
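For context, these device-info tables are built by macro expansion (GEN11_FEATURES pulls in GEN10_FEATURES, which pulls in GEN9_FEATURES), and C designated initializers let a later .field assignment override an earlier one, so removing the explicit .has_csr = 0 simply leaves the value inherited from GEN9_FEATURES. A tiny standalone illustration of that language rule, with made-up values rather than i915 code:

#include <stdio.h>

struct info { int has_csr; int ddb_size; };

#define BASE_FEATURES	.has_csr = 1, .ddb_size = 896
#define NEXT_FEATURES	BASE_FEATURES, .ddb_size = 1024	/* inherit, then override one field */

static const struct info inherited  = { NEXT_FEATURES };		/* has_csr == 1 */
static const struct info overridden = { NEXT_FEATURES, .has_csr = 0 };	/* has_csr == 0 */

int main(void)
{
	printf("%d %d\n", inherited.has_csr, overridden.has_csr);	/* prints "1 0" */
	return 0;
}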
Suggested-by: Imre Deak Cc: Imre Deak Cc: Rodrigo Vivi Signed-off-by: Anusha Srivatsa Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=107382 Reviewed-by: Imre Deak Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/1534527210-16841-1-git-send-email-anusha.srivatsa@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index e931b48369dd..d6f7b9fe1d26 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -600,7 +600,6 @@ static const struct intel_device_info intel_cannonlake_info = { GEN10_FEATURES, \ GEN(11), \ .ddb_size = 2048, \ - .has_csr = 0, \ .has_logical_ring_elsq = 1 static const struct intel_device_info intel_icelake_11_info = { -- GitLab From 9c86336c15db1c48cbaddff56caf2be0a930e991 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Mon, 20 Aug 2018 10:51:05 +0800 Subject: [PATCH 0126/1692] ip6_vti: fix a null pointer deference when destroy vti6 tunnel If load ip6_vti module and create a network namespace when set fb_tunnels_only_for_init_net to 1, then exit the namespace will cause following crash: [ 6601.677036] BUG: unable to handle kernel NULL pointer dereference at 0000000000000008 [ 6601.679057] PGD 8000000425eca067 P4D 8000000425eca067 PUD 424292067 PMD 0 [ 6601.680483] Oops: 0000 [#1] SMP PTI [ 6601.681223] CPU: 7 PID: 93 Comm: kworker/u16:1 Kdump: loaded Tainted: G E 4.18.0+ #3 [ 6601.683153] Hardware name: Fedora Project OpenStack Nova, BIOS seabios-1.7.5-11.el7 04/01/2014 [ 6601.684919] Workqueue: netns cleanup_net [ 6601.685742] RIP: 0010:vti6_exit_batch_net+0x87/0xd0 [ip6_vti] [ 6601.686932] Code: 7b 08 48 89 e6 e8 b9 ea d3 dd 48 8b 1b 48 85 db 75 ec 48 83 c5 08 48 81 fd 00 01 00 00 75 d5 49 8b 84 24 08 01 00 00 48 89 e6 <48> 8b 78 08 e8 90 ea d3 dd 49 8b 45 28 49 39 c6 4c 8d 68 d8 75 a1 [ 6601.690735] RSP: 0018:ffffa897c2737de0 EFLAGS: 00010246 [ 6601.691846] RAX: 0000000000000000 RBX: 0000000000000000 RCX: dead000000000200 [ 6601.693324] RDX: 0000000000000015 RSI: ffffa897c2737de0 RDI: ffffffff9f2ea9e0 [ 6601.694824] RBP: 0000000000000100 R08: 0000000000000000 R09: 0000000000000000 [ 6601.696314] R10: 0000000000000001 R11: 0000000000000000 R12: ffff8dc323c07e00 [ 6601.697812] R13: ffff8dc324a63100 R14: ffffa897c2737e30 R15: ffffa897c2737e30 [ 6601.699345] FS: 0000000000000000(0000) GS:ffff8dc33fdc0000(0000) knlGS:0000000000000000 [ 6601.701068] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 6601.702282] CR2: 0000000000000008 CR3: 0000000424966002 CR4: 00000000001606e0 [ 6601.703791] Call Trace: [ 6601.704329] cleanup_net+0x1b4/0x2c0 [ 6601.705268] process_one_work+0x16c/0x370 [ 6601.706145] worker_thread+0x49/0x3e0 [ 6601.706942] kthread+0xf8/0x130 [ 6601.707626] ? rescuer_thread+0x340/0x340 [ 6601.708476] ? kthread_bind+0x10/0x10 [ 6601.709266] ret_from_fork+0x35/0x40 Reproduce: modprobe ip6_vti echo 1 > /proc/sys/net/core/fb_tunnels_only_for_init_net unshare -n exit This because ip6n->tnls_wc[0] point to fallback device in default, but in non-default namespace, ip6n->tnls_wc[0] will be NULL, so add the NULL check comparatively. Fixes: e2948e5af8ee ("ip6_vti: fix creating fallback tunnel device for vti6") Signed-off-by: Haishuang Yan Signed-off-by: David S. 
Miller --- net/ipv6/ip6_vti.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 38dec9da90d3..5095367c7204 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n, } t = rtnl_dereference(ip6n->tnls_wc[0]); - unregister_netdevice_queue(t->dev, list); + if (t) + unregister_netdevice_queue(t->dev, list); } static int __net_init vti6_init_net(struct net *net) -- GitLab From 46dec40fb741f00f1864580130779aeeaf24fb3d Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 20 Aug 2018 16:05:45 +1000 Subject: [PATCH 0127/1692] KVM: PPC: Book3S HV: Don't truncate HPTE index in xlate function This fixes a bug which causes guest virtual addresses to get translated to guest real addresses incorrectly when the guest is using the HPT MMU and has more than 256GB of RAM, or more specifically has a HPT larger than 2GB. This has showed up in testing as a failure of the host to emulate doorbell instructions correctly on POWER9 for HPT guests with more than 256GB of RAM. The bug is that the HPTE index in kvmppc_mmu_book3s_64_hv_xlate() is stored as an int, and in forming the HPTE address, the index gets shifted left 4 bits as an int before being signed-extended to 64 bits. The simple fix is to make the variable a long int, matching the return type of kvmppc_hv_find_lock_hpte(), which is what calculates the index. Fixes: 697d3899dcb4 ("KVM: PPC: Implement MMIO emulation support for Book3S HV guests") Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 7f3a8cf5d66f..4c08f42f6406 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -359,7 +359,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned long pp, key; unsigned long v, orig_v, gr; __be64 *hptep; - int index; + long int index; int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); if (kvm_is_radix(vcpu->kvm)) -- GitLab From 8a54d8fc160e67ad485d95a0322ce1221f80770a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 18 Jun 2018 09:29:57 +0200 Subject: [PATCH 0128/1692] cfg80211: remove division by size of sizeof(struct ieee80211_wmm_rule) Pointer arithmetic already adjusts by the size of the struct, so the sizeof() calculation is wrong. This is basically the same as Colin King's patch for similar code in the iwlwifi driver. 
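The C rule behind the fix, as a minimal standalone sketch (not kernel code, and the struct here is just a stand-in): subtracting two pointers of the same type already yields a count of elements, so dividing that difference by the element size scales it down a second time and gives a wrong, here truncated, index.

#include <stdio.h>
#include <stddef.h>

struct ieee80211_wmm_rule { int dummy[8]; };	/* stand-in; only the size matters */

int main(void)
{
	struct ieee80211_wmm_rule rules[4];
	const struct ieee80211_wmm_rule *s_wmm = &rules[0];
	const struct ieee80211_wmm_rule *rule = &rules[3];

	/* Pointer subtraction is already in units of elements, not bytes. */
	printf("elements apart: %td\n", rule - s_wmm);	/* 3 */
	printf("divided again:  %td\n",
	       (ptrdiff_t)((rule - s_wmm) / sizeof(struct ieee80211_wmm_rule)));	/* 0 (wrong) */
	return 0;
}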
Fixes: 230ebaa189af ("cfg80211: read wmm rules from regulatory database") Signed-off-by: Johannes Berg --- net/wireless/reg.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 4fc66a117b7d..283902974fbf 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -452,8 +452,7 @@ reg_copy_regd(const struct ieee80211_regdomain *src_regd) continue; regd->reg_rules[i].wmm_rule = d_wmm + - (src_regd->reg_rules[i].wmm_rule - s_wmm) / - sizeof(struct ieee80211_wmm_rule); + (src_regd->reg_rules[i].wmm_rule - s_wmm); } return regd; } -- GitLab From 6dfc4a8f134fe0fe4c77dd09906e7305ba7b3edc Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 16 Aug 2018 22:34:14 +0300 Subject: [PATCH 0129/1692] drm/i915: Verify power domains after enabling them After commit 2cd9a689e97b ("drm/i915: Refactor intel_display_set_init_power() logic") it makes more sense to check the power domain/well refcounts after enabling the power domains functionality. Before that it's guaranteed that most power wells (in the INIT domain) will have a reference held, so not an interesting state. While at it also add the check after the init_hw/fini_hw, disable and suspend/resume steps. Make the test optional on a Kconfig option since it may add substantial overhead: on VLV/CHV the corresponding PUNIT reg access for each power well may take up to 20ms. v2: - Add the state check to more spots. (Chris) v3: - During suspend check the state before deiniting display core. Afterwards DC states are disabled (and so the dc_off power well is enabled) even though we don't hold a reference on it. - Do the test conditionally based on a new Kconfig option. (Chris) Cc: Chris Wilson Reviewed-by: Chris Wilson [Add DRM_I915_DEBUG_RUNTIME_PM to welcome messages] Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20180817145837.26592-1-imre.deak@intel.com --- drivers/gpu/drm/i915/Kconfig.debug | 12 +++++++++ drivers/gpu/drm/i915/i915_drv.c | 2 ++ drivers/gpu/drm/i915/intel_display.c | 2 -- drivers/gpu/drm/i915/intel_drv.h | 1 - drivers/gpu/drm/i915/intel_runtime_pm.c | 36 ++++++++++++++++++++----- 5 files changed, 44 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 459f8f88a34c..9e36ffb5eb7c 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -30,6 +30,7 @@ config DRM_I915_DEBUG select SW_SYNC # signaling validation framework (igt/syncobj*) select DRM_I915_SW_FENCE_DEBUG_OBJECTS select DRM_I915_SELFTEST + select DRM_I915_DEBUG_RUNTIME_PM default n help Choose this option to turn on extra driver debugging that may affect @@ -167,3 +168,14 @@ config DRM_I915_DEBUG_VBLANK_EVADE the vblank. If in doubt, say "N". + +config DRM_I915_DEBUG_RUNTIME_PM + bool "Enable extra state checking for runtime PM" + depends on DRM_I915 + default n + help + Choose this option to turn on extra state checking for the + runtime PM functionality. This may introduce overhead during + driver loading, suspend and resume operations. 
+ + If in doubt, say "N" diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 35a012ffc03b..77a4a01ddc08 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1331,6 +1331,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) DRM_INFO("DRM_I915_DEBUG enabled\n"); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) + DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n"); } /** diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 592b847db88e..95e9cad5b4de 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15905,8 +15905,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev, intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - intel_power_domains_verify_state(dev_priv); - intel_fbc_init_pipe_state(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b2ce343b6027..35dd72fd0152 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1966,7 +1966,6 @@ enum i915_drm_suspend_mode { void intel_power_domains_suspend(struct drm_i915_private *dev_priv, enum i915_drm_suspend_mode); void intel_power_domains_resume(struct drm_i915_private *dev_priv); -void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); void bxt_display_core_uninit(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6153d5be5cf6..ff3fd8dbd2b4 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -3716,6 +3716,8 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) cmn->desc->ops->disable(dev_priv, cmn); } +static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); + /** * intel_power_domains_init_hw - initialize hardware power domain state * @dev_priv: i915 device instance @@ -3767,6 +3769,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_sync_hw(dev_priv); power_domains->initializing = false; + + intel_power_domains_verify_state(dev_priv); } /** @@ -3788,6 +3792,8 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) /* Remove the refcount we took to keep power well support disabled. */ if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(dev_priv); } /** @@ -3805,6 +3811,8 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) void intel_power_domains_enable(struct drm_i915_private *dev_priv) { intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(dev_priv); } /** @@ -3817,6 +3825,8 @@ void intel_power_domains_enable(struct drm_i915_private *dev_priv) void intel_power_domains_disable(struct drm_i915_private *dev_priv) { intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(dev_priv); } /** @@ -3845,15 +3855,19 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, * firmware was inactive. 
*/ if (!IS_GEN9_LP(dev_priv) && suspend_mode == I915_DRM_SUSPEND_IDLE && - dev_priv->csr.dmc_payload != NULL) + dev_priv->csr.dmc_payload != NULL) { + intel_power_domains_verify_state(dev_priv); return; + } /* * Even if power well support was disabled we still want to disable * power wells if power domains must be deinitialized for suspend. */ - if (!i915_modparams.disable_power_well) + if (!i915_modparams.disable_power_well) { intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_power_domains_verify_state(dev_priv); + } if (IS_ICELAKE(dev_priv)) icl_display_core_uninit(dev_priv); @@ -3884,13 +3898,15 @@ void intel_power_domains_resume(struct drm_i915_private *dev_priv) if (power_domains->display_core_suspended) { intel_power_domains_init_hw(dev_priv, true); power_domains->display_core_suspended = false; - - return; + } else { + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); } - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_power_domains_verify_state(dev_priv); } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; @@ -3919,7 +3935,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) * acquiring reference counts for any power wells in use and disabling the * ones left on by BIOS but not required by any active output. */ -void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) +static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *power_well; @@ -3974,6 +3990,14 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } +#else + +static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) +{ +} + +#endif + /** * intel_runtime_pm_get - grab a runtime pm reference * @dev_priv: i915 device instance -- GitLab From 59f1c8ab30d6f9042562949f42cbd3f3cf69de94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fredrik=20Sch=C3=B6n?= Date: Fri, 17 Aug 2018 22:07:28 +0200 Subject: [PATCH 0130/1692] drm/i915: Increase LSPCON timeout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 100 ms is not enough time for the LSPCON adapter on Intel NUC devices to settle. This causes dropped display modes at boot or screen reconfiguration. Empirical testing can reproduce the error up to a timeout of 190 ms. Basic boot and stress testing at 200 ms has not (yet) failed. Increase timeout to 400 ms to get some margin of error. Changes from v1: The initial suggestion of 1000 ms was lowered due to concerns about delaying valid timeout cases. Update patch metadata. 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107503 Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1570392 Fixes: 357c0ae9198a ("drm/i915/lspcon: Wait for expected LSPCON mode to settle") Cc: Shashank Sharma Cc: Imre Deak Cc: Jani Nikula Cc: # v4.11+ Reviewed-by: Rodrigo Vivi Reviewed-by: Shashank Sharma Signed-off-by: Fredrik Schön Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180817200728.8154-1-fredrik.schon@gmail.com --- drivers/gpu/drm/i915/intel_lspcon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", lspcon_mode_name(mode)); - wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); + wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); if (current_mode != mode) DRM_ERROR("LSPCON mode hasn't settled\n"); -- GitLab From d3bc0fa8411c35194f99046157e2e26fe60e1d91 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 20 Aug 2018 13:55:45 +0200 Subject: [PATCH 0131/1692] fsnotify: fix false positive warning on inode delete When inode is getting deleted and someone else holds reference to a mark attached to the inode, we just detach the connector from the inode. In that case fsnotify_put_mark() called from fsnotify_destroy_marks() will decide to recalculate mask for the inode and __fsnotify_recalc_mask() will WARN about invalid connector type: WARNING: CPU: 1 PID: 12015 at fs/notify/mark.c:139 __fsnotify_recalc_mask+0x2d7/0x350 fs/notify/mark.c:139 Actually there's no reason to warn about detached connector in __fsnotify_recalc_mask() so just silently skip updating the mask in such case. Reported-by: syzbot+c34692a51b9a6ca93540@syzkaller.appspotmail.com Fixes: 3ac70bfcde81 ("fsnotify: add helper to get mask from connector") Signed-off-by: Jan Kara --- fs/notify/mark.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 05506d60131c..59cdb27826de 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) struct fsnotify_mark *mark; assert_spin_locked(&conn->lock); + /* We can get detached connector here when inode is getting unlinked. */ + if (!fsnotify_valid_obj_type(conn->type)) + return; hlist_for_each_entry(mark, &conn->list, obj_list) { if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) new_mask |= mark->mask; } - if (WARN_ON(!fsnotify_valid_obj_type(conn->type))) - return; - *fsnotify_conn_mask_p(conn) = new_mask; } -- GitLab From 35a5fd9ebfa93758ca579e30f337b6c9126d995b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Aug 2018 11:02:41 +0100 Subject: [PATCH 0132/1692] drm/i915/audio: Hook up component bindings even if displays are disabled If the display has been disabled by modparam, we still want to connect together the HW bits and bobs with the associated drivers so that we can continue to manage their runtime power gating. 
Fixes: 108109444ff6 ("drm/i915: Check num_pipes before initializing audio component") Signed-off-by: Chris Wilson Cc: Imre Deak Cc: Takashi Iwai Cc: Jani Nikula Cc: Elaine Wang Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20180817100241.4628-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_audio.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index bb94172ffc07..f02cb211d3e7 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -960,9 +960,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) { int ret; - if (INTEL_INFO(dev_priv)->num_pipes == 0) - return; - ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); if (ret < 0) { DRM_ERROR("failed to add audio component (%d)\n", ret); -- GitLab From f1506a69e3e72196c7c5ce4fd420d5e1a6965ed3 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 28 Jul 2018 16:17:49 -0300 Subject: [PATCH 0133/1692] thermal: qoriq: Use devm_thermal_zone_of_sensor_register() By using the managed devm_thermal_zone_of_sensor_register() we can drop the explicit call to thermal_zone_of_sensor_unregister() in the qoriq_tmu_remove() function, which simplifies the code a bit. So switch to devm_thermal_zone_of_sensor_register(). Signed-off-by: Fabio Estevam Reviewed-by: Daniel Lezcano Signed-off-by: Eduardo Valentin --- drivers/thermal/qoriq_thermal.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index c866cc165960..e32d6ac79145 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -233,8 +233,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev) if (ret < 0) goto err_tmu; - data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id, - data, &tmu_tz_ops); + data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, + data->sensor_id, + data, &tmu_tz_ops); if (IS_ERR(data->tz)) { ret = PTR_ERR(data->tz); dev_err(&pdev->dev, @@ -261,8 +262,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev) { struct qoriq_tmu_data *data = platform_get_drvdata(pdev); - thermal_zone_of_sensor_unregister(&pdev->dev, data->tz); - /* Disable monitoring */ tmu_write(data, TMR_DISABLE, &data->regs->tmr); -- GitLab From 1a893a5a198eff228ddc1a364830f8928b8f9ac5 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 28 Jul 2018 16:17:50 -0300 Subject: [PATCH 0134/1692] thermal: qoriq: Simplify the 'site' variable assignment There is no need to assign zero to the variable 'site' and then perform a compound bitwise OR operation afterwards. Make it simpler by assigning the final 'site' value directly. 
Signed-off-by: Fabio Estevam Reviewed-by: Daniel Lezcano Signed-off-by: Eduardo Valentin --- drivers/thermal/qoriq_thermal.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index e32d6ac79145..f807e4d1f72e 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -197,7 +197,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) int ret; struct qoriq_tmu_data *data; struct device_node *np = pdev->dev.of_node; - u32 site = 0; + u32 site; if (!np) { dev_err(&pdev->dev, "Device OF-Node is NULL"); @@ -244,7 +244,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev) } /* Enable monitoring */ - site |= 0x1 << (15 - data->sensor_id); + site = 0x1 << (15 - data->sensor_id); tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); return 0; -- GitLab From 2dfef650217c0e24754cd4c3abbb43e98131a7cf Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 28 Jul 2018 16:17:51 -0300 Subject: [PATCH 0135/1692] thermal: qoriq: Switch to SPDX identifier Adopt the SPDX license identifier headers to ease license compliance management. Signed-off-by: Fabio Estevam Reviewed-by: Daniel Lezcano Acked-by: Philippe Ombredanne Signed-off-by: Eduardo Valentin --- drivers/thermal/qoriq_thermal.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index f807e4d1f72e..450ed66edf58 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -1,16 +1,6 @@ -/* - * Copyright 2016 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - */ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright 2016 Freescale Semiconductor, Inc. #include #include -- GitLab From f00d25f3154b676fcea4502a25b94bd7f142ca74 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 20 Aug 2018 00:01:42 +0300 Subject: [PATCH 0136/1692] qed: Wait for ready indication before rereading the shmem The MFW might be reset and re-update its shared memory. Upon the detection of such a reset the driver rereads this memory, but it has to wait till the data is valid. This patch adds the missing wait for a data ready indication. Signed-off-by: Tomer Tayar Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 50 +++++++++++++++++++---- 1 file changed, 41 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index d89a0e22f6e4..bdcacb31d88b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) return 0; } +/* Maximum of 1 sec to wait for the SHMEM ready indication */ +#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 +#define QED_MCP_SHMEM_RDY_ITER_MS 50 + static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info = p_hwfn->mcp_info; + u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; + u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; u32 drv_mb_offsize, mfw_mb_offsize; u32 mcp_pf_id = MCP_PF_ID(p_hwfn); p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); - if (!p_info->public_base) - return 0; + if (!p_info->public_base) { + DP_NOTICE(p_hwfn, + "The address of the MCP scratch-pad is not configured\n"); + return -EINVAL; + } p_info->public_base |= GRCBASE_MCP; + /* Get the MFW MB address and number of supported messages */ + mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_MFW_MB)); + p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); + p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr + + offsetof(struct public_mfw_mb, + sup_msgs)); + + /* The driver can notify that there was an MCP reset, and might read the + * SHMEM values before the MFW has completed initializing them. + * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a + * data ready indication. + */ + while (!p_info->mfw_mb_length && --cnt) { + msleep(msec); + p_info->mfw_mb_length = + (u16)qed_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr + + offsetof(struct public_mfw_mb, sup_msgs)); + } + + if (!cnt) { + DP_NOTICE(p_hwfn, + "Failed to get the SHMEM ready notification after %d msec\n", + QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); + return -EBUSY; + } + /* Calculate the driver and MFW mailbox address */ drv_mb_offsize = qed_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(p_info->public_base, @@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); - /* Set the MFW MB address */ - mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_info->public_base, - PUBLIC_MFW_MB)); - p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); - p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr); - /* Get the current driver mailbox sequence before sending * the first command */ -- GitLab From 76271809f49056f079e202bf6513d17b0d6dd34d Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 20 Aug 2018 00:01:43 +0300 Subject: [PATCH 0137/1692] qed: Wait for MCP halt and resume commands to take place Successive iterations of halting and resuming the management chip (MCP) might fail, since currently the driver doesn't wait for these operations to actually take place. This patch prevents the driver from moving forward before the operations are reflected in the state register. Signed-off-by: Tomer Tayar Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 46 +++++++++++++++---- .../net/ethernet/qlogic/qed/qed_reg_addr.h | 1 + 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index bdcacb31d88b..5f3dbdc7ff1d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2109,31 +2109,61 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, return rc; } +/* A maximal 100 msec waiting time for the MCP to halt */ +#define QED_MCP_HALT_SLEEP_MS 10 +#define QED_MCP_HALT_MAX_RETRIES 10 + int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 resp = 0, param = 0; + u32 resp = 0, param = 0, cpu_state, cnt = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, ¶m); - if (rc) + if (rc) { DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } - return rc; + do { + msleep(QED_MCP_HALT_SLEEP_MS); + cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) + break; + } while (++cnt < QED_MCP_HALT_MAX_RETRIES); + + if (cnt == QED_MCP_HALT_MAX_RETRIES) { + DP_NOTICE(p_hwfn, + "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", + qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); + return -EBUSY; + } + + return 0; } +#define QED_MCP_RESUME_SLEEP_MS 10 + int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 value, cpu_mode; + u32 cpu_mode, cpu_state; qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); - value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); - value &= ~MCP_REG_CPU_MODE_SOFT_HALT; - qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; + qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); + msleep(QED_MCP_RESUME_SLEEP_MS); + cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); - return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; + if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { + DP_NOTICE(p_hwfn, + "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", + cpu_mode, cpu_state); + return -EBUSY; + } + + return 0; } int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index d8ad2dcad8d5..2279965f8f8a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -562,6 +562,7 @@ 0 #define MCP_REG_CPU_STATE \ 0xe05004UL +#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) #define MCP_REG_CPU_EVENT_MASK \ 0xe05008UL #define PGLUE_B_REG_PF_BAR0_SIZE \ -- GitLab From eaa50fc59e5841910987e90b0438b2643041f508 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 20 Aug 2018 00:01:44 +0300 Subject: [PATCH 0138/1692] qed: Prevent a possible deadlock during driver load and unload The MFW manages an internal lock to prevent concurrent hardware (de)initialization of different PFs. This, together with the busy-waiting for the MFW's responses for commands, might lead to a deadlock during concurrent load or unload of PFs. This patch adds the option to sleep within the busy-waiting, and uses it for the (un)load requests (which are not sent from an interrupt context) to prevent the possible deadlock. Signed-off-by: Tomer Tayar Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 43 ++++++++++++++++------- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 21 ++++++----- 2 files changed, 44 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 5f3dbdc7ff1d..b7279e625db3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -48,7 +48,7 @@ #include "qed_reg_addr.h" #include "qed_sriov.h" -#define CHIP_MCP_RESP_ITER_US 10 +#define QED_MCP_RESP_ITER_US 10 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ @@ -317,7 +317,7 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn, int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; + u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0; int rc = 0; /* Ensure that only a single thread is accessing the mailbox */ @@ -449,10 +449,10 @@ static int _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_mb_params *p_mb_params, - u32 max_retries, u32 delay) + u32 max_retries, u32 usecs) { + u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); struct qed_mcp_cmd_elem *p_cmd_elem; - u32 cnt = 0; u16 seq_num; int rc = 0; @@ -475,7 +475,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, goto err; spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); - udelay(delay); + + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) + msleep(msecs); + else + udelay(usecs); } while (++cnt < max_retries); if (cnt >= max_retries) { @@ -504,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, * The spinlock stays locked until the list element is removed. 
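 * (Editor's note, not part of the original patch: the CAN_SLEEP variant of
 * this wait is only requested for mailbox commands issued from process
 * context, i.e. the load and unload requests; every other caller keeps the
 * original udelay() busy-wait.)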
*/ - udelay(delay); + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) + msleep(msecs); + else + udelay(usecs); + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); if (p_cmd_elem->b_is_completed) @@ -539,7 +547,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", p_mb_params->mcp_resp, p_mb_params->mcp_param, - (cnt * delay) / 1000, (cnt * delay) % 1000); + (cnt * usecs) / 1000, (cnt * usecs) % 1000); /* Clear the sequence number from the MFW response */ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; @@ -557,7 +565,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, { size_t union_data_size = sizeof(union drv_union_data); u32 max_retries = QED_DRV_MB_MAX_RETRIES; - u32 delay = CHIP_MCP_RESP_ITER_US; + u32 usecs = QED_MCP_RESP_ITER_US; /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { @@ -574,8 +582,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EINVAL; } + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { + max_retries = DIV_ROUND_UP(max_retries, 1000); + usecs *= 1000; + } + return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, - delay); + usecs); } int qed_mcp_cmd(struct qed_hwfn *p_hwfn, @@ -793,6 +806,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn, mb_params.data_src_size = sizeof(load_req); mb_params.p_data_dst = &load_rsp; mb_params.data_dst_size = sizeof(load_rsp); + mb_params.flags = QED_MB_FLAG_CAN_SLEEP; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", @@ -1014,7 +1028,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 wol_param, mcp_resp, mcp_param; + struct qed_mcp_mb_params mb_params; + u32 wol_param; switch (p_hwfn->cdev->wol_config) { case QED_OV_WOL_DISABLED: @@ -1032,8 +1047,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; } - return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, - &mcp_resp, &mcp_param); + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; + mb_params.param = wol_param; + mb_params.flags = QED_MB_FLAG_CAN_SLEEP; + + return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 047976d5c6e9..b9d3ecf7aad6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -660,14 +660,19 @@ struct qed_mcp_info { }; struct qed_mcp_mb_params { - u32 cmd; - u32 param; - void *p_data_src; - u8 data_src_size; - void *p_data_dst; - u8 data_dst_size; - u32 mcp_resp; - u32 mcp_param; + u32 cmd; + u32 param; + void *p_data_src; + void *p_data_dst; + u8 data_src_size; + u8 data_dst_size; + u32 mcp_resp; + u32 mcp_param; + u32 flags; +#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0) +#define QED_MB_FLAGS_IS_SET(params, flag) \ + ({ typeof(params) __params = (params); \ + (__params && (__params->flags & QED_MB_FLAG_ ## flag)); }) }; struct qed_drv_tlv_hdr { -- GitLab From b310974e041913231b6e3d5d475d4df55c312301 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 20 Aug 2018 00:01:45 +0300 Subject: [PATCH 0139/1692] qed: Avoid sending mailbox commands when MFW is not responsive Keep sending mailbox commands to the MFW when it is not responsive ends up with a redundant amount of timeout 
expiries. This patch prints the MCP status on the first command which is not responded, and blocks the following commands. Since the (un)load request commands might be not responded due to other PFs, the patch also adds the option to skip the blocking upon a failure. Signed-off-by: Tomer Tayar Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 52 ++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 6 ++- .../net/ethernet/qlogic/qed/qed_reg_addr.h | 1 + 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index b7279e625db3..5d37ec7e9b0b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -320,6 +320,12 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0; int rc = 0; + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, + "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n"); + return -EBUSY; + } + /* Ensure that only a single thread is accessing the mailbox */ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); @@ -445,6 +451,33 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, (p_mb_params->cmd | seq_num), p_mb_params->param); } +static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) +{ + p_hwfn->mcp_info->b_block_cmd = block_cmd; + + DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", + block_cmd ? "Block" : "Unblock"); +} + +static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; + u32 delay = QED_MCP_RESP_ITER_US; + + cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + udelay(delay); + cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + udelay(delay); + cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + + DP_NOTICE(p_hwfn, + "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", + cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); +} + static int _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -531,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); + qed_mcp_print_cpu_info(p_hwfn, p_ptt); spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) + qed_mcp_cmd_set_blocking(p_hwfn, true); + return -EAGAIN; } @@ -573,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EBUSY; } + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, + "The MFW is not responsive. 
Avoid sending mailbox command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + return -EBUSY; + } + if (p_mb_params->data_src_size > union_data_size || p_mb_params->data_dst_size > union_data_size) { DP_ERR(p_hwfn, @@ -806,7 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn, mb_params.data_src_size = sizeof(load_req); mb_params.p_data_dst = &load_rsp; mb_params.data_dst_size = sizeof(load_rsp); - mb_params.flags = QED_MB_FLAG_CAN_SLEEP; + mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", @@ -1050,7 +1094,7 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; mb_params.param = wol_param; - mb_params.flags = QED_MB_FLAG_CAN_SLEEP; + mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } @@ -2158,6 +2202,8 @@ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return -EBUSY; } + qed_mcp_cmd_set_blocking(p_hwfn, true); + return 0; } @@ -2182,6 +2228,8 @@ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return -EBUSY; } + qed_mcp_cmd_set_blocking(p_hwfn, false); + return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index b9d3ecf7aad6..85e6b3989e7a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -635,11 +635,14 @@ struct qed_mcp_info { */ spinlock_t cmd_lock; + /* Flag to indicate whether sending a MFW mailbox command is blocked */ + bool b_block_cmd; + /* Spinlock used for syncing SW link-changes and link-changes * originating from attention context. */ spinlock_t link_lock; - bool block_mb_sending; + u32 public_base; u32 drv_mb_addr; u32 mfw_mb_addr; @@ -670,6 +673,7 @@ struct qed_mcp_mb_params { u32 mcp_param; u32 flags; #define QED_MB_FLAG_CAN_SLEEP (0x1 << 0) +#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1) #define QED_MB_FLAGS_IS_SET(params, flag) \ ({ typeof(params) __params = (params); \ (__params && (__params->flags & QED_MB_FLAG_ ## flag)); }) diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 2279965f8f8a..f736f70956fd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -565,6 +565,7 @@ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) #define MCP_REG_CPU_EVENT_MASK \ 0xe05008UL +#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL #define PGLUE_B_REG_PF_BAR0_SIZE \ 0x2aae60UL #define PGLUE_B_REG_PF_BAR1_SIZE \ -- GitLab From c954579087f4c0185206cfa777e697874b1e7d13 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 30 Jul 2018 07:56:06 +0000 Subject: [PATCH 0140/1692] thermal: rcar_thermal: convert to SPDX identifiers As original license mentioned, it is GPL-2.0 in SPDX. Then, MODULE_LICENSE() should be "GPL v2" instead of "GPL". 
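(Editorial aside, not part of the original commit message: a minimal, self-contained module skeleton following the convention this patch enforces, namely the SPDX tag and the MODULE_LICENSE() string agreeing on "GPL v2 only", could look as below. The spdx_demo_* names are made up for illustration; they are not driver code.)

    // SPDX-License-Identifier: GPL-2.0
    // Illustration only: keep the SPDX tag and MODULE_LICENSE() consistent.
    #include <linux/init.h>
    #include <linux/module.h>

    static int __init spdx_demo_init(void)
    {
    	return 0;
    }

    static void __exit spdx_demo_exit(void)
    {
    }

    module_init(spdx_demo_init);
    module_exit(spdx_demo_exit);

    MODULE_LICENSE("GPL v2");	/* v2 only, matching the GPL-2.0 tag above */
    MODULE_DESCRIPTION("SPDX/MODULE_LICENSE consistency example");

The module.h mapping the commit refers to is quoted next.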
See ${LINUX}/include/linux/module.h "GPL" [GNU Public License v2 or later] "GPL v2" [GNU Public License v2] Signed-off-by: Kuninori Morimoto Reviewed-by: Daniel Lezcano Reviewed-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: Eduardo Valentin --- drivers/thermal/rcar_thermal.c | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index e77e63070e99..78f932822d38 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -1,21 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Car THS/TSC thermal sensor driver * * Copyright (C) 2012 Renesas Solutions Corp. * Kuninori Morimoto - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #include #include @@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = { }; module_platform_driver(rcar_thermal_driver); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); MODULE_AUTHOR("Kuninori Morimoto "); -- GitLab From d316522d06e9894429ace4b5f99e181bde4e7bd7 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 20 Aug 2018 11:42:35 -0700 Subject: [PATCH 0141/1692] thermal: rcar_gen3_thermal: convert to SPDX identifiers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Kuninori Morimoto Reviewed-by: Geert Uytterhoeven Reviewed-by: Daniel Lezcano Reviewed-by: Niklas Söderlund Reviewed-by: Simon Horman Signed-off-by: Eduardo Valentin --- drivers/thermal/rcar_gen3_thermal.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 766521eb7071..7aed5337bdd3 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -1,19 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Car Gen3 THS thermal sensor driver * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. * * Copyright (C) 2016 Renesas Electronics Corporation. * Copyright (C) 2016 Sang Engineering - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * */ #include #include -- GitLab From 152395fd03d4ce1e535a75cdbf58105e50587611 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Tue, 31 Jul 2018 00:56:49 +0800 Subject: [PATCH 0142/1692] thermal: of-thermal: disable passive polling when thermal zone is disabled When thermal zone is in passive mode, disabling its mode from sysfs is NOT taking effect at all, it is still polling the temperature of the disabled thermal zone and handling all thermal trips, it makes user confused. The disabling operation should disable the thermal zone behavior completely, for both active and passive mode, this patch clears the passive_delay when thermal zone is disabled and restores it when it is enabled. Signed-off-by: Anson Huang Signed-off-by: Eduardo Valentin --- drivers/thermal/of-thermal.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 977a8307fbb1..4f2816559205 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz, mutex_lock(&tz->lock); - if (mode == THERMAL_DEVICE_ENABLED) + if (mode == THERMAL_DEVICE_ENABLED) { tz->polling_delay = data->polling_delay; - else + tz->passive_delay = data->passive_delay; + } else { tz->polling_delay = 0; + tz->passive_delay = 0; + } mutex_unlock(&tz->lock); -- GitLab From 176eb614b118c96e7797f5ddefd10708c316f621 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Mon, 20 Aug 2018 12:43:51 +0800 Subject: [PATCH 0143/1692] r8152: disable RX aggregation on new Dell TB16 dock There's a new Dell TB16 dock with a different iSerialNumber. Apply the same fix from commit 0b1655143df0 ("r8152: disable RX aggregation on Dell TB16 dock") to this model. BugLink: https://bugs.launchpad.net/bugs/1785780 Signed-off-by: Kai-Heng Feng Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 97742708460b..2cd71bdb6484 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->hw_features &= ~NETIF_F_RXCSUM; } - if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && - udev->serial && !strcmp(udev->serial, "000001000000")) { + if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && + (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) { dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); } -- GitLab From bcaad532974eb47f1fb4ee04ede9812107060245 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Fri, 17 Aug 2018 14:52:08 -0700 Subject: [PATCH 0144/1692] drm/i915/icl: Implement HSDIV_RATIO of MG_CLKTOP2_HSCLKCTL_PORT reg as separate divider value defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The register value of Divider Ratio for high speed divider (hsdiv_ratio) in MG_CLKTOP2_HSCLKCTL_PORT register is not same as the actual numerical value of the divider. So this patch implements separate divider value defines for that field. icl_mg_pll_find_divisors() can use these defines instead of magic register values. The new defines are going to be used in the next patch. v2 (from Paulo): * Rebase. * Make it look a little more like the rest of our code. v3 (from Paulo): * Make hsdiv u32 now that it's a bit field (José). 
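(Editorial aside, not part of the original changelog: the point of the new defines is that the 2-bit HSDIV field encodes the dividers 2/3/5/7 as 0/1/2/3, so the numeric ratio must never be written into the register directly. A stand-alone sketch of that mapping follows; the helper name and plain C types are hypothetical, only the *_HSDIV_RATIO_* encodings come from the patch.)

    /* Illustration only: map a numeric high-speed divider to the 2-bit
     * MG_CLKTOP2_HSCLKCTL field encoding, which is what the new
     * MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2/3/5/7 defines spell out. */
    static unsigned int hsdiv_ratio_to_field(int ratio)
    {
    	switch (ratio) {
    	case 2: return 0u << 12;
    	case 3: return 1u << 12;
    	case 5: return 2u << 12;
    	case 7: return 3u << 12;
    	default: return 0u << 12;	/* the driver warns via MISSING_CASE()
    					 * and falls back to the /2 encoding */
    	}
    }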
Reviewed-by: Rodrigo Vivi Reviewed-by: José Roberto de Souza Suggested-by: James Ausmus Signed-off-by: Manasi Navare Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180817215209.29133-1-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 5 ++++- drivers/gpu/drm/i915/intel_dpll_mgr.c | 13 +++++++------ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5121b9f072c6..8d3a7fe44d66 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9391,8 +9391,11 @@ enum skl_power_gate { #define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12) #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2 (0 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3 (1 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12) #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \ diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 20c90688a48a..04d41bc1a4bb 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -2643,7 +2643,8 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, for (div2 = 10; div2 > 0; div2--) { int dco = div1 * div2 * clock_khz * 5; - int a_divratio, tlinedrv, inputsel, hsdiv; + int a_divratio, tlinedrv, inputsel; + u32 hsdiv; if (dco < dco_min_freq || dco > dco_max_freq) continue; @@ -2662,16 +2663,16 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, MISSING_CASE(div1); /* fall through */ case 2: - hsdiv = 0; + hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2; break; case 3: - hsdiv = 1; + hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3; break; case 5: - hsdiv = 2; + hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5; break; case 7: - hsdiv = 3; + hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7; break; } @@ -2685,7 +2686,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, state->mg_clktop2_hsclkctl = MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) | - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(hsdiv) | + hsdiv | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2); return true; -- GitLab From 7b19f544ed90b7ca4bd850145e2624a99a967de0 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Fri, 17 Aug 2018 14:52:09 -0700 Subject: [PATCH 0145/1692] drm/i915/icl: Get DDI clock for ICL for MG PLL and TBT PLL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PLLs are the source clocks for the DDIs so in order to determine the ddi clock we need to check the PLL configuration. For MG PHy Ports (C - F), depending on whether it is a TBT PLL or MG PLL the link lock can be obtained from the the PLL divisors based on the specification. v2 (from Paulo): * Make the algorithm look more like what's in the spec, also document where we differ form the spec and why. * Make the code a little more consistent with our coding style. 
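(Editorial aside, not in the original commit message: the arithmetic of the MG PLL readout reduces to link_clock = m1 * (m2_int + m2_frac / 2^22) * refclk / (5 * div1 * div2), with the division by 2^22 deferred to limit rounding error. A self-contained restatement in plain C is sketched below; the helper name is hypothetical and the divisors are assumed already normalized, i.e. a div2 register value of 0 has been read back as 1.)

    /* Illustration only: MG PLL link clock in kHz from its feedback and
     * output divisors; m2 has an integer part and a 22-bit fractional part. */
    static unsigned long long mg_pll_link_khz(unsigned long long m1,
    					      unsigned long long m2_int,
    					      unsigned long long m2_frac,
    					      unsigned long long refclk_khz,
    					      unsigned long long div1,
    					      unsigned long long div2)
    {
    	unsigned long long tmp;

    	/* Multiply first, divide by 2^22 and the output dividers last. */
    	tmp = m1 * m2_int * refclk_khz + ((m1 * m2_frac * refclk_khz) >> 22);
    	return tmp / (5 * div1 * div2);
    }

The real icl_calc_mg_pll_link() reads these values from the MG_PLL_DIV0/DIV1 and MG_CLKTOP2_HSCLKCTL registers, as the diff that follows shows.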
Reviewed-by: José Roberto de Souza Signed-off-by: Manasi Navare Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180817215209.29133-2-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 5 ++ drivers/gpu/drm/i915/intel_ddi.c | 81 +++++++++++++++++++++++++++++++- 2 files changed, 84 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8d3a7fe44d66..59d06d0055bb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9397,6 +9397,7 @@ enum skl_power_gate { #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12) #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12) #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8 #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \ _MG_CLKTOP2_HSCLKCTL_PORT1, \ @@ -9407,7 +9408,10 @@ enum skl_power_gate { #define _MG_PLL_DIV0_PORT3 0x16AA00 #define _MG_PLL_DIV0_PORT4 0x16BA00 #define MG_PLL_DIV0_FRACNEN_H (1 << 30) +#define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8) +#define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8 #define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8) +#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0) #define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0) #define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \ _MG_PLL_DIV0_PORT2) @@ -9422,6 +9426,7 @@ enum skl_power_gate { #define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12) #define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12) #define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4) +#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0) #define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0) #define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \ _MG_PLL_DIV1_PORT2) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 6f7be066c8f2..f3b115ce4029 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1427,6 +1427,81 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, return dco_freq / (p0 * p1 * p2 * 5); } +static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, + enum port port) +{ + u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; + + switch (val) { + case DDI_CLK_SEL_NONE: + return 0; + case DDI_CLK_SEL_TBT_162: + return 162000; + case DDI_CLK_SEL_TBT_270: + return 270000; + case DDI_CLK_SEL_TBT_540: + return 540000; + case DDI_CLK_SEL_TBT_810: + return 810000; + default: + MISSING_CASE(val); + return 0; + } +} + +static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, + enum port port) +{ + u32 mg_pll_div0, mg_clktop_hsclkctl; + u32 m1, m2_int, m2_frac, div1, div2, refclk; + u64 tmp; + + refclk = dev_priv->cdclk.hw.ref; + + mg_pll_div0 = I915_READ(MG_PLL_DIV0(port)); + mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); + + m1 = I915_READ(MG_PLL_DIV1(port)) & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? 
+ (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> + MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; + + switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: + div1 = 2; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3: + div1 = 3; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5: + div1 = 5; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7: + div1 = 7; + break; + default: + MISSING_CASE(mg_clktop_hsclkctl); + return 0; + } + + div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; + /* div2 value of 0 is same as 1 means no div */ + if (div2 == 0) + div2 = 1; + + /* + * Adjust the original formula to delay the division by 2^22 in order to + * minimize possible rounding errors. + */ + tmp = (u64)m1 * m2_int * refclk + + (((u64)m1 * m2_frac * refclk) >> 22); + tmp = div_u64(tmp, 5 * div1 * div2); + + return tmp; +} + static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { int dotclock; @@ -1467,8 +1542,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder, link_clock = icl_calc_dp_combo_pll_link(dev_priv, pll_id); } else { - /* FIXME - Add for MG PLL */ - WARN(1, "MG PLL clock_get code not implemented yet\n"); + if (pll_id == DPLL_ID_ICL_TBTPLL) + link_clock = icl_calc_tbt_pll_link(dev_priv, port); + else + link_clock = icl_calc_mg_pll_link(dev_priv, port); } pipe_config->port_clock = link_clock; -- GitLab From 80f1a0f4e0cd4bfc8a74fc1c39843a6e7b206b95 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 20 Aug 2018 13:02:41 -0700 Subject: [PATCH 0146/1692] net/ipv6: Put lwtstate when destroying fib6_info Prior to the introduction of fib6_info lwtstate was managed by the dst code. With fib6_info releasing lwtstate needs to be done when the struct is freed. Fixes: 93531c674315 ("net/ipv6: separate handling of FIB entries from dst based routes") Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/ip6_fib.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index d212738e9d10..c861a6d4671d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head) } } + lwtstate_put(f6i->fib6_nh.nh_lwtstate); + if (f6i->fib6_nh.nh_dev) dev_put(f6i->fib6_nh.nh_dev); -- GitLab From ab08dcd724543896303eae7de6288242bbaff458 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 21 Aug 2018 01:41:56 +0000 Subject: [PATCH 0147/1692] rhashtable: remove duplicated include from rhashtable.c Remove duplicated include. Signed-off-by: Yue Haibing Signed-off-by: David S. Miller --- lib/rhashtable.c | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index ae4223e0f5bc..672eecda874a 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -28,7 +28,6 @@ #include #include #include -#include #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U -- GitLab From 09a4e0be5826aa66c4ce9954841f110ffe63ef4f Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 16 Aug 2018 21:44:02 -0500 Subject: [PATCH 0148/1692] isofs: reject hardware sector size > 2048 bytes The largest block size supported by isofs is ISOFS_BLOCK_SIZE (2048), but isofs_fill_super calls sb_min_blocksize and sets the blocksize to the device's logical block size if it's larger than what we ended up with after option parsing. 
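(Editorial aside to put numbers on the failure described next, not part of the original report: ISOFS_BLOCK_BITS is 11 for the 2048-byte ISOFS_BLOCK_SIZE, while a hard 4k device leaves s_blocksize_bits at 12, so the shift count in the superblock lookup goes negative. A throwaway userspace check with the same constants:)

    #include <stdio.h>

    int main(void)
    {
    	int isofs_block_bits = 11;	/* log2(ISOFS_BLOCK_SIZE = 2048) */
    	int s_blocksize_bits = 12;	/* log2(4096) on a hard-4k device */
    	int shift = isofs_block_bits - s_blocksize_bits;

    	if (shift < 0)
    		printf("shift count %d: iso_blknum << shift is undefined\n",
    		       shift);
    	return 0;
    }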
If for some reason we try to mount a hard 4k device as an isofs filesystem, we'll set opt.blocksize to 4096, and when we try to read the superblock we found via: block = iso_blknum << (ISOFS_BLOCK_BITS - s->s_blocksize_bits) with s_blocksize_bits greater than ISOFS_BLOCK_BITS, we'll have a negative shift and the bread will fail somewhat cryptically: isofs_fill_super: bread failed, dev=sda, iso_blknum=17, block=-2147483648 It seems best to just catch and clearly reject mounts of such a device. Reported-by: Bryan Gurney Signed-off-by: Eric Sandeen Signed-off-by: Jan Kara --- fs/isofs/inode.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index ec3fba7d492f..488a9e7f8f66 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "isofs.h" #include "zisofs.h" @@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) /* * What if bugger tells us to go beyond page size? */ + if (bdev_logical_block_size(s->s_bdev) > 2048) { + printk(KERN_WARNING + "ISOFS: unsupported/invalid hardware sector size %d\n", + bdev_logical_block_size(s->s_bdev)); + goto out_freesbi; + } opt.blocksize = sb_min_blocksize(s, opt.blocksize); sbi->s_high_sierra = 0; /* default is iso9660 */ -- GitLab From df4f94e810fc270db4baa8f4b35ef138246c7746 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 21 Aug 2018 11:11:38 +0100 Subject: [PATCH 0149/1692] drm/i915: Correct CSB probing for engine state dumper Since we no longer maintain our read position in the CSB pointers register, it always returns 0 and not where we last read up to. As a result the CSB probing in the state dumper starts from 0, either missing entries or showing stale one. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180821101138.15822-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_engine_cs.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 8628567d8f6e..1a34e8ff82d5 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1345,20 +1345,19 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, if (HAS_EXECLISTS(dev_priv)) { const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; - u32 ptr, read, write; unsigned int idx; + u8 read, write; drm_printf(m, "\tExeclist status: 0x%08x %08x\n", I915_READ(RING_EXECLIST_STATUS_LO(engine)), I915_READ(RING_EXECLIST_STATUS_HI(engine))); - ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); - read = GEN8_CSB_READ_PTR(ptr); - write = GEN8_CSB_WRITE_PTR(ptr); - drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n", - read, execlists->csb_head, - write, - intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), + read = execlists->csb_head; + write = READ_ONCE(*execlists->csb_write); + + drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? 
%s (%s)\n", + read, write, + GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))), yesno(test_bit(TASKLET_STATE_SCHED, &engine->execlists.tasklet.state)), enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); @@ -1370,12 +1369,12 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, write += GEN8_CSB_ENTRIES; while (read < write) { idx = ++read % GEN8_CSB_ENTRIES; - drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n", + drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n", idx, - I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), hws[idx * 2], - I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)), - hws[idx * 2 + 1]); + I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), + hws[idx * 2 + 1], + I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); } rcu_read_lock(); -- GitLab From 19f5e9e015675fcdbf2c20e804b2e84e80201454 Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Mon, 20 Aug 2018 10:54:44 +0200 Subject: [PATCH 0150/1692] mmc: atmel-mci: fix bad logic of sg_copy_{from,to}_buffer conversion The conversion to sg_copy_{from,to}_buffer has been done in the wrong way. sg_copy_to_buffer is a copy from an SG list to a linear buffer so it can't replace memcpy(buf + offset, &value, remaining) where buf is the virtual address of the SG. Same for sg_copy_to_buffer but in the opposite way. Signed-off-by: Ludovic Desroches Suggested-by: Douglas Gilbert Fixes: 5b4277814e3f ("mmc: atmel-mci: use sg_copy_{from,to}_buffer") Signed-off-by: Ulf Hansson --- drivers/mmc/host/atmel-mci.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 5aa2c9404e92..be53044086c7 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) do { value = atmci_readl(host, ATMCI_RDR); if (likely(offset + 4 <= sg->length)) { - sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); + sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); offset += 4; nbytes += 4; @@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) } else { unsigned int remaining = sg->length - offset; - sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); + sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); nbytes += remaining; flush_dcache_page(sg_page(sg)); @@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) goto done; offset = 4 - remaining; - sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, + sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, offset, 0); nbytes += offset; } @@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) do { if (likely(offset + 4 <= sg->length)) { - sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); + sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); atmci_writel(host, ATMCI_TDR, value); offset += 4; @@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) unsigned int remaining = sg->length - offset; value = 0; - sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); + sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); nbytes += remaining; host->sg = sg = sg_next(sg); @@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host) } offset = 4 - remaining; - sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, + sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, offset, 0); 
atmci_writel(host, ATMCI_TDR, value); nbytes += offset; -- GitLab From 17e96d8516e31c3cb52cb8e2ee79d1d2e6948c11 Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Mon, 20 Aug 2018 10:54:45 +0200 Subject: [PATCH 0151/1692] mmc: android-goldfish: fix bad logic of sg_copy_{from,to}_buffer conversion The conversion to sg_copy_{from,to}_buffer has been done in the wrong way. sg_copy_to_buffer is a copy from an SG list to a linear buffer so it can't replace memcpy(dest, host->virt_base, data->sg->length) where dest is the virtual address of the SG. Same for sg_copy_from_buffer but in the opposite way. Signed-off-by: Ludovic Desroches Suggested-by: Douglas Gilbert Fixes: 53d7e098ba08 ("mmc: android-goldfish: use sg_copy_{from,to}_buffer") Signed-off-by: Ulf Hansson --- drivers/mmc/host/android-goldfish.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c index 294de177632c..61e4e2a213c9 100644 --- a/drivers/mmc/host/android-goldfish.c +++ b/drivers/mmc/host/android-goldfish.c @@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, * We don't really have DMA, so we need * to copy from our platform driver buffer */ - sg_copy_to_buffer(data->sg, 1, host->virt_base, + sg_copy_from_buffer(data->sg, 1, host->virt_base, data->sg->length); } host->data->bytes_xfered += data->sg->length; @@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, * We don't really have DMA, so we need to copy to our * platform driver buffer */ - sg_copy_from_buffer(data->sg, 1, host->virt_base, + sg_copy_to_buffer(data->sg, 1, host->virt_base, data->sg->length); } } -- GitLab From 26caddf274cf1e89fd4ce44ab2b8dbc7a7f97681 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 21 Aug 2018 15:05:55 +0300 Subject: [PATCH 0152/1692] mmc: block: Fix unsupported parallel dispatch of requests The mmc block driver does not support parallel dispatch of requests. In normal circumstances, all requests are anyway funneled through a single work item, so parallel dispatch never happens. However it can happen if there is no elevator. Fix that by detecting if a dispatch is in progress and returning busy (BLK_STS_RESOURCE) in that case Fixes: 81196976ed94 ("mmc: block: Add blk-mq support") Cc: stable@vger.kernel.org # v4.16+ Signed-off-by: Adrian Hunter Signed-off-by: Ulf Hansson --- drivers/mmc/core/queue.c | 12 +++++++----- drivers/mmc/core/queue.h | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 648eb6743ed5..6edffeed9953 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, mmc_exit_request(mq->queue, req); } -/* - * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests - * will not be dispatched in parallel. 
- */ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, spin_lock_irq(q->queue_lock); - if (mq->recovery_needed) { + if (mq->recovery_needed || mq->busy) { spin_unlock_irq(q->queue_lock); return BLK_STS_RESOURCE; } @@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, break; } + /* Parallel dispatch of requests is not supported at the moment */ + mq->busy = true; + mq->in_flight[issue_type] += 1; get_card = (mmc_tot_in_flight(mq) == 1); cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); @@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, mq->in_flight[issue_type] -= 1; if (mmc_tot_in_flight(mq) == 0) put_card = true; + mq->busy = false; spin_unlock_irq(q->queue_lock); if (put_card) mmc_put_card(card, &mq->ctx); + } else { + WRITE_ONCE(mq->busy, false); } return ret; diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 17e59d50b496..9bf3c9245075 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -81,6 +81,7 @@ struct mmc_queue { unsigned int cqe_busy; #define MMC_CQE_DCMD_BUSY BIT(0) #define MMC_CQE_QUEUE_FULL BIT(1) + bool busy; bool use_cqe; bool recovery_needed; bool in_recovery; -- GitLab From 51474eff2bc2777061ab3658e014a37dc9d7a775 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 14 Aug 2018 11:57:07 +0200 Subject: [PATCH 0153/1692] Bluetooth: Make BT_HCIUART_RTL configuration option depend on ACPI At the moment we only support ACPI enumeration for serial port attached RTL bluetooth controllers. This commit adds a dependency on ACPI to the BT_HCIUART_RTL configuration option, fixing the following warning when ACPI is not enabled: drivers/bluetooth/hci_h5.c:920:22: warning: 'rtl_vnd' defined but not used Cc: Arnd Bergmann Reported-by: Arnd Bergmann Signed-off-by: Hans de Goede Acked-by: Arnd Bergmann Acked-by: Geert Uytterhoeven Signed-off-by: Marcel Holtmann --- drivers/bluetooth/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 2df11cc08a46..845b0314ce3a 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -200,6 +200,7 @@ config BT_HCIUART_RTL depends on BT_HCIUART depends on BT_HCIUART_SERDEV depends on GPIOLIB + depends on ACPI select BT_HCIUART_3WIRE select BT_RTL help -- GitLab From addb3ffbca66954fb1d1791d2db2153c403f81af Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 14 Aug 2018 10:10:31 -0500 Subject: [PATCH 0154/1692] Bluetooth: mediatek: Fix memory leak In case memory resources for *fw* were allocated, release them before return. Addresses-Coverity-ID: 1472611 ("Resource leak") Fixes: 7237c4c9ec92 ("Bluetooth: mediatek: Add protocol support for MediaTek serial devices") Signed-off-by: Gustavo A. R. 
Silva Acked-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmtkuart.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index ed2a5c7cb77f..4593baff2bc9 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev) fw_size = fw->size; /* The size of patch header is 30 bytes, should be skip */ - if (fw_size < 30) - return -EINVAL; + if (fw_size < 30) { + err = -EINVAL; + goto free_fw; + } fw_size -= 30; fw_ptr += 30; @@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev) fw_ptr += dlen; } +free_fw: release_firmware(fw); - return err; } -- GitLab From 093dee661d6004738c4cbcbf48835c1e6c6ebae3 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 21 Aug 2018 13:58:08 +0000 Subject: [PATCH 0155/1692] sch_cake: Remove unused including Remove including that don't need it. Signed-off-by: Yue Haibing Signed-off-by: David S. Miller --- net/sched/sch_cake.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 35fc7252187c..4d26b0823cdf 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -64,7 +64,6 @@ #include #include #include -#include #include #include #include -- GitLab From c27f1e2e9f29563cb093e96261e87c1ef83aeb98 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 21 Aug 2018 14:05:42 +0000 Subject: [PATCH 0156/1692] rds: tcp: remove duplicated include from tcp.c Remove duplicated include. Signed-off-by: Yue Haibing Acked-by: Sowmini Varadhan Signed-off-by: David S. Miller --- net/rds/tcp.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 2c7b7c352d3e..b9bbcf3d6c63 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include "rds.h" -- GitLab From b93c1b5ac8643cc08bb74fa8ae21d6c63dfcb23d Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 21 Aug 2018 10:40:38 -0700 Subject: [PATCH 0157/1692] hv_netvsc: ignore devices that are not PCI Registering another device with same MAC address (such as TAP, VPN or DPDK KNI) will confuse the VF autobinding logic. Restrict the search to only run if the device is known to be a PCI attached VF. Fixes: e8ff40d4bff1 ("hv_netvsc: improve VF device matching") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 507f68190cb1..1121a1ec407c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -2039,12 +2040,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev) { struct net_device *ndev; struct net_device_context *net_device_ctx; + struct device *pdev = vf_netdev->dev.parent; struct netvsc_device *netvsc_dev; int ret; if (vf_netdev->addr_len != ETH_ALEN) return NOTIFY_DONE; + if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev)) + return NOTIFY_DONE; + /* * We will use the MAC address to locate the synthetic interface to * associate with the VF interface. 
If we don't find a matching -- GitLab From edfaf94fa705181eeb2fe0c36c0b902dedbd40f1 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:05 -0700 Subject: [PATCH 0158/1692] net_sched: improve and refactor tcf_action_put_many() tcf_action_put_many() is mostly called to clean up actions on failure path, but tcf_action_put_many(&actions[acts_deleted]) is used in the ugliest way: it passes a slice of the array and uses an additional NULL at the end to avoid out-of-bound access. acts_deleted is completely unnecessary since we can teach tcf_action_put_many() scan the whole array and checks against NULL pointer. Which also means tcf_action_delete() should set deleted action pointers to NULL to avoid double free. Fixes: 90b73b77d08e ("net: sched: change action API to use array of pointers to actions") Cc: Jiri Pirko Cc: Vlad Buslov Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/act_api.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 229d63c99be2..cd69a6afcf88 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -686,14 +686,18 @@ static int tcf_action_put(struct tc_action *p) return __tcf_action_put(p, false); } +/* Put all actions in this array, skip those NULL's. */ static void tcf_action_put_many(struct tc_action *actions[]) { int i; - for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { + for (i = 0; i < TCA_ACT_MAX_PRIO; i++) { struct tc_action *a = actions[i]; - const struct tc_action_ops *ops = a->ops; + const struct tc_action_ops *ops; + if (!a) + continue; + ops = a->ops; if (tcf_action_put(a)) module_put(ops->owner); } @@ -1176,7 +1180,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, } static int tcf_action_delete(struct net *net, struct tc_action *actions[], - int *acts_deleted, struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack) { u32 act_index; int ret, i; @@ -1196,20 +1200,17 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[], } else { /* now do the delete */ ret = ops->delete(net, act_index); - if (ret < 0) { - *acts_deleted = i + 1; + if (ret < 0) return ret; - } } + actions[i] = NULL; } - *acts_deleted = i; return 0; } static int tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], - int *acts_deleted, u32 portid, size_t attr_size, - struct netlink_ext_ack *extack) + u32 portid, size_t attr_size, struct netlink_ext_ack *extack) { int ret; struct sk_buff *skb; @@ -1227,7 +1228,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], } /* now do the delete */ - ret = tcf_action_delete(net, actions, acts_deleted, extack); + ret = tcf_action_delete(net, actions, extack); if (ret < 0) { NL_SET_ERR_MSG(extack, "Failed to delete TC action"); kfree_skb(skb); @@ -1249,8 +1250,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; struct tc_action *act; size_t attr_size = 0; - struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {}; - int acts_deleted = 0; + struct tc_action *actions[TCA_ACT_MAX_PRIO] = {}; ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); if (ret < 0) @@ -1280,14 +1280,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, if (event == RTM_GETACTION) ret = tcf_get_notify(net, portid, n, actions, event, extack); else { /* delete */ - ret = tcf_del_notify(net, n, actions, &acts_deleted, portid, - attr_size, 
extack); + ret = tcf_del_notify(net, n, actions, portid, attr_size, extack); if (ret) goto err; - return ret; + return 0; } err: - tcf_action_put_many(&actions[acts_deleted]); + tcf_action_put_many(actions); return ret; } -- GitLab From 97a3f84f2c84f81b859aedd2c186df09c2ee21a6 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:06 -0700 Subject: [PATCH 0159/1692] net_sched: remove unnecessary ops->delete() All ops->delete() wants is getting the tn->idrinfo, but we already have tc_action before calling ops->delete(), and tc_action has a pointer ->idrinfo. More importantly, each type of action does the same thing, that is, just calling tcf_idr_delete_index(). So it can be just removed. Fixes: b409074e6693 ("net: sched: add 'delete' function to action ops") Cc: Jiri Pirko Cc: Vlad Buslov Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- include/net/act_api.h | 2 -- net/sched/act_api.c | 15 +++++++-------- net/sched/act_bpf.c | 8 -------- net/sched/act_connmark.c | 8 -------- net/sched/act_csum.c | 8 -------- net/sched/act_gact.c | 8 -------- net/sched/act_ife.c | 8 -------- net/sched/act_ipt.c | 16 ---------------- net/sched/act_mirred.c | 8 -------- net/sched/act_nat.c | 8 -------- net/sched/act_pedit.c | 8 -------- net/sched/act_police.c | 8 -------- net/sched/act_sample.c | 8 -------- net/sched/act_simple.c | 8 -------- net/sched/act_skbedit.c | 8 -------- net/sched/act_skbmod.c | 8 -------- net/sched/act_tunnel_key.c | 8 -------- net/sched/act_vlan.c | 8 -------- 18 files changed, 7 insertions(+), 146 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index 1ad5b19e83a9..e32708491d83 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -102,7 +102,6 @@ struct tc_action_ops { size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a); void (*put_dev)(struct net_device *dev); - int (*delete)(struct net *net, u32 index); }; struct tc_action_net { @@ -158,7 +157,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a); void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, struct tc_action **a, int bind); -int tcf_idr_delete_index(struct tc_action_net *tn, u32 index); int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); static inline int tcf_idr_release(struct tc_action *a, bool bind) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index cd69a6afcf88..00bf7d2b0bdd 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -337,9 +337,8 @@ bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, } EXPORT_SYMBOL(tcf_idr_check); -int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) +static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index) { - struct tcf_idrinfo *idrinfo = tn->idrinfo; struct tc_action *p; int ret = 0; @@ -370,7 +369,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index) spin_unlock(&idrinfo->lock); return ret; } -EXPORT_SYMBOL(tcf_idr_delete_index); int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, @@ -1182,24 +1180,25 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, static int tcf_action_delete(struct net *net, struct tc_action *actions[], struct netlink_ext_ack *extack) { - u32 act_index; - int ret, i; + int i; for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { struct tc_action *a = actions[i]; const 
struct tc_action_ops *ops = a->ops; - /* Actions can be deleted concurrently so we must save their * type and id to search again after reference is released. */ - act_index = a->tcfa_index; + struct tcf_idrinfo *idrinfo = a->idrinfo; + u32 act_index = a->tcfa_index; if (tcf_action_put(a)) { /* last reference, action was deleted concurrently */ module_put(ops->owner); } else { + int ret; + /* now do the delete */ - ret = ops->delete(net, act_index); + ret = tcf_idr_delete_index(idrinfo, act_index); if (ret < 0) return ret; } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index d30b23e42436..0c68bc9cf0b4 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_bpf_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, bpf_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_bpf_ops __read_mostly = { .kind = "bpf", .type = TCA_ACT_BPF, @@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = { .init = tcf_bpf_init, .walk = tcf_bpf_walker, .lookup = tcf_bpf_search, - .delete = tcf_bpf_delete, .size = sizeof(struct tcf_bpf), }; diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 54c0bf54f2ac..6f0f273f1139 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_connmark_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, connmark_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_connmark_ops = { .kind = "connmark", .type = TCA_ACT_CONNMARK, @@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = { .init = tcf_connmark_init, .walk = tcf_connmark_walker, .lookup = tcf_connmark_search, - .delete = tcf_connmark_delete, .size = sizeof(struct tcf_connmark_info), }; diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index e698d3fe2080..b8a67ae3105a 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act) return nla_total_size(sizeof(struct tc_csum)); } -static int tcf_csum_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, csum_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_csum_ops = { .kind = "csum", .type = TCA_ACT_CSUM, @@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = { .walk = tcf_csum_walker, .lookup = tcf_csum_search, .get_fill_size = tcf_csum_get_fill_size, - .delete = tcf_csum_delete, .size = sizeof(struct tcf_csum), }; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 6a3f25a8ffb3..cd1d9bd32ef9 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act) return sz; } -static int tcf_gact_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, gact_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_gact_ops = { .kind = "gact", .type = TCA_ACT_GACT, @@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = { .walk = tcf_gact_walker, .lookup = tcf_gact_search, .get_fill_size = tcf_gact_get_fill_size, - .delete = tcf_gact_delete, .size 
= sizeof(struct tcf_gact), }; diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index d1081bdf1bdb..92fcf8ba5bca 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -853,13 +853,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_ife_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, ife_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_ife_ops = { .kind = "ife", .type = TCA_ACT_IFE, @@ -870,7 +863,6 @@ static struct tc_action_ops act_ife_ops = { .init = tcf_ife_init, .walk = tcf_ife_walker, .lookup = tcf_ife_search, - .delete = tcf_ife_delete, .size = sizeof(struct tcf_ife_info), }; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 51f235bbeb5b..23273b5303fd 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_ipt_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, ipt_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_ipt_ops = { .kind = "ipt", .type = TCA_ACT_IPT, @@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = { .init = tcf_ipt_init, .walk = tcf_ipt_walker, .lookup = tcf_ipt_search, - .delete = tcf_ipt_delete, .size = sizeof(struct tcf_ipt), }; @@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_xt_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, xt_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_xt_ops = { .kind = "xt", .type = TCA_ACT_XT, @@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = { .init = tcf_xt_init, .walk = tcf_xt_walker, .lookup = tcf_xt_search, - .delete = tcf_xt_delete, .size = sizeof(struct tcf_ipt), }; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 38fd20f10f67..8bf66d0a6800 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev) dev_put(dev); } -static int tcf_mirred_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, mirred_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_mirred_ops = { .kind = "mirred", .type = TCA_ACT_MIRRED, @@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = { .size = sizeof(struct tcf_mirred), .get_dev = tcf_mirred_get_dev, .put_dev = tcf_mirred_put_dev, - .delete = tcf_mirred_delete, }; static __net_init int mirred_init_net(struct net *net) diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 822e903bfc25..4313aa102440 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_nat_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, nat_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_nat_ops = { .kind = "nat", .type = TCA_ACT_NAT, @@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = { .init = tcf_nat_init, .walk = tcf_nat_walker, .lookup = tcf_nat_search, - .delete = tcf_nat_delete, .size = sizeof(struct tcf_nat), }; 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 8a7a7cb94e83..107034070019 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -460,13 +460,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_pedit_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, pedit_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_pedit_ops = { .kind = "pedit", .type = TCA_ACT_PEDIT, @@ -477,7 +470,6 @@ static struct tc_action_ops act_pedit_ops = { .init = tcf_pedit_init, .walk = tcf_pedit_walker, .lookup = tcf_pedit_search, - .delete = tcf_pedit_delete, .size = sizeof(struct tcf_pedit), }; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 06f0742db593..5d8bfa878477 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_police_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, police_net_id); - - return tcf_idr_delete_index(tn, index); -} - MODULE_AUTHOR("Alexey Kuznetsov"); MODULE_DESCRIPTION("Policing actions"); MODULE_LICENSE("GPL"); @@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = { .init = tcf_police_init, .walk = tcf_police_walker, .lookup = tcf_police_search, - .delete = tcf_police_delete, .size = sizeof(struct tcf_police), }; diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 207b4132d1b0..44e9c00657bc 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_sample_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, sample_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_sample_ops = { .kind = "sample", .type = TCA_ACT_SAMPLE, @@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = { .cleanup = tcf_sample_cleanup, .walk = tcf_sample_walker, .lookup = tcf_sample_search, - .delete = tcf_sample_delete, .size = sizeof(struct tcf_sample), }; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index e616523ba3c1..52400d49f81f 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_simp_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, simp_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_simp_ops = { .kind = "simple", .type = TCA_ACT_SIMP, @@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = { .init = tcf_simp_init, .walk = tcf_simp_walker, .lookup = tcf_simp_search, - .delete = tcf_simp_delete, .size = sizeof(struct tcf_defact), }; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 926d7bc4a89d..73e44ce2a883 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_skbedit_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, skbedit_net_id); - - return tcf_idr_delete_index(tn, index); 
-} - static struct tc_action_ops act_skbedit_ops = { .kind = "skbedit", .type = TCA_ACT_SKBEDIT, @@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = { .cleanup = tcf_skbedit_cleanup, .walk = tcf_skbedit_walker, .lookup = tcf_skbedit_search, - .delete = tcf_skbedit_delete, .size = sizeof(struct tcf_skbedit), }; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index d6a1af0c4171..588077fafd6c 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_skbmod_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, skbmod_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_skbmod_ops = { .kind = "skbmod", .type = TCA_ACT_SKBMOD, @@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = { .cleanup = tcf_skbmod_cleanup, .walk = tcf_skbmod_walker, .lookup = tcf_skbmod_search, - .delete = tcf_skbmod_delete, .size = sizeof(struct tcf_skbmod), }; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 8f09cf08d8fe..420759153d5f 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -548,13 +548,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tunnel_key_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_tunnel_key_ops = { .kind = "tunnel_key", .type = TCA_ACT_TUNNEL_KEY, @@ -565,7 +558,6 @@ static struct tc_action_ops act_tunnel_key_ops = { .cleanup = tunnel_key_release, .walk = tunnel_key_walker, .lookup = tunnel_key_search, - .delete = tunnel_key_delete, .size = sizeof(struct tcf_tunnel_key), }; diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 209e70ad2c09..033d273afe50 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index, return tcf_idr_search(tn, a, index); } -static int tcf_vlan_delete(struct net *net, u32 index) -{ - struct tc_action_net *tn = net_generic(net, vlan_net_id); - - return tcf_idr_delete_index(tn, index); -} - static struct tc_action_ops act_vlan_ops = { .kind = "vlan", .type = TCA_ACT_VLAN, @@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = { .cleanup = tcf_vlan_cleanup, .walk = tcf_vlan_walker, .lookup = tcf_vlan_search, - .delete = tcf_vlan_delete, .size = sizeof(struct tcf_vlan), }; -- GitLab From b144e7ec51a132eac00a68bf897b6349d810022f Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:07 -0700 Subject: [PATCH 0160/1692] net_sched: remove unused parameter for tcf_action_delete() Fixes: 16af6067392c ("net: sched: implement reference counted action release") Cc: Jiri Pirko Cc: Vlad Buslov Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- net/sched/act_api.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 00bf7d2b0bdd..ba55226928a3 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1177,8 +1177,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, return err; } -static int tcf_action_delete(struct net *net, struct tc_action *actions[], - struct netlink_ext_ack *extack) +static int tcf_action_delete(struct net *net, struct tc_action *actions[]) { int i; @@ -1227,7 +1226,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], } /* now do the delete */ - ret = tcf_action_delete(net, actions, extack); + ret = tcf_action_delete(net, actions); if (ret < 0) { NL_SET_ERR_MSG(extack, "Failed to delete TC action"); kfree_skb(skb); -- GitLab From 7d485c451fc82f8ae431cdb379521bc6d0641064 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:08 -0700 Subject: [PATCH 0161/1692] net_sched: remove unused tcf_idr_check() tcf_idr_check() is replaced by tcf_idr_check_alloc(), and __tcf_idr_check() now can be folded into tcf_idr_search(). Fixes: 0190c1d452a9 ("net: sched: atomically check-allocate action") Cc: Jiri Pirko Cc: Vlad Buslov Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- include/net/act_api.h | 2 -- net/sched/act_api.c | 22 +++------------------- 2 files changed, 3 insertions(+), 21 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index e32708491d83..eaa0e8b93d5b 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -147,8 +147,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, const struct tc_action_ops *ops, struct netlink_ext_ack *extack); int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index); -bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, - int bind); int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, int bind, bool cpustats); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index ba55226928a3..d76948f02a02 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, } EXPORT_SYMBOL(tcf_generic_walker); -static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, - struct tc_action **a, int bind) +int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) { struct tcf_idrinfo *idrinfo = tn->idrinfo; struct tc_action *p; spin_lock(&idrinfo->lock); p = idr_find(&idrinfo->action_idr, index); - if (IS_ERR(p)) { + if (IS_ERR(p)) p = NULL; - } else if (p) { + else if (p) refcount_inc(&p->tcfa_refcnt); - if (bind) - atomic_inc(&p->tcfa_bindcnt); - } spin_unlock(&idrinfo->lock); if (p) { @@ -323,20 +319,8 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, } return false; } - -int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) -{ - return __tcf_idr_check(tn, index, a, 0); -} EXPORT_SYMBOL(tcf_idr_search); -bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, - int bind) -{ - return __tcf_idr_check(tn, index, a, bind); -} -EXPORT_SYMBOL(tcf_idr_check); - static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index) { struct tc_action *p; -- GitLab From 244cd96adb5f5ab39551081fb1f9009a54bb12ee Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:09 -0700 Subject: [PATCH 
0162/1692] net_sched: remove list_head from tc_action After commit 90b73b77d08e, list_head is no longer needed. Now we just need to convert the list iteration to array iteration for drivers. Fixes: 90b73b77d08e ("net: sched: change action API to use array of pointers to actions") Cc: Jiri Pirko Cc: Vlad Buslov Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 6 ++--- .../ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 10 +++----- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 5 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 ++--- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 19 +++++++------- .../net/ethernet/mellanox/mlxsw/spectrum.c | 3 +-- .../ethernet/mellanox/mlxsw/spectrum_flower.c | 6 ++--- .../ethernet/netronome/nfp/flower/action.c | 6 ++--- .../net/ethernet/qlogic/qede/qede_filter.c | 6 ++--- .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 5 ++-- include/net/act_api.h | 1 - include/net/pkt_cls.h | 25 +++++++++++-------- net/dsa/slave.c | 4 +-- net/sched/act_api.c | 1 - 14 files changed, 43 insertions(+), 60 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 139d96c5a023..092c817f8f11 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, struct tcf_exts *tc_exts) { const struct tc_action *tc_act; - LIST_HEAD(tc_actions); - int rc; + int i, rc; if (!tcf_exts_has_actions(tc_exts)) { netdev_info(bp->dev, "no actions"); return -EINVAL; } - tcf_exts_to_list(tc_exts, &tc_actions); - list_for_each_entry(tc_act, &tc_actions, list) { + tcf_exts_for_each_action(i, tc_act, tc_exts) { /* Drop action */ if (is_tcf_gact_shot(tc_act)) { actions->flags |= BNXT_TC_ACTION_FLAG_DROP; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 623f73dd7738..c116f96956fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in, struct ch_filter_specification *fs) { const struct tc_action *a; - LIST_HEAD(actions); + int i; - tcf_exts_to_list(cls->exts, &actions); - list_for_each_entry(a, &actions, list) { + tcf_exts_for_each_action(i, a, cls->exts) { if (is_tcf_gact_ok(a)) { fs->action = FILTER_PASS; } else if (is_tcf_gact_shot(a)) { @@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, bool act_redir = false; bool act_pedit = false; bool act_vlan = false; - LIST_HEAD(actions); + int i; - tcf_exts_to_list(cls->exts, &actions); - list_for_each_entry(a, &actions, list) { + tcf_exts_for_each_action(i, a, cls->exts) { if (is_tcf_gact_ok(a)) { /* Do nothing */ } else if (is_tcf_gact_shot(a)) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 18eb2aedd4cb..c7d2b4dc7568 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap, unsigned int num_actions = 0; const struct tc_action *a; struct tcf_exts *exts; - LIST_HEAD(actions); + int i; exts = cls->knode.exts; if (!tcf_exts_has_actions(exts)) return -EINVAL; - tcf_exts_to_list(exts, &actions); - list_for_each_entry(a, &actions, list) { + 
tcf_exts_for_each_action(i, a, exts) { /* Don't allow more than one action per rule. */ if (num_actions) return -EINVAL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 447098005490..af4c9ae7f432 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9171,14 +9171,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, struct tcf_exts *exts, u64 *action, u8 *queue) { const struct tc_action *a; - LIST_HEAD(actions); + int i; if (!tcf_exts_has_actions(exts)) return -EINVAL; - tcf_exts_to_list(exts, &actions); - list_for_each_entry(a, &actions, list) { - + tcf_exts_for_each_action(i, a, exts) { /* Drop action */ if (is_tcf_gact_shot(a)) { *action = IXGBE_FDIR_DROP_QUEUE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9131a1376e7d..9fed54017659 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, goto out_ok; modify_ip_header = false; - tcf_exts_to_list(exts, &actions); - list_for_each_entry(a, &actions, list) { + tcf_exts_for_each_action(i, a, exts) { + int k; + if (!is_tcf_pedit(a)) continue; nkeys = tcf_pedit_nkeys(a); - for (i = 0; i < nkeys; i++) { - htype = tcf_pedit_htype(a, i); + for (k = 0; k < nkeys; k++) { + htype = tcf_pedit_htype(a, k); if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { modify_ip_header = true; @@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, const struct tc_action *a; LIST_HEAD(actions); u32 action = 0; - int err; + int err, i; if (!tcf_exts_has_actions(exts)) return -EINVAL; attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - tcf_exts_to_list(exts, &actions); - list_for_each_entry(a, &actions, list) { + tcf_exts_for_each_action(i, a, exts) { if (is_tcf_gact_shot(a)) { action |= MLX5_FLOW_CONTEXT_ACTION_DROP; if (MLX5_CAP_FLOWTABLE(priv->mdev, @@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, LIST_HEAD(actions); bool encap = false; u32 action = 0; - int err; + int err, i; if (!tcf_exts_has_actions(exts)) return -EINVAL; @@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, attr->in_rep = rpriv->rep; attr->in_mdev = priv->mdev; - tcf_exts_to_list(exts, &actions); - list_for_each_entry(a, &actions, list) { + tcf_exts_for_each_action(i, a, exts) { if (is_tcf_gact_shot(a)) { action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6070d1591d1e..930700413b1d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, return -ENOMEM; mall_tc_entry->cookie = f->cookie; - tcf_exts_to_list(f->exts, &actions); - a = list_first_entry(&actions, struct tc_action, list); + a = tcf_exts_first_action(f->exts); if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 
ebd1b24ebaa5..8d211972c5e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct netlink_ext_ack *extack) { const struct tc_action *a; - LIST_HEAD(actions); - int err; + int err, i; if (!tcf_exts_has_actions(exts)) return 0; @@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (err) return err; - tcf_exts_to_list(exts, &actions); - list_for_each_entry(a, &actions, list) { + tcf_exts_for_each_action(i, a, exts) { if (is_tcf_gact_ok(a)) { err = mlxsw_sp_acl_rulei_act_terminate(rulei); if (err) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 0ba0356ec4e6..9044496803e6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -796,11 +796,10 @@ int nfp_flower_compile_action(struct nfp_app *app, struct net_device *netdev, struct nfp_fl_payload *nfp_flow) { - int act_len, act_cnt, err, tun_out_cnt, out_cnt; + int act_len, act_cnt, err, tun_out_cnt, out_cnt, i; enum nfp_flower_tun_type tun_type; const struct tc_action *a; u32 csum_updated = 0; - LIST_HEAD(actions); memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); nfp_flow->meta.act_len = 0; @@ -810,8 +809,7 @@ int nfp_flower_compile_action(struct nfp_app *app, tun_out_cnt = 0; out_cnt = 0; - tcf_exts_to_list(flow->exts, &actions); - list_for_each_entry(a, &actions, list) { + tcf_exts_for_each_action(i, a, flow->exts) { err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, &out_cnt, &csum_updated); diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 9673d19308e6..b16ce7d93caf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -2006,18 +2006,16 @@ int qede_get_arfs_filter_count(struct qede_dev *edev) static int qede_parse_actions(struct qede_dev *edev, struct tcf_exts *exts) { - int rc = -EINVAL, num_act = 0; + int rc = -EINVAL, num_act = 0, i; const struct tc_action *a; bool is_drop = false; - LIST_HEAD(actions); if (!tcf_exts_has_actions(exts)) { DP_NOTICE(edev, "No tc actions received\n"); return rc; } - tcf_exts_to_list(exts, &actions); - list_for_each_entry(a, &actions, list) { + tcf_exts_for_each_action(i, a, exts) { num_act++; if (is_tcf_gact_shot(a)) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 1a96dd9c1091..531294f4978b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry, struct stmmac_tc_entry *action_entry = entry; const struct tc_action *act; struct tcf_exts *exts; - LIST_HEAD(actions); + int i; exts = cls->knode.exts; if (!tcf_exts_has_actions(exts)) @@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry, if (frag) action_entry = frag; - tcf_exts_to_list(exts, &actions); - list_for_each_entry(act, &actions, list) { + tcf_exts_for_each_action(i, act, exts) { /* Accept */ if (is_tcf_gact_ok(act)) { action_entry->val.af = 1; diff --git a/include/net/act_api.h b/include/net/act_api.h index eaa0e8b93d5b..f9c4b871af88 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -23,7 +23,6 @@ struct 
tc_action { const struct tc_action_ops *ops; __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ __u32 order; - struct list_head list; struct tcf_idrinfo *idrinfo; u32 tcfa_index; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ef727f71336e..c17d51865469 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) #endif } -static inline void tcf_exts_to_list(const struct tcf_exts *exts, - struct list_head *actions) -{ #ifdef CONFIG_NET_CLS_ACT - int i; - - for (i = 0; i < exts->nr_actions; i++) { - struct tc_action *a = exts->actions[i]; - - list_add_tail(&a->list, actions); - } +#define tcf_exts_for_each_action(i, a, exts) \ + for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++) +#else +#define tcf_exts_for_each_action(i, a, exts) \ + for (; 0; ) #endif -} static inline void tcf_exts_stats_update(const struct tcf_exts *exts, @@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts) #endif } +static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts) +{ +#ifdef CONFIG_NET_CLS_ACT + return exts->actions[0]; +#else + return NULL; +#endif +} + /** * tcf_exts_exec - execute tc filter extensions * @skb: socket buffer diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 962c4fd338ba..1c45c1d6d241 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev, const struct tc_action *a; struct dsa_port *to_dp; int err = -EOPNOTSUPP; - LIST_HEAD(actions); if (!ds->ops->port_mirror_add) return err; @@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev, if (!tcf_exts_has_one_action(cls->exts)) return err; - tcf_exts_to_list(cls->exts, &actions); - a = list_first_entry(&actions, struct tc_action, list); + a = tcf_exts_first_action(cls->exts); if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { struct dsa_mall_mirror_tc_entry *mirror; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index d76948f02a02..db83dac1e7f4 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -391,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, p->idrinfo = idrinfo; p->ops = ops; - INIT_LIST_HEAD(&p->list); *a = p; return 0; err3: -- GitLab From a0c2e90fe131d9a7440ac05c9c31fc35dfac2fa8 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:10 -0700 Subject: [PATCH 0163/1692] net_sched: remove unused tcfa_capab Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- include/net/act_api.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/net/act_api.h b/include/net/act_api.h index f9c4b871af88..970303448c90 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -28,7 +28,6 @@ struct tc_action { u32 tcfa_index; refcount_t tcfa_refcnt; atomic_t tcfa_bindcnt; - u32 tcfa_capab; int tcfa_action; struct tcf_t tcfa_tm; struct gnet_stats_basic_packed tcfa_bstats; @@ -43,7 +42,6 @@ struct tc_action { #define tcf_index common.tcfa_index #define tcf_refcnt common.tcfa_refcnt #define tcf_bindcnt common.tcfa_bindcnt -#define tcf_capab common.tcfa_capab #define tcf_action common.tcfa_action #define tcf_tm common.tcfa_tm #define tcf_bstats common.tcfa_bstats -- GitLab From 8ce5be1c899d31f3cd047e5f707cd9dbfb81e076 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:11 -0700 Subject: [PATCH 0164/1692] Revert "net: sched: act_ife: disable bh when taking ife_mod_lock" This reverts commit 42c625a486f3 ("net: sched: act_ife: disable bh when taking ife_mod_lock"), because what ife_mod_lock protects is absolutely not touched in rate est timer BH context, they have no race. A better fix is following up. Cc: Vlad Buslov Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/act_ife.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 92fcf8ba5bca..9decbb74b3ac 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid) { struct tcf_meta_ops *o; - read_lock_bh(&ife_mod_lock); + read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { if (o->metaid == metaid) { if (!try_module_get(o->owner)) o = NULL; - read_unlock_bh(&ife_mod_lock); + read_unlock(&ife_mod_lock); return o; } } - read_unlock_bh(&ife_mod_lock); + read_unlock(&ife_mod_lock); return NULL; } @@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops) !mops->get || !mops->alloc) return -EINVAL; - write_lock_bh(&ife_mod_lock); + write_lock(&ife_mod_lock); list_for_each_entry(m, &ifeoplist, list) { if (m->metaid == mops->metaid || (strcmp(mops->name, m->name) == 0)) { - write_unlock_bh(&ife_mod_lock); + write_unlock(&ife_mod_lock); return -EEXIST; } } @@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops) mops->release = ife_release_meta_gen; list_add_tail(&mops->list, &ifeoplist); - write_unlock_bh(&ife_mod_lock); + write_unlock(&ife_mod_lock); return 0; } EXPORT_SYMBOL_GPL(unregister_ife_op); @@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops) struct tcf_meta_ops *m; int err = -ENOENT; - write_lock_bh(&ife_mod_lock); + write_lock(&ife_mod_lock); list_for_each_entry(m, &ifeoplist, list) { if (m->metaid == mops->metaid) { list_del(&mops->list); @@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops) break; } } - write_unlock_bh(&ife_mod_lock); + write_unlock(&ife_mod_lock); return err; } @@ -343,13 +343,13 @@ static int use_all_metadata(struct tcf_ife_info *ife) int rc = 0; int installed = 0; - read_lock_bh(&ife_mod_lock); + read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { rc = add_metainfo(ife, o->metaid, NULL, 0, true); if (rc == 0) installed += 1; } - read_unlock_bh(&ife_mod_lock); + read_unlock(&ife_mod_lock); if (installed) return 0; -- GitLab From 4e407ff5cd67ec76eeeea1deec227b7982dc7f66 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:12 -0700 Subject: [PATCH 
0165/1692] act_ife: move tcfa_lock down to where necessary The only time we need to take tcfa_lock is when adding a new metainfo to an existing ife->metalist. We don't need to take tcfa_lock so early and so broadly in tcf_ife_init(). This means we can always take ife_mod_lock first, avoid the reverse locking ordering warning as reported by Vlad. Reported-by: Vlad Buslov Tested-by: Vlad Buslov Cc: Vlad Buslov Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/act_ife.c | 38 +++++++++++++------------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 9decbb74b3ac..244a8cf48183 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid) #endif /* called when adding new meta information - * under ife->tcf_lock for existing action */ -static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, - void *val, int len, bool exists, - bool rtnl_held) +static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held) { struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret = 0; @@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, if (!ops) { ret = -ENOENT; #ifdef CONFIG_MODULES - if (exists) - spin_unlock_bh(&ife->tcf_lock); if (rtnl_held) rtnl_unlock(); request_module("ife-meta-%s", ife_meta_id2name(metaid)); if (rtnl_held) rtnl_lock(); - if (exists) - spin_lock_bh(&ife->tcf_lock); ops = find_ife_oplist(metaid); #endif } @@ -302,10 +295,9 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, } /* called when adding new meta information - * under ife->tcf_lock for existing action */ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, - int len, bool atomic) + int len, bool atomic, bool exists) { struct tcf_meta_info *mi = NULL; struct tcf_meta_ops *ops = find_ife_oplist(metaid); @@ -332,12 +324,16 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, } } + if (exists) + spin_lock_bh(&ife->tcf_lock); list_add_tail(&mi->metalist, &ife->metalist); + if (exists) + spin_unlock_bh(&ife->tcf_lock); return ret; } -static int use_all_metadata(struct tcf_ife_info *ife) +static int use_all_metadata(struct tcf_ife_info *ife, bool exists) { struct tcf_meta_ops *o; int rc = 0; @@ -345,7 +341,7 @@ static int use_all_metadata(struct tcf_ife_info *ife) read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { - rc = add_metainfo(ife, o->metaid, NULL, 0, true); + rc = add_metainfo(ife, o->metaid, NULL, 0, true, exists); if (rc == 0) installed += 1; } @@ -422,7 +418,6 @@ static void tcf_ife_cleanup(struct tc_action *a) kfree_rcu(p, rcu); } -/* under ife->tcf_lock for existing action */ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, bool exists, bool rtnl_held) { @@ -436,12 +431,11 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, val = nla_data(tb[i]); len = nla_len(tb[i]); - rc = load_metaops_and_vet(ife, i, val, len, exists, - rtnl_held); + rc = load_metaops_and_vet(i, val, len, rtnl_held); if (rc != 0) return rc; - rc = add_metainfo(ife, i, val, len, exists); + rc = add_metainfo(ife, i, val, len, false, exists); if (rc) return rc; } @@ -540,8 +534,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, p->eth_type = ife_type; } - if (exists) - spin_lock_bh(&ife->tcf_lock); if (ret == ACT_P_CREATED) 
INIT_LIST_HEAD(&ife->metalist); @@ -551,10 +543,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, NULL, NULL); if (err) { metadata_parse_err: - if (exists) - spin_unlock_bh(&ife->tcf_lock); tcf_idr_release(*a, bind); - kfree(p); return err; } @@ -569,17 +558,16 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, * as we can. You better have at least one else we are * going to bail out */ - err = use_all_metadata(ife); + err = use_all_metadata(ife, exists); if (err) { - if (exists) - spin_unlock_bh(&ife->tcf_lock); tcf_idr_release(*a, bind); - kfree(p); return err; } } + if (exists) + spin_lock_bh(&ife->tcf_lock); ife->tcf_action = parm->action; /* protected by tcf_lock when modifying existing action */ rcu_swap_protected(ife->params, p, 1); -- GitLab From 5ffe57da29b3802baeddaa40909682bbb4cb4d48 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 19 Aug 2018 12:22:13 -0700 Subject: [PATCH 0166/1692] act_ife: fix a potential deadlock use_all_metadata() acquires read_lock(&ife_mod_lock), then calls add_metainfo() which calls find_ife_oplist() which acquires the same lock again. Deadlock! Introduce __add_metainfo() which accepts struct tcf_meta_ops *ops as an additional parameter and let its callers to decide how to find it. For use_all_metadata(), it already has ops, no need to find it again, just call __add_metainfo() directly. And, as ife_mod_lock is only needed for find_ife_oplist(), this means we can make non-atomic allocation for populate_metalist() now. Fixes: 817e9f2c5c26 ("act_ife: acquire ife_mod_lock before reading ifeoplist") Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/act_ife.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 244a8cf48183..196430aefe87 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -296,22 +296,16 @@ static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held) /* called when adding new meta information */ -static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, - int len, bool atomic, bool exists) +static int __add_metainfo(const struct tcf_meta_ops *ops, + struct tcf_ife_info *ife, u32 metaid, void *metaval, + int len, bool atomic, bool exists) { struct tcf_meta_info *mi = NULL; - struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret = 0; - if (!ops) - return -ENOENT; - mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); - if (!mi) { - /*put back what find_ife_oplist took */ - module_put(ops->owner); + if (!mi) return -ENOMEM; - } mi->metaid = metaid; mi->ops = ops; @@ -319,7 +313,6 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, ret = ops->alloc(mi, metaval, atomic ? 
GFP_ATOMIC : GFP_KERNEL); if (ret != 0) { kfree(mi); - module_put(ops->owner); return ret; } } @@ -333,6 +326,21 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, return ret; } +static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, + int len, bool exists) +{ + const struct tcf_meta_ops *ops = find_ife_oplist(metaid); + int ret; + + if (!ops) + return -ENOENT; + ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists); + if (ret) + /*put back what find_ife_oplist took */ + module_put(ops->owner); + return ret; +} + static int use_all_metadata(struct tcf_ife_info *ife, bool exists) { struct tcf_meta_ops *o; @@ -341,7 +349,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists) read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { - rc = add_metainfo(ife, o->metaid, NULL, 0, true, exists); + rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists); if (rc == 0) installed += 1; } @@ -435,7 +443,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, if (rc != 0) return rc; - rc = add_metainfo(ife, i, val, len, false, exists); + rc = add_metainfo(ife, i, val, len, exists); if (rc) return rc; } -- GitLab From 96c26e04581667e3cd17ed74c2fc3499afea49b8 Mon Sep 17 00:00:00 2001 From: Prashant Bhole Date: Mon, 20 Aug 2018 09:54:25 +0900 Subject: [PATCH 0167/1692] xsk: fix return value of xdp_umem_assign_dev() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit s/ENOTSUPP/EOPNOTSUPP/ in function umem_assign_dev(). This function's return value is directly returned by xsk_bind(). EOPNOTSUPP is bind()'s possible return value. Fixes: f734607e819b ("xsk: refactor xdp_umem_assign_dev()") Signed-off-by: Prashant Bhole Acked-by: Song Liu Acked-by: Björn Töpel Signed-off-by: Daniel Borkmann --- net/xdp/xdp_umem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 911ca6d3cb5a..bfe2dbea480b 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, return 0; if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) - return force_zc ? -ENOTSUPP : 0; /* fail or fallback */ + return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */ bpf.command = XDP_QUERY_XSK_UMEM; rtnl_lock(); err = xdp_umem_query(dev, queue_id); if (err) { - err = err < 0 ? -ENOTSUPP : -EBUSY; + err = err < 0 ? -EOPNOTSUPP : -EBUSY; goto err_rtnl_unlock; } -- GitLab From 63ec132d5b60a0d504a82ac0356557112dfbb114 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Tue, 21 Aug 2018 15:11:54 -0700 Subject: [PATCH 0168/1692] drm/i915/psr: Print PSR_STATUS when PSR idle wait times out. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Knowing the status of the PSR HW state machine is useful for debug, especially since we are seeing errors with PSR2 in CI. 
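For illustration, a minimal standalone sketch of the wait-and-report pattern this patch switches to: the poll helper always hands the last observed register value back through an out parameter so the caller can log the raw status when the wait times out. Function names, masks and values below are invented for the sketch and do not match the i915 helpers.

#include <stdio.h>
#include <stdint.h>

/* Poll until (reg & mask) == idle_val or tries run out; always report the
 * last value seen through *out_value so a timeout can be logged with it. */
static int wait_for_idle(uint32_t (*read_reg)(void), uint32_t mask,
                         uint32_t idle_val, int tries, uint32_t *out_value)
{
        uint32_t val = read_reg();

        while (tries-- && (val & mask) != idle_val)
                val = read_reg();

        *out_value = val;
        return (val & mask) == idle_val ? 0 : -1;  /* the kernel helper reports a timeout error */
}

static uint32_t fake_psr_status(void)
{
        return 0x2;     /* pretend the state machine is stuck mid-transition */
}

int main(void)
{
        uint32_t status;

        if (wait_for_idle(fake_psr_status, 0x7, 0x0, 3, &status))
                printf("PSR idle timed out 0x%x, atomic update may fail\n", status);
        return 0;
}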
Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180821221156.2442-1-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_drv.h | 3 ++- drivers/gpu/drm/i915/intel_psr.c | 9 ++++++--- drivers/gpu/drm/i915/intel_sprite.c | 6 ++++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 35dd72fd0152..ed35632f1554 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1947,7 +1947,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug); void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); void intel_psr_short_pulse(struct intel_dp *intel_dp); -int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state); +int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, + u32 *out_value); /* intel_runtime_pm.c */ int intel_power_domains_init(struct drm_i915_private *); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 7560c65f50ad..7980f8120aaa 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -766,7 +766,8 @@ void intel_psr_disable(struct intel_dp *intel_dp, cancel_work_sync(&dev_priv->psr.work); } -int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) +int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, + u32 *out_value) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -799,8 +800,10 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) * 6 ms of exit training time + 1.5 ms of aux channel * handshake. 50 msec is defesive enough to cover everything. */ - return intel_wait_for_register(dev_priv, reg, mask, - EDP_PSR_STATUS_STATE_IDLE, 50); + + return __intel_wait_for_register(dev_priv, reg, mask, + EDP_PSR_STATUS_STATE_IDLE, 2, 50, + out_value); } static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index f7026e887fa9..774bfb03c5d9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -83,6 +83,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); DEFINE_WAIT(wait); + u32 psr_status; vblank_start = adjusted_mode->crtc_vblank_start; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -104,8 +105,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) * VBL interrupts will start the PSR exit and prevent a PSR * re-entry as well. 
*/ - if (intel_psr_wait_for_idle(new_crtc_state)) - DRM_ERROR("PSR idle timed out, atomic update may fail\n"); + if (intel_psr_wait_for_idle(new_crtc_state, &psr_status)) + DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n", + psr_status); local_irq_disable(); -- GitLab From 9844d4bf3eb46778f46390d7beee5add650d5fc0 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Tue, 21 Aug 2018 15:11:55 -0700 Subject: [PATCH 0169/1692] drm/i915/psr: Add missing check for I915_PSR_DEBUG_IRQ bit We print the last attempted entry and last exit timestamps only when IRQ debug is requested. This check was missed when new debug flags were added in 'commit c44301fce614 ("drm/i915: Allow control of PSR at runtime through debugfs, v6") Fixes: c44301fce614 ("drm/i915: Allow control of PSR at runtime through debugfs, v6") Cc: Maarten Lankhorst Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20180821221156.2442-2-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 26b7e5276b15..374b550d9a4f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2735,7 +2735,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) psr_source_status(dev_priv, m); mutex_unlock(&dev_priv->psr.lock); - if (READ_ONCE(dev_priv->psr.debug)) { + if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) { seq_printf(m, "Last attempted entry at: %lld\n", dev_priv->psr.last_entry_attempt); seq_printf(m, "Last exit at: %lld\n", -- GitLab From 1aeb1b5fa069f7d7a0de3ac8a33547014613fc7a Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Tue, 21 Aug 2018 15:11:56 -0700 Subject: [PATCH 0170/1692] drm/i915/psr: Mask PSR irq bits when re-enabling interrupts. gen8_de_irq_postinstall() wasn't masking the IRQ bit before passing the debug flag to psr_irq_control(). This check was missed when new debug bits were defined in 'commit c44301fce614 ("drm/i915: Allow control of PSR at runtime through debugfs, v6")'. Instead of ANDing the irq bit in all the callers, move it to the callee. v2: Rebased. 
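For illustration, a small model of the "mask in the callee" move (the flag and register values are invented, not the real PSR bits): the control function now receives the whole debug field and applies the IRQ-debug test itself, so no caller can forget the AND.

#include <stdio.h>
#include <stdint.h>

#define PSR_DEBUG_IRQ (1u << 0)         /* stand-in for I915_PSR_DEBUG_IRQ */

/* Callee-side masking: callers pass the raw debug bitmask straight through. */
static void psr_irq_control_model(uint32_t debug)
{
        uint32_t mask = 0x6;            /* error/post-exit bits, invented values */

        if (debug & PSR_DEBUG_IRQ)
                mask |= 0x1;            /* additionally unmask the debug interrupt */

        printf("IMR mask now 0x%x\n", mask);
}

int main(void)
{
        /* other debug bits in the field no longer leak into the irq mask */
        psr_irq_control_model(PSR_DEBUG_IRQ | 0x10);
        psr_irq_control_model(0x10);
        return 0;
}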
Fixes: c44301fce614 ("drm/i915: Allow control of PSR at runtime through debugfs, v6") Cc: Maarten Lankhorst Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20180821221156.2442-3-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/intel_drv.h | 2 +- drivers/gpu/drm/i915/intel_psr.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b2c9838442bc..8084e35b25c5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -4048,7 +4048,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) if (IS_HASWELL(dev_priv)) { gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug & I915_PSR_DEBUG_IRQ); + intel_psr_irq_control(dev_priv, dev_priv->psr.debug); display_mask |= DE_EDP_PSR_INT_HSW; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ed35632f1554..843eefaa0f0c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1944,7 +1944,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, void intel_psr_init(struct drm_i915_private *dev_priv); void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); -void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug); +void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug); void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); void intel_psr_short_pulse(struct intel_dp *intel_dp); int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 7980f8120aaa..da583a45e942 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -79,7 +79,7 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, } } -void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) +void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) { u32 debug_mask, mask; @@ -100,7 +100,7 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) EDP_PSR_PRE_ENTRY(TRANSCODER_C); } - if (debug) + if (debug & I915_PSR_DEBUG_IRQ) mask |= debug_mask; I915_WRITE(EDP_PSR_IMR, ~mask); @@ -904,7 +904,7 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, if (crtc) dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug & I915_PSR_DEBUG_IRQ); + intel_psr_irq_control(dev_priv, dev_priv->psr.debug); if (dev_priv->psr.prepared && enable) intel_psr_enable_locked(dev_priv, crtc_state); -- GitLab From b1f1c2c11fc6c6cd3e361061e30f9b2839897b28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 19 Jul 2018 21:21:57 +0300 Subject: [PATCH 0171/1692] drm/i915: Fix glk/cnl display w/a #1175 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The workaround was supposed to look at the plane destination coordinates. Currently it's looking at some mixture of src and dst coordinates that doesn't make sense. Fix it up. 
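A small standalone model of the corrected check, done purely in destination coordinates as the workaround requires; as in the patch, the driver applies it only on GLK/CNL, and the pixel values in main() below are arbitrary test inputs, not taken from the driver.

#include <stdio.h>

/* Reject planes that end within 4 pixels of the left screen edge or start
 * within 4 pixels of the right edge, using only dst coordinates. */
static int check_plane_wa(int dst_x, int dst_w, int pipe_src_w)
{
        if (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4) {
                printf("plane X %s position %d invalid (valid range %d-%d)\n",
                       dst_x + dst_w < 4 ? "end" : "start",
                       dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
                       4, pipe_src_w - 4);
                return -1;      /* -ERANGE in the driver */
        }
        return 0;
}

int main(void)
{
        check_plane_wa(-30, 32, 1920);  /* ends 2 px from the left edge: rejected */
        check_plane_wa(100, 640, 1920); /* well inside the pipe: accepted */
        return 0;
}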
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180719182214.4323-2-ville.syrjala@linux.intel.com Fixes: 394676f05bee (drm/i915: Add WA for planes ending close to left screen edge) Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/intel_display.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 95e9cad5b4de..b4566c2f5d28 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, int w = drm_rect_width(&plane_state->base.src) >> 16; int h = drm_rect_height(&plane_state->base.src) >> 16; int dst_x = plane_state->base.dst.x1; + int dst_w = drm_rect_width(&plane_state->base.dst); int pipe_src_w = crtc_state->pipe_src_w; int max_width = skl_max_plane_width(fb, 0, rotation); int max_height = 4096; @@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, * screen may cause FIFO underflow and display corruption. */ if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && - (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { + (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", - dst_x + w < 4 ? "end" : "start", - dst_x + w < 4 ? dst_x + w : dst_x, + dst_x + dst_w < 4 ? "end" : "start", + dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, 4, pipe_src_w - 4); return -ERANGE; } -- GitLab From 4f3530f4a41d49c41015020cd9a5ed5c95b5d2db Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 8 Aug 2018 01:07:06 -0700 Subject: [PATCH 0172/1692] bus: ti-sysc: Fix no_console_suspend handling If no_console_suspend is set, we should keep console enabled during suspend. Lets fix this by only producing a warning if we can't idle hardware during suspend. Fixes: ef55f8215a78 ("bus: ti-sysc: Improve suspend and resume handling") Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index b31bf03ea497..4576a1268e0e 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1175,10 +1175,10 @@ static int sysc_child_suspend_noirq(struct device *dev) if (!pm_runtime_status_suspended(dev)) { error = pm_generic_runtime_suspend(dev); if (error) { - dev_err(dev, "%s error at %i: %i\n", - __func__, __LINE__, error); + dev_warn(dev, "%s busy at %i: %i\n", + __func__, __LINE__, error); - return error; + return 0; } error = sysc_runtime_suspend(ddata->dev); -- GitLab From f4efa74c09a7eddcc12cd13208f78743763f6e7a Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Wed, 1 Aug 2018 22:28:21 +0200 Subject: [PATCH 0173/1692] ARM: dts: omap4-droid4: fix vibrations on Droid 4 Vibration GPIOs don't have anything to do with wakeup. Move it to normal section; this fixes vibrations on Droid 4. 
Fixes: a5effd968301 ("ARM: dts: omap4-droid4: Add vibrator") Signed-off-by: Pavel Machek Reviewed-by: Sebastian Reichel Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/omap4-droid4-xt894.dts | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index e7c3c563ff8f..edc97f89fae4 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -618,15 +618,6 @@ OMAP4_IOPAD(0x10a, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_clkx */ OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */ >; }; -}; - -&omap4_pmx_wkup { - usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { - /* gpio_wk0 */ - pinctrl-single,pins = < - OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) - >; - }; vibrator_direction_pin: pinmux_vibrator_direction_pin { pinctrl-single,pins = < @@ -641,6 +632,15 @@ OMAP4_IOPAD(0X1d0, PIN_OUTPUT | MUX_MODE1) /* dmtimer9_pwm_evt (gpio_28) */ }; }; +&omap4_pmx_wkup { + usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins { + /* gpio_wk0 */ + pinctrl-single,pins = < + OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3) + >; + }; +}; + /* * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for * uart1 wakeirq. -- GitLab From ce32d59ee2cd036f6e8a6ed17a06a0b0bec5c67c Mon Sep 17 00:00:00 2001 From: Keerthy Date: Wed, 25 Jul 2018 11:25:35 +0530 Subject: [PATCH 0174/1692] arm: dts: am4372: setup rtc as system-power-controller RTC alarm2 is connected to pmic_en line and hence can be used to control the pmic enabling/disabling. Hence add the system-power-controller for rtc node. Signed-off-by: Keerthy Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am4372.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index f0cbd86312dc..d4b7c59eec68 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -469,6 +469,7 @@ rtc: rtc@44e3e000 { ti,hwmods = "rtc"; clocks = <&clk_32768_ck>; clock-names = "int-clk"; + system-power-controller; status = "disabled"; }; -- GitLab From 64d9d13828c6c8e188bba63794eee923df3d69a9 Mon Sep 17 00:00:00 2001 From: Jeremy Cline Date: Tue, 31 Jul 2018 01:37:30 +0000 Subject: [PATCH 0175/1692] fs/quota: Replace XQM_MAXQUOTAS usage with MAXQUOTAS XQM_MAXQUOTAS and MAXQUOTAS are, it appears, equivalent. Replace all usage of XQM_MAXQUOTAS and remove it along with the unused XQM_*QUOTA definitions. Signed-off-by: Jeremy Cline Signed-off-by: Jan Kara --- fs/quota/quota.c | 12 +++++------- include/linux/quota.h | 8 +------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 860bfbe7a07a..d403392d8a0f 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -120,8 +120,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr) struct if_dqinfo uinfo; int ret; - /* This checks whether qc_state has enough entries... 
*/ - BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS); if (!sb->s_qcop->get_state) return -ENOSYS; ret = sb->s_qcop->get_state(sb, &state); @@ -354,10 +352,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs) * GETXSTATE quotactl has space for just one set of time limits so * report them for the first enabled quota type */ - for (type = 0; type < XQM_MAXQUOTAS; type++) + for (type = 0; type < MAXQUOTAS; type++) if (state.s_state[type].flags & QCI_ACCT_ENABLED) break; - BUG_ON(type == XQM_MAXQUOTAS); + BUG_ON(type == MAXQUOTAS); fqs->qs_btimelimit = state.s_state[type].spc_timelimit; fqs->qs_itimelimit = state.s_state[type].ino_timelimit; fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; @@ -427,10 +425,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs) * GETXSTATV quotactl has space for just one set of time limits so * report them for the first enabled quota type */ - for (type = 0; type < XQM_MAXQUOTAS; type++) + for (type = 0; type < MAXQUOTAS; type++) if (state.s_state[type].flags & QCI_ACCT_ENABLED) break; - BUG_ON(type == XQM_MAXQUOTAS); + BUG_ON(type == MAXQUOTAS); fqs->qs_btimelimit = state.s_state[type].spc_timelimit; fqs->qs_itimelimit = state.s_state[type].ino_timelimit; fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; @@ -701,7 +699,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, { int ret; - if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) + if (type >= MAXQUOTAS) return -EINVAL; /* * Quota not supported on this fs? Check this before s_quota_types diff --git a/include/linux/quota.h b/include/linux/quota.h index ca9772c8e48b..f32dd270b8e3 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -408,13 +408,7 @@ struct qc_type_state { struct qc_state { unsigned int s_incoredqs; /* Number of dquots in core */ - /* - * Per quota type information. The array should really have - * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in - * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS - * supports project quotas, this can be changed to MAXQUOTAS - */ - struct qc_type_state s_state[XQM_MAXQUOTAS]; + struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */ }; /* Structure for communicating via ->set_info */ -- GitLab From 7b6924d94a60c6b8c1279ca003e8744e6cd9e8b1 Mon Sep 17 00:00:00 2001 From: Jeremy Cline Date: Tue, 31 Jul 2018 01:37:31 +0000 Subject: [PATCH 0176/1692] fs/quota: Fix spectre gadget in do_quotactl 'type' is user-controlled, so sanitize it after the bounds check to avoid using it in speculative execution. 
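As an illustration of the clamp, here is a minimal userspace model of the branch-free masking idea behind array_index_nospec(); the mask expression follows the generic pattern and MAXQUOTAS is given an illustrative value, so treat this as a sketch rather than the kernel's exact macro.

#include <stdio.h>

#define MAXQUOTAS 3     /* illustrative: user, group, project */

/* Returns ~0UL when index < size and 0 otherwise, without a forward
 * branch the CPU could speculate past (userspace sketch). */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
        return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

static int do_quotactl_model(unsigned int type)
{
        static const char *names[MAXQUOTAS] = { "user", "group", "project" };

        if (type >= MAXQUOTAS)
                return -1;                      /* architectural bounds check */
        type &= index_mask(type, MAXQUOTAS);    /* clamp on speculative paths too */

        printf("quota type: %s\n", names[type]);
        return 0;
}

int main(void)
{
        do_quotactl_model(1);   /* valid, prints "group" */
        do_quotactl_model(7);   /* rejected before the array access */
        return 0;
}

In the patch itself the clamp is the array_index_nospec() call inserted right after the MAXQUOTAS comparison in do_quotactl().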
This covers the following potential gadgets detected with the help of smatch: * fs/ext4/super.c:5741 ext4_quota_read() warn: potential spectre issue 'sb_dqopt(sb)->files' [r] * fs/ext4/super.c:5778 ext4_quota_write() warn: potential spectre issue 'sb_dqopt(sb)->files' [r] * fs/f2fs/super.c:1552 f2fs_quota_read() warn: potential spectre issue 'sb_dqopt(sb)->files' [r] * fs/f2fs/super.c:1608 f2fs_quota_write() warn: potential spectre issue 'sb_dqopt(sb)->files' [r] * fs/quota/dquot.c:412 mark_info_dirty() warn: potential spectre issue 'sb_dqopt(sb)->info' [w] * fs/quota/dquot.c:933 dqinit_needed() warn: potential spectre issue 'dquots' [r] * fs/quota/dquot.c:2112 dquot_commit_info() warn: potential spectre issue 'dqopt->ops' [r] * fs/quota/dquot.c:2362 vfs_load_quota_inode() warn: potential spectre issue 'dqopt->files' [w] (local cap) * fs/quota/dquot.c:2369 vfs_load_quota_inode() warn: potential spectre issue 'dqopt->ops' [w] (local cap) * fs/quota/dquot.c:2370 vfs_load_quota_inode() warn: potential spectre issue 'dqopt->info' [w] (local cap) * fs/quota/quota.c:110 quota_getfmt() warn: potential spectre issue 'sb_dqopt(sb)->info' [r] * fs/quota/quota_v2.c:84 v2_check_quota_file() warn: potential spectre issue 'quota_magics' [w] * fs/quota/quota_v2.c:85 v2_check_quota_file() warn: potential spectre issue 'quota_versions' [w] * fs/quota/quota_v2.c:96 v2_read_file_info() warn: potential spectre issue 'dqopt->info' [r] * fs/quota/quota_v2.c:172 v2_write_file_info() warn: potential spectre issue 'dqopt->info' [r] Additionally, a quick inspection indicates there are array accesses with 'type' in quota_on() and quota_off() functions which are also addressed by this. Cc: Josh Poimboeuf Cc: stable@vger.kernel.org Signed-off-by: Jeremy Cline Signed-off-by: Jan Kara --- fs/quota/quota.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/quota/quota.c b/fs/quota/quota.c index d403392d8a0f..f0cbf58ad4da 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -18,6 +18,7 @@ #include #include #include +#include static int check_quotactl_permission(struct super_block *sb, int type, int cmd, qid_t id) @@ -701,6 +702,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, if (type >= MAXQUOTAS) return -EINVAL; + type = array_index_nospec(type, MAXQUOTAS); /* * Quota not supported on this fs? Check this before s_quota_types * since they needn't be set if quota is not supported at all. -- GitLab From b845c898b2f1ea458d5453f0fa1da6e2dfce3bb4 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 21 Aug 2018 15:55:00 +0200 Subject: [PATCH 0177/1692] bpf, sockmap: fix sock_hash_alloc and reject zero-sized keys Currently, it is possible to create a sock hash map with key size of 0 and have the kernel return a fd back to user space. This is invalid for hash maps (and kernel also hasn't been tested for zero key size support in general at this point). Thus, reject such configuration. 
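From user space the now-rejected configuration looks like this (a hedged sketch assuming libbpf's bpf_create_map() helper; not part of the patch):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <bpf/bpf.h>

	int main(void)
	{
		/* key_size == 0 is invalid for a hash map and must now
		 * fail with -EINVAL instead of returning a usable fd.
		 */
		int fd = bpf_create_map(BPF_MAP_TYPE_SOCKHASH,
					0 /* key_size */, 4 /* value_size */,
					16 /* max_entries */, 0 /* flags */);

		if (fd < 0)
			printf("rejected: %s\n", strerror(errno));
		return 0;
	}
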
Fixes: 81110384441a ("bpf: sockmap, add hash map support") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Song Liu --- kernel/bpf/sockmap.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 98e621a29e8e..60ceb0e1fa56 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -2140,7 +2140,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) return ERR_PTR(-EPERM); /* check sanity of attributes */ - if (attr->max_entries == 0 || attr->value_size != 4 || + if (attr->max_entries == 0 || + attr->key_size == 0 || + attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); -- GitLab From eb29429d81e31b191f3b2bd19cf820279cec6463 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 22 Aug 2018 18:09:17 +0200 Subject: [PATCH 0178/1692] bpf, sockmap: fix sock hash count in alloc_sock_hash_elem When we try to allocate a new sock hash entry and the allocation fails, then sock hash map fails to reduce the map element counter, meaning we keep accounting this element although it was never used. Fix it by dropping the element counter on error. Fixes: 81110384441a ("bpf: sockmap, add hash map support") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend --- kernel/bpf/sockmap.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 60ceb0e1fa56..40c6ef9fc828 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -2269,8 +2269,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab, } l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, htab->map.numa_node); - if (!l_new) + if (!l_new) { + atomic_dec(&htab->count); return ERR_PTR(-ENOMEM); + } memcpy(l_new->key, key, key_size); l_new->sk = sk; -- GitLab From 67db7cd249e71f64346f481b629724376d063e08 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 22 Aug 2018 08:37:32 -0700 Subject: [PATCH 0179/1692] tls: possible hang when do_tcp_sendpages hits sndbuf is full case Currently, the lower protocols sk_write_space handler is not called if TLS is sending a scatterlist via tls_push_sg. However, normally tls_push_sg calls do_tcp_sendpage, which may be under memory pressure, that in turn may trigger a wait via sk_wait_event. Typically, this happens when the in-flight bytes exceed the sdnbuf size. In the normal case when enough ACKs are received sk_write_space() will be called and the sk_wait_event will be woken up allowing it to send more data and/or return to the user. But, in the TLS case because the sk_write_space() handler does not wake up the events the above send will wait until the sndtimeo is exceeded. By default this is MAX_SCHEDULE_TIMEOUT so it look like a hang to the user (especially this impatient user). To fix this pass the sk_write_space event to the lower layers sk_write_space event which in the TCP case will wake any pending events. I observed the above while integrating sockmap and ktls. It initially appeared as test_sockmap (modified to use ktls) occasionally hanging. To reliably reproduce this reduce the sndbuf size and stress the tls layer by sending many 1B sends. This results in every byte needing a header and each byte individually being sent to the crypto layer. 
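The underlying idiom is to save the lower protocol's sk_write_space callback when the upper layer installs its own and to forward the event whenever the upper layer is itself blocked in the lower send path. A generic sketch (illustrative names only; the concrete change to tls_write_space() is in the diff below):

	struct ulp_ctx {
		void (*saved_write_space)(struct sock *sk);
		bool in_tcp_sendpages;
	};

	static void ulp_write_space(struct sock *sk)
	{
		struct ulp_ctx *ctx = get_ulp_ctx(sk); /* hypothetical lookup */

		if (ctx->in_tcp_sendpages) {
			/* Forward the event so do_tcp_sendpages()'s
			 * sk_wait_event() sleeper wakes up instead of
			 * waiting out sndtimeo.
			 */
			ctx->saved_write_space(sk);
			return;
		}
		/* ... normal upper-layer write-space handling ... */
	}
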
Signed-off-by: John Fastabend Acked-by: Dave Watson Signed-off-by: Daniel Borkmann --- net/tls/tls_main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 93c0c225ab34..180b6640e531 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk) { struct tls_context *ctx = tls_get_ctx(sk); - /* We are already sending pages, ignore notification */ - if (ctx->in_tcp_sendpages) + /* If in_tcp_sendpages call lower protocol write space handler + * to ensure we wake up any waiting operations there. For example + * if do_tcp_sendpages where to call sk_wait_event. + */ + if (ctx->in_tcp_sendpages) { + ctx->sk_write_space(sk); return; + } if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { gfp_t sk_allocation = sk->sk_allocation; -- GitLab From 9b2e0388bec8ec5427403e23faff3b58dd1c3200 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 22 Aug 2018 08:37:37 -0700 Subject: [PATCH 0180/1692] bpf: sockmap: write_space events need to be passed to TCP handler When sockmap code is using the stream parser it also handles the write space events in order to handle the case where (a) verdict redirects skb to another socket and (b) the sockmap then sends the skb but due to memory constraints (or other EAGAIN errors) needs to do a retry. But the initial code missed a third case where the skb_send_sock_locked() triggers an sk_wait_event(). A typically case would be when sndbuf size is exceeded. If this happens because we do not pass the write_space event to the lower layers we never wake up the event and it will wait for sndtimeo. Which as noted in ktls fix may be rather large and look like a hang to the user. To reproduce the best test is to reduce the sndbuf size and send 1B data chunks to stress the memory handling. To fix this pass the event from the upper layer to the lower layer. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 40c6ef9fc828..cf5195c7c331 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -1427,12 +1427,15 @@ static void smap_tx_work(struct work_struct *w) static void smap_write_space(struct sock *sk) { struct smap_psock *psock; + void (*write_space)(struct sock *sk); rcu_read_lock(); psock = smap_psock_sk(sk); if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) schedule_work(&psock->tx_work); + write_space = psock->save_write_space; rcu_read_unlock(); + write_space(sk); } static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) -- GitLab From 8604ffcbf04f8f4f3f55a9e46e5ff948b2ed4290 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 16 Aug 2018 12:01:03 +0200 Subject: [PATCH 0181/1692] drm/amdgpu: fix VM clearing for the root PD MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to figure out the address after validating the BO, not before. 
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Junwei Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..e40ca8676418 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -369,7 +369,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, uint64_t addr; int r; - addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; if (pte_support_ats) { @@ -401,6 +400,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; + addr = amdgpu_bo_gpu_offset(bo); if (ats_entries) { uint64_t ats_value; -- GitLab From d98ff24e8e9be3329eea7c84d5e244d0c1cd0ab3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 21 Aug 2018 15:09:39 +0200 Subject: [PATCH 0182/1692] drm/amdgpu: fix preamble handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit At this point the command submission can still be interrupted. Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 502b94fb116a..09703c87d676 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (r) return r; - if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { - parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; - if (!parser->ctx->preamble_presented) { - parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; - parser->ctx->preamble_presented = true; - } - } + if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) + parser->job->preamble_status |= + AMDGPU_PREAMBLE_IB_PRESENT; if (parser->ring && parser->ring != ring) return -EINVAL; @@ -1241,6 +1237,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_cs_post_dependencies(p); + if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && + !p->ctx->preamble_presented) { + job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; + p->ctx->preamble_presented = true; + } + cs->out.handle = seq; job->uf_sequence = seq; -- GitLab From 2f40c6eac74a2a60921cdec9e9a8a57e88e31434 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Wed, 22 Aug 2018 20:18:25 +0800 Subject: [PATCH 0183/1692] amdgpu: fix multi-process hang issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SWDEV-146499: hang during multi vulkan process testing cause: the second frame's PREAMBLE_IB have clear-state and LOAD actions, those actions ruin the pipeline that is still doing process in the previous frame's work-load IB. 
fix: need insert pipeline sync if have context switch for SRIOV (because only SRIOV will report PREEMPTION flag to UMD) Signed-off-by: Monk Liu Signed-off-by: Emily Deng Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 5518e623fed2..51b5e977ca88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return r; } + need_ctx_switch = ring->current_ctx != fence_ctx; if (ring->funcs->emit_pipeline_sync && job && ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || + (amdgpu_sriov_vf(adev) && need_ctx_switch) || amdgpu_vm_need_pipeline_sync(ring, job))) { need_pipe_sync = true; dma_fence_put(tmp); @@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } skip_preamble = ring->current_ctx == fence_ctx; - need_ctx_switch = ring->current_ctx != fence_ctx; if (job && ring->funcs->emit_cntxcntl) { if (need_ctx_switch) status |= AMDGPU_HAVE_CTX_SWITCH; -- GitLab From eb7e5cfced102e61814f6f3e4cb4acb9f9315760 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Wed, 22 Aug 2018 10:07:35 -0400 Subject: [PATCH 0184/1692] drm/amdgpu: Fix page fault and kasan warning on pci device remove. Problem: When executing echo 1 > /sys/class/drm/card0/device/remove kasan warning as bellow and page fault happen because adev->gart.pages already freed by the time amdgpu_gart_unbind is called. BUG: KASAN: user-memory-access in amdgpu_gart_unbind+0x98/0x180 [amdgpu] Write of size 8 at addr 0000000000003648 by task bash/1828 CPU: 2 PID: 1828 Comm: bash Tainted: G W O 4.18.0-rc1-dev+ #29 Hardware name: Gigabyte Technology Co., Ltd. 
AX370-Gaming/AX370-Gaming-CF, BIOS F3 06/19/2017 Call Trace: dump_stack+0x71/0xab kasan_report+0x109/0x390 amdgpu_gart_unbind+0x98/0x180 [amdgpu] ttm_tt_unbind+0x43/0x60 [ttm] ttm_bo_move_ttm+0x83/0x1c0 [ttm] ttm_bo_handle_move_mem+0xb97/0xd00 [ttm] ttm_bo_evict+0x273/0x530 [ttm] ttm_mem_evict_first+0x29c/0x360 [ttm] ttm_bo_force_list_clean+0xfc/0x210 [ttm] ttm_bo_clean_mm+0xe7/0x160 [ttm] amdgpu_ttm_fini+0xda/0x1d0 [amdgpu] amdgpu_bo_fini+0xf/0x60 [amdgpu] gmc_v8_0_sw_fini+0x36/0x70 [amdgpu] amdgpu_device_fini+0x2d0/0x7d0 [amdgpu] amdgpu_driver_unload_kms+0x6a/0xd0 [amdgpu] drm_dev_unregister+0x79/0x180 [drm] amdgpu_pci_remove+0x2a/0x60 [amdgpu] pci_device_remove+0x5b/0x100 device_release_driver_internal+0x236/0x360 pci_stop_bus_device+0xbf/0xf0 pci_stop_and_remove_bus_device_locked+0x16/0x30 remove_store+0xda/0xf0 kernfs_fop_write+0x186/0x220 __vfs_write+0xcc/0x330 vfs_write+0xe6/0x250 ksys_write+0xb1/0x140 do_syscall_64+0x77/0x1e0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7f66ebbb32c0 Fix: Split gmc_v{6,7,8,9}_0_gart_fini to postpone amdgpu_gart_fini to after memory managers are shut down since gart unbind happens as part of this procedure Signed-off-by: Andrey Grodzovsky Reviewed-by: Junwei Zhang Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 9 ++------- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 16 ++-------------- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 16 ++-------------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16 ++-------------- 4 files changed, 8 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) amdgpu_gart_table_vram_unpin(adev); } -static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, u32 addr, u32 mc_client) { @@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - gmc_v6_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -746,19 +746,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) amdgpu_gart_table_vram_unpin(adev); } -/** - * gmc_v7_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). 
- */ -static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - /** * gmc_v7_0_vm_decode_fault - print human readable fault info * @@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); kfree(adev->gmc.vm_fault_info); - gmc_v7_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -968,19 +968,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) amdgpu_gart_table_vram_unpin(adev); } -/** - * gmc_v8_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). - */ -static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - /** * gmc_v8_0_vm_decode_fault - print human readable fault info * @@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); kfree(adev->gmc.vm_fault_info); - gmc_v8_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) return 0; } -/** - * gmc_v9_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). - */ -static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - static int gmc_v9_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - gmc_v9_0_gart_fini(adev); /* * TODO: @@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) */ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); return 0; } -- GitLab From 0577ab482f46653d6210aeb1d7dc57aa5e2dbfc3 Mon Sep 17 00:00:00 2001 From: Azhar Shaikh Date: Wed, 22 Aug 2018 10:23:48 -0700 Subject: [PATCH 0185/1692] drm/i915/psr: Add PSR mode/revision to debugfs Log the PSR mode/revision (PSR1 or PSR2) in the debugfs file i915_edp_psr_status. 
Suggested-by: Dhinakaran Pandiyan Signed-off-by: Azhar Shaikh Reviewed-by: Dhinakaran Pandiyan Signed-off-by: Dhinakaran Pandiyan Link: https://patchwork.freedesktop.org/patch/msgid/1534958628-193724-1-git-send-email-azhar.shaikh@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 374b550d9a4f..a5265c236a33 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2708,6 +2708,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->psr.lock); + seq_printf(m, "PSR mode: %s\n", + dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1"); seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled)); seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", dev_priv->psr.busy_frontbuffer_bits); -- GitLab From 63eaf9acc00d394c75cb1a442387e0a05c17bcac Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Wed, 22 Aug 2018 12:38:27 -0700 Subject: [PATCH 0186/1692] drm/i915: Add a small wrapper to check for CCS modifiers. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Code looks cleaner with modifiers hidden inside this wrapper. v2: Remove const qualifier (Ville) Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180822193827.6341-1-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_display.c | 21 +++++++++++---------- drivers/gpu/drm/i915/intel_display.h | 1 + drivers/gpu/drm/i915/intel_sprite.c | 3 +-- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b4566c2f5d28..1bf6290dbb11 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2474,6 +2474,12 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) } } +bool is_ccs_modifier(u64 modifier) +{ + return modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; +} + static int intel_fill_fb_info(struct drm_i915_private *dev_priv, struct drm_framebuffer *fb) @@ -2504,8 +2510,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, return ret; } - if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) { + if (is_ccs_modifier(fb->modifier) && i == 1) { int hsub = fb->format->hsub; int vsub = fb->format->vsub; int tile_width, tile_height; @@ -3055,8 +3060,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, * CCS AUX surface doesn't have its own x/y offsets, we must make sure * they match with the main surface x/y offsets. 
*/ - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) { + if (is_ccs_modifier(fb->modifier)) { while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { if (offset == 0) break; @@ -3190,8 +3194,7 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state, ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; - } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) { + } else if (is_ccs_modifier(fb->modifier)) { ret = skl_check_ccs_aux_surface(plane_state); if (ret) return ret; @@ -13398,8 +13401,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_CCS) + if (is_ccs_modifier(modifier)) return true; /* fall through */ case DRM_FORMAT_RGB565: @@ -14595,8 +14597,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, * potential runtime errors at plane configuration time. */ if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 && - (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) + is_ccs_modifier(fb->modifier)) stride_alignment *= 4; if (fb->pitches[i] & (stride_alignment - 1)) { diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index a04c5a495a2b..43f080c6538d 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -381,4 +381,5 @@ void intel_link_compute_m_n(int bpp, int nlanes, struct intel_link_m_n *m_n, bool reduce_m_n); +bool is_ccs_modifier(u64 modifier); #endif diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 774bfb03c5d9..c286dda625e4 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1409,8 +1409,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_CCS) + if (is_ccs_modifier(modifier)) return true; /* fall through */ case DRM_FORMAT_RGB565: -- GitLab From 53867b46fa8443713b3aee520d6ca558b222d829 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Tue, 21 Aug 2018 18:50:53 -0700 Subject: [PATCH 0187/1692] drm/i915: Rename PLANE_CTL_DECOMPRESSION_ENABLE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename PLANE_CTL_DECOMPRESSION_ENABLE to resemble the bpsec name - PLANE_CTL_RENDER_DECOMPRESSION_ENABLE Suggested-by: Rodrigo Vivi Cc: Daniel Vetter Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180822015053.1420-2-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 2 +- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 59d06d0055bb..a338aaa2b313 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6515,7 +6515,7 @@ enum { #define PLANE_CTL_YUV422_UYVY (1 << 16) #define PLANE_CTL_YUV422_YVYU (2 << 16) #define PLANE_CTL_YUV422_VYUY (3 << 16) -#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15) +#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) #define 
PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) #define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */ #define PLANE_CTL_TILED_MASK (0x7 << 10) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1bf6290dbb11..b0b6e1e9a294 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3555,11 +3555,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) case I915_FORMAT_MOD_Y_TILED: return PLANE_CTL_TILED_Y; case I915_FORMAT_MOD_Y_TILED_CCS: - return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE; + return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Yf_TILED: return PLANE_CTL_TILED_YF; case I915_FORMAT_MOD_Yf_TILED_CCS: - return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE; + return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; default: MISSING_CASE(fb_modifier); } @@ -8802,13 +8802,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->modifier = I915_FORMAT_MOD_X_TILED; break; case PLANE_CTL_TILED_Y: - if (val & PLANE_CTL_DECOMPRESSION_ENABLE) + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED; break; case PLANE_CTL_TILED_YF: - if (val & PLANE_CTL_DECOMPRESSION_ENABLE) + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; else fb->modifier = I915_FORMAT_MOD_Yf_TILED; -- GitLab From 00e1cae78120ee19462e7f96135cd1cc59a086e7 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 22 Aug 2018 00:02:19 +0200 Subject: [PATCH 0188/1692] net: ethernet: renesas: use SPDX identifier for Renesas drivers Signed-off-by: Wolfram Sang Acked-by: Sergei Shtylyov Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb.h | 5 +---- drivers/net/ethernet/renesas/ravb_main.c | 5 +---- drivers/net/ethernet/renesas/sh_eth.c | 13 +------------ drivers/net/ethernet/renesas/sh_eth.h | 13 +------------ 4 files changed, 4 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b81f4faf7b10..1470fc12282b 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* Renesas Ethernet AVB device driver * * Copyright (C) 2014-2015 Renesas Electronics Corporation @@ -5,10 +6,6 @@ * Copyright (C) 2015-2016 Cogent Embedded, Inc. * * Based on the SuperH Ethernet driver - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License version 2, - * as published by the Free Software Foundation. */ #ifndef __RAVB_H__ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c06f2df895c2..aff5516b781e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Renesas Ethernet AVB device driver * * Copyright (C) 2014-2015 Renesas Electronics Corporation @@ -5,10 +6,6 @@ * Copyright (C) 2015-2016 Cogent Embedded, Inc. * * Based on the SuperH Ethernet driver - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License version 2, - * as published by the Free Software Foundation. 
*/ #include diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5573199c4536..ad4433d59237 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* SuperH Ethernet device driver * * Copyright (C) 2014 Renesas Electronics Corporation @@ -5,18 +6,6 @@ * Copyright (C) 2008-2014 Renesas Solutions Corp. * Copyright (C) 2013-2017 Cogent Embedded, Inc. * Copyright (C) 2014 Codethink Limited - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". */ #include diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index f94be99cf400..0c18650bbfe6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* SuperH Ethernet device driver * * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2012 Renesas Solutions Corp. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". */ #ifndef __SH_ETH_H__ -- GitLab From 3d0371b313b84ba7c16ebf2526a7a34f1c57b19e Mon Sep 17 00:00:00 2001 From: Samuel Mendoza-Jonas Date: Wed, 22 Aug 2018 14:57:44 +1000 Subject: [PATCH 0189/1692] net/ncsi: Fixup .dumpit message flags and ID check in Netlink handler The ncsi_pkg_info_all_nl() .dumpit handler is missing the NLM_F_MULTI flag, causing additional package information after the first to be lost. Also fixup a sanity check in ncsi_write_package_info() to reject out of range package IDs. Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David S. 
Miller --- net/ncsi/ncsi-netlink.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c index 82e6edf9c5d9..45f33d6dedf7 100644 --- a/net/ncsi/ncsi-netlink.c +++ b/net/ncsi/ncsi-netlink.c @@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb, bool found; int rc; - if (id > ndp->package_num) { + if (id > ndp->package_num - 1) { netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id); return -ENODEV; } @@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb, return 0; /* done */ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - &ncsi_genl_family, 0, NCSI_CMD_PKG_INFO); + &ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO); if (!hdr) { rc = -EMSGSIZE; goto err; -- GitLab From 93cfb6c17690c465509967aeb237717d10513a88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Wed, 22 Aug 2018 12:29:43 +0200 Subject: [PATCH 0190/1692] sch_cake: Fix TC filter flow override and expand it to hosts as well MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The TC filter flow mapping override completely skipped the call to cake_hash(); however that meant that the internal state was not being updated, which ultimately leads to deadlocks in some configurations. Fix that by passing the overridden flow ID into cake_hash() instead so it can react appropriately. In addition, the major number of the class ID can now be set to override the host mapping in host isolation mode. If both host and flow are overridden (or if the respective modes are disabled), flow dissection and hashing will be skipped entirely; otherwise, the hashing will be kept for the portions that are not set by the filter. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. 
Miller --- net/sched/sch_cake.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 4d26b0823cdf..c07c30b916d5 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -620,15 +620,20 @@ static bool cake_ddst(int flow_mode) } static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, - int flow_mode) + int flow_mode, u16 flow_override, u16 host_override) { - u32 flow_hash = 0, srchost_hash, dsthost_hash; + u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0; u16 reduced_hash, srchost_idx, dsthost_idx; struct flow_keys keys, host_keys; if (unlikely(flow_mode == CAKE_FLOW_NONE)) return 0; + /* If both overrides are set we can skip packet dissection entirely */ + if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) && + (host_override || !(flow_mode & CAKE_FLOW_HOSTS))) + goto skip_hash; + skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); @@ -675,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, if (flow_mode & CAKE_FLOW_FLOWS) flow_hash = flow_hash_from_keys(&keys); +skip_hash: + if (flow_override) + flow_hash = flow_override - 1; + if (host_override) { + dsthost_hash = host_override - 1; + srchost_hash = host_override - 1; + } + if (!(flow_mode & CAKE_FLOW_FLOWS)) { if (flow_mode & CAKE_FLOW_SRC_IP) flow_hash ^= srchost_hash; @@ -1570,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, struct cake_sched_data *q = qdisc_priv(sch); struct tcf_proto *filter; struct tcf_result res; - u32 flow = 0; + u16 flow = 0, host = 0; int result; filter = rcu_dereference_bh(q->filter_list); @@ -1594,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, #endif if (TC_H_MIN(res.classid) <= CAKE_QUEUES) flow = TC_H_MIN(res.classid); + if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16)) + host = TC_H_MAJ(res.classid) >> 16; } hash: *t = cake_select_tin(sch, skb); - return flow ?: cake_hash(*t, skb, flow_mode) + 1; + return cake_hash(*t, skb, flow_mode, flow, host) + 1; } static void cake_reconfigure(struct Qdisc *sch); -- GitLab From 191672ca07a7c10c3b84d01019a33d59b4317997 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 22 Aug 2018 17:25:44 +0200 Subject: [PATCH 0191/1692] net_sched: fix unused variable warning in stmmac The new tcf_exts_for_each_action() macro doesn't reference its arguments when CONFIG_NET_CLS_ACT is disabled, which leads to a harmless warning in at least one driver: drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c: In function 'tc_fill_actions': drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c:64:6: error: unused variable 'i' [-Werror=unused-variable] Adding a cast to void lets us avoid this kind of warning. To be on the safe side, do it for all three arguments, not just the one that caused the warning. Fixes: 244cd96adb5f ("net_sched: remove list_head from tc_action") Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index c17d51865469..75a3f3fdb359 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -303,7 +303,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++) #else #define tcf_exts_for_each_action(i, a, exts) \ - for (; 0; ) + for (; 0; (void)(i), (void)(a), (void)(exts)) #endif static inline void -- GitLab From e500c6d349f7f36120886841c6f057ce248b48b2 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 22 Aug 2018 12:58:34 -0700 Subject: [PATCH 0192/1692] addrconf: reduce unnecessary atomic allocations All the 3 callers of addrconf_add_mroute() assert RTNL lock, they don't take any additional lock either, so it is safe to convert it to GFP_KERNEL. Same for sit_add_v4_addrs(). Cc: David Ahern Signed-off-by: Cong Wang Reviewed-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2fac4ad74867..d51a8c0b3372 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev) ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); - ip6_route_add(&cfg, GFP_ATOMIC, NULL); + ip6_route_add(&cfg, GFP_KERNEL, NULL); } static struct inet6_dev *addrconf_add_dev(struct net_device *dev) @@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) if (addr.s6_addr32[3]) { add_addr(idev, &addr, plen, scope); addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, - GFP_ATOMIC); + GFP_KERNEL); return; } @@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) add_addr(idev, &addr, plen, flag); addrconf_prefix_route(&addr, plen, 0, idev->dev, - 0, pflags, GFP_ATOMIC); + 0, pflags, GFP_KERNEL); } } } -- GitLab From 431280eebed9f5079553daf003011097763e71fd Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 22 Aug 2018 13:30:45 -0700 Subject: [PATCH 0193/1692] ipv4: tcp: send zero IPID for RST and ACK sent in SYN-RECV and TIME-WAIT state tcp uses per-cpu (and per namespace) sockets (net->ipv4.tcp_sk) internally to send some control packets. 1) RST packets, through tcp_v4_send_reset() 2) ACK packets in SYN-RECV and TIME-WAIT state, through tcp_v4_send_ack() These packets assert IP_DF, and also use the hashed IP ident generator to provide an IPv4 ID number. Geoff Alexander reported this could be used to build off-path attacks. These packets should not be fragmented, since their size is smaller than IPV4_MIN_MTU. Only some tunneled paths could eventually have to fragment, regardless of inner IPID. We really can use zero IPID, to address the flaw, and as a bonus, avoid a couple of atomic operations in ip_idents_reserve() Signed-off-by: Eric Dumazet Reported-by: Geoff Alexander Tested-by: Geoff Alexander Signed-off-by: David S. Miller --- net/ipv4/tcp_ipv4.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9e041fa5c545..44c09eddbb78 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2517,6 +2517,12 @@ static int __net_init tcp_sk_init(struct net *net) if (res) goto fail; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); + + /* Please enforce IP_DF and IPID==0 for RST and + * ACK sent in SYN-RECV and TIME-WAIT state. 
+ */ + inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO; + *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; } -- GitLab From fb99886224294b2291d267da41395022fa4200e2 Mon Sep 17 00:00:00 2001 From: Kevin Yang Date: Wed, 22 Aug 2018 17:43:14 -0400 Subject: [PATCH 0194/1692] tcp_bbr: add bbr_check_probe_rtt_done() helper This patch add a helper function bbr_check_probe_rtt_done() to 1. check the condition to see if bbr should exit probe_rtt mode; 2. process the logic of exiting probe_rtt mode. Fixes: 0f8782ea1497 ("tcp_bbr: add BBR congestion control") Signed-off-by: Kevin Yang Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Reviewed-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/tcp_bbr.c | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 13d34427ca3d..fd7bccf36a26 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -95,11 +95,10 @@ struct bbr { u32 mode:3, /* current bbr_mode in state machine */ prev_ca_state:3, /* CA state on previous ACK */ packet_conservation:1, /* use packet conservation? */ - restore_cwnd:1, /* decided to revert cwnd to old value */ round_start:1, /* start of packet-timed tx->ack round? */ idle_restart:1, /* restarting after idle? */ probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */ - unused:12, + unused:13, lt_is_sampling:1, /* taking long-term ("LT") samples now? */ lt_rtt_cnt:7, /* round trips in long-term interval */ lt_use_bw:1; /* use lt_bw as our bw estimate? */ @@ -396,17 +395,11 @@ static bool bbr_set_cwnd_to_recover_or_restore( cwnd = tcp_packets_in_flight(tp) + acked; } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) { /* Exiting loss recovery; restore cwnd saved before recovery. */ - bbr->restore_cwnd = 1; + cwnd = max(cwnd, bbr->prior_cwnd); bbr->packet_conservation = 0; } bbr->prev_ca_state = state; - if (bbr->restore_cwnd) { - /* Restore cwnd after exiting loss recovery or PROBE_RTT. */ - cwnd = max(cwnd, bbr->prior_cwnd); - bbr->restore_cwnd = 0; - } - if (bbr->packet_conservation) { *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked); return true; /* yes, using packet conservation */ @@ -748,6 +741,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs) bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ } +static void bbr_check_probe_rtt_done(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct bbr *bbr = inet_csk_ca(sk); + + if (!(bbr->probe_rtt_done_stamp && + after(tcp_jiffies32, bbr->probe_rtt_done_stamp))) + return; + + bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */ + tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd); + bbr_reset_mode(sk); +} + /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and * periodically drain the bottleneck queue, to converge to measure the true * min_rtt (unloaded propagation delay). 
This allows the flows to keep queues @@ -806,12 +813,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) } else if (bbr->probe_rtt_done_stamp) { if (bbr->round_start) bbr->probe_rtt_round_done = 1; - if (bbr->probe_rtt_round_done && - after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) { - bbr->min_rtt_stamp = tcp_jiffies32; - bbr->restore_cwnd = 1; /* snap to prior_cwnd */ - bbr_reset_mode(sk); - } + if (bbr->probe_rtt_round_done) + bbr_check_probe_rtt_done(sk); } } /* Restart after idle ends only once we process a new S/ACK for data */ @@ -862,7 +865,6 @@ static void bbr_init(struct sock *sk) bbr->has_seen_rtt = 0; bbr_init_pacing_rate_from_rtt(sk); - bbr->restore_cwnd = 0; bbr->round_start = 0; bbr->idle_restart = 0; bbr->full_bw_reached = 0; -- GitLab From 5490b32dce6932ea7ee8e3b2f76db2957c92af6e Mon Sep 17 00:00:00 2001 From: Kevin Yang Date: Wed, 22 Aug 2018 17:43:15 -0400 Subject: [PATCH 0195/1692] tcp_bbr: in restart from idle, see if we should exit PROBE_RTT This patch fix the case where BBR does not exit PROBE_RTT mode when it restarts from idle. When BBR restarts from idle and if BBR is in PROBE_RTT mode, BBR should check if it's time to exit PROBE_RTT. If yes, then BBR should exit PROBE_RTT mode and restore the cwnd to its full value. Fixes: 0f8782ea1497 ("tcp_bbr: add BBR congestion control") Signed-off-by: Kevin Yang Signed-off-by: Neal Cardwell Reviewed-by: Yuchung Cheng Reviewed-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/tcp_bbr.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index fd7bccf36a26..1d4bdd3b5e4d 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -174,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8; /* If we estimate we're policed, use lt_bw for this many round trips: */ static const u32 bbr_lt_bw_max_rtts = 48; +static void bbr_check_probe_rtt_done(struct sock *sk); + /* Do we estimate that STARTUP filled the pipe? */ static bool bbr_full_bw_reached(const struct sock *sk) { @@ -308,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) */ if (bbr->mode == BBR_PROBE_BW) bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT); + else if (bbr->mode == BBR_PROBE_RTT) + bbr_check_probe_rtt_done(sk); } } -- GitLab From 8e995bf14fdb7e33681d5c3312b602fa342b878a Mon Sep 17 00:00:00 2001 From: Kevin Yang Date: Wed, 22 Aug 2018 17:43:16 -0400 Subject: [PATCH 0196/1692] tcp_bbr: apply PROBE_RTT cwnd cap even if acked==0 This commit fixes a corner case where TCP BBR would enter PROBE_RTT mode but not reduce its cwnd. If a TCP receiver ACKed less than one full segment, the number of delivered/acked packets was 0, so that bbr_set_cwnd() would short-circuit and exit early, without cutting cwnd to the value we want for PROBE_RTT. The fix is to instead make sure that even when 0 full packets are ACKed, we do apply all the appropriate caps, including the cap that applies in PROBE_RTT mode. Fixes: 0f8782ea1497 ("tcp_bbr: add BBR congestion control") Signed-off-by: Kevin Yang Signed-off-by: Neal Cardwell Reviewed-by: Yuchung Cheng Reviewed-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- net/ipv4/tcp_bbr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 1d4bdd3b5e4d..02ff2dde9609 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -420,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs, { struct tcp_sock *tp = tcp_sk(sk); struct bbr *bbr = inet_csk_ca(sk); - u32 cwnd = 0, target_cwnd = 0; + u32 cwnd = tp->snd_cwnd, target_cwnd = 0; if (!acked) - return; + goto done; /* no packet fully ACKed; just apply caps */ if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) goto done; -- GitLab From 3ed614dce3ca9912d22be215ff0f11104b69fe62 Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Thu, 23 Aug 2018 11:10:10 +0800 Subject: [PATCH 0197/1692] net: hns: fix length and page_offset overflow when CONFIG_ARM64_64K_PAGES When enable the config item "CONFIG_ARM64_64K_PAGES", the size of PAGE_SIZE is 65536(64K). But the type of length and page_offset are u16, they will overflow. So change them to u32. Fixes: 6fe6611ff275 ("net: add Hisilicon Network Subsystem hnae framework support") Signed-off-by: Huazhong Tan Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hnae.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index fa5b30f547f6..cad52bd331f7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -220,10 +220,10 @@ struct hnae_desc_cb { /* priv data for the desc, e.g. skb when use with ip stack*/ void *priv; - u16 page_offset; - u16 reuse_flag; + u32 page_offset; + u32 length; /* length of the buffer */ - u16 length; /* length of the buffer */ + u16 reuse_flag; /* desc type, used by the ring user to mark the type of the priv data */ u16 type; -- GitLab From ac4a5b52f5970479f4b2d94a7f98dbf9eaf675ab Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Thu, 23 Aug 2018 11:10:11 +0800 Subject: [PATCH 0198/1692] net: hns: modify variable type in hns_nic_reuse_page 'truesize' is supposed to be u32, not int, so fix it. Signed-off-by: Huazhong tan Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 9f2b552aee33..c8c0b0309c27 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -512,7 +512,8 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i, struct hnae_desc_cb *desc_cb) { struct hnae_desc *desc; - int truesize, size; + u32 truesize; + int size; int last_offset; bool twobufs; -- GitLab From b1ccd4c0ab6ef499f47dd84ed4920502a7147bba Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Thu, 23 Aug 2018 11:10:12 +0800 Subject: [PATCH 0199/1692] net: hns: fix skb->truesize underestimation skb->truesize is not meant to be tracking amount of used bytes in a skb, but amount of reserved/consumed bytes in memory. For instance, if we use a single byte in last page fragment, we have to account the full size of the fragment. So skb_add_rx_frag needs to calculate the length of the entire buffer into turesize. Fixes: 9cbe9fd5214e ("net: hns: optimize XGE capability by reducing cpu usage") Signed-off-by: Huazhong tan Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index c8c0b0309c27..71bd3bff6c67 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -531,7 +531,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i, } skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); + size - pull_len, truesize); /* avoid re-using remote pages,flag default unreuse */ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) -- GitLab From 339379a2fb13decd1802b403370cc3cdf21d819f Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Thu, 23 Aug 2018 11:10:13 +0800 Subject: [PATCH 0200/1692] net: hns: use eth_get_headlen interface instead of hns_nic_get_headlen Update hns to drop the hns_nic_get_headlen function in favour of eth_get_headlen, and hence also removes now redundant hns_nic_get_headlen. Signed-off-by: Huazhong Tan Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 103 +----------------- 1 file changed, 1 insertion(+), 102 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 71bd3bff6c67..02a0ba20fad5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -406,107 +406,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, return NETDEV_TX_BUSY; } -/** - * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE - * @data: pointer to the start of the headers - * @max: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, GRO, and RSC offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. 
- **/ -static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag, - unsigned int max_size) -{ - unsigned char *network; - u8 hlen; - - /* this should never happen, but better safe than sorry */ - if (max_size < ETH_HLEN) - return max_size; - - /* initialize network frame pointer */ - network = data; - - /* set first protocol and move network header forward */ - network += ETH_HLEN; - - /* handle any vlan tag if present */ - if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S) - == HNS_RX_FLAG_VLAN_PRESENT) { - if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) - return max_size; - - network += VLAN_HLEN; - } - - /* handle L3 protocols */ - if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S) - == HNS_RX_FLAG_L3ID_IPV4) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct iphdr))) - return max_size; - - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (network[0] & 0x0F) << 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return network - data; - - /* record next protocol if header is present */ - } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S) - == HNS_RX_FLAG_L3ID_IPV6) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct ipv6hdr))) - return max_size; - - /* record next protocol */ - hlen = sizeof(struct ipv6hdr); - } else { - return network - data; - } - - /* relocate pointer to start of L4 header */ - network += hlen; - - /* finally sort out TCP/UDP */ - if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S) - == HNS_RX_FLAG_L4ID_TCP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct tcphdr))) - return max_size; - - /* access doff as a u8 to avoid unaligned access on ia64 */ - hlen = (network[12] & 0xF0) >> 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return network - data; - - network += hlen; - } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S) - == HNS_RX_FLAG_L4ID_UDP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct udphdr))) - return max_size; - - network += sizeof(struct udphdr); - } - - /* If everything has gone correctly network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. - */ - if ((typeof(max_size))(network - data) < max_size) - return network - data; - else - return max_size; -} - static void hns_nic_reuse_page(struct sk_buff *skb, int i, struct hnae_ring *ring, int pull_len, struct hnae_desc_cb *desc_cb) @@ -696,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, } else { ring->stats.seg_pkt_cnt++; - pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE); + pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE); memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); -- GitLab From d23c4b6336ef30898dcdff351f21e633e7a64930 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Thu, 23 Aug 2018 11:31:37 +0800 Subject: [PATCH 0201/1692] net/ipv6: init ip6 anycast rt->dst.input as ip6_input Commit 6edb3c96a5f02 ("net/ipv6: Defer initialization of dst to data path") forgot to handle anycast route and init anycast rt->dst.input to ip6_forward. Fix it by setting anycast rt->dst.input back to ip6_input. Fixes: 6edb3c96a5f02 ("net/ipv6: Defer initialization of dst to data path") Signed-off-by: Hangbin Liu Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7208c16302f6..c4ea13e8360b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) rt->dst.error = 0; rt->dst.output = ip6_output; - if (ort->fib6_type == RTN_LOCAL) { + if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) { rt->dst.input = ip6_input; } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { rt->dst.input = ip6_mc_input; -- GitLab From 27a5959308559fa6afcaa4e6cd81d25bcb2dda7c Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Thu, 23 Aug 2018 11:37:15 +0800 Subject: [PATCH 0202/1692] net: hns3: fix page_offset overflow when CONFIG_ARM64_64K_PAGES When enable the config item "CONFIG_ARM64_64K_PAGES", the size of PAGE_SIZE is 65536(64K). But the type of page_offset is u16, it will overflow. So change it to u32, when "CONFIG_ARM64_64K_PAGES" enabled. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Huazhong Tan Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index a02a96aee2a2..cb450d7ec8c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -284,11 +284,11 @@ struct hns3_desc_cb { /* priv data for the desc, e.g. skb when use with ip stack*/ void *priv; - u16 page_offset; - u16 reuse_flag; - + u32 page_offset; u32 length; /* length of the buffer */ + u16 reuse_flag; + /* desc type, used by the ring user to mark the type of the priv data */ u16 type; }; -- GitLab From 583e7281f1d8234f3a3e483bd6fba7a72d24aa4e Mon Sep 17 00:00:00 2001 From: Huazhong Tan Date: Thu, 23 Aug 2018 11:37:16 +0800 Subject: [PATCH 0203/1692] net: hns3: modify variable type in hns3_nic_reuse_page 'truesize' is supposed to be u32, not int, so fix it. Signed-off-by: Huazhong tan Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 3554dca7a680..955c4ab18b03 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_desc_cb *desc_cb) { struct hns3_desc *desc; - int truesize, size; + u32 truesize; + int size; int last_offset; bool twobufs; -- GitLab From 9faf870e559a710c44e747ba20383ea82d8ac5d2 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 22 Aug 2018 21:28:01 +0300 Subject: [PATCH 0204/1692] mmc: renesas_sdhi_internal_dmac: fix #define RST_RESERVED_BITS The DM_CM_RST register actually has bits 0-31 defaulting to 1s and bits 32-63 defaulting to 0s -- fix off-by-one in #define RST_RESERVED_BITS. 
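For reference, GENMASK_ULL(h, l) builds a mask with bits l through h set inclusive, so the old definition covered one bit too many (values shown for illustration only):

	/* GENMASK_ULL(32, 0) == 0x00000001ffffffffULL  -> bits 0-32 (33 bits)
	 * GENMASK_ULL(31, 0) == 0x00000000ffffffffULL  -> bits 0-31, i.e. the
	 *                                                 region that defaults
	 *                                                 to 1s in DM_CM_RST
	 */
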
Signed-off-by: Sergei Shtylyov Reviewed-by: Wolfram Sang Fixes: 2a68ea7896e3 ("mmc: renesas-sdhi: add support for R-Car Gen3 SDHI DMAC") Cc: stable@vger.kernel.org # v4.14+ Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_internal_dmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 35cc0de6be67..f16677f424b9 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -45,7 +45,7 @@ /* DM_CM_RST */ #define RST_DTRANRST1 BIT(9) #define RST_DTRANRST0 BIT(8) -#define RST_RESERVED_BITS GENMASK_ULL(32, 0) +#define RST_RESERVED_BITS GENMASK_ULL(31, 0) /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ #define INFO1_CLEAR 0 -- GitLab From d2332f887ddfba50fee93b8e1736376517c2df0c Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 22 Aug 2018 21:22:26 +0300 Subject: [PATCH 0205/1692] mmc: renesas_sdhi_internal_dmac: mask DMAC interrupts I have encountered an interrupt storm during the eMMC chip probing (and the chip finally didn't get detected). It turned out that U-Boot left the SDHI DMA interrupts enabled while the Linux driver didn't use those. Masking those interrupts in renesas_sdhi_internal_dmac_request_dma() gets rid of both issues... Signed-off-by: Sergei Shtylyov Reviewed-by: Wolfram Sang Fixes: 2a68ea7896e3 ("mmc: renesas-sdhi: add support for R-Car Gen3 SDHI DMAC") Cc: stable@vger.kernel.org # v4.14+ Signed-off-by: Ulf Hansson --- drivers/mmc/host/renesas_sdhi_internal_dmac.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index f16677f424b9..ca0b43973769 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -49,10 +49,12 @@ /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ #define INFO1_CLEAR 0 +#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) #define INFO1_DTRANEND1 BIT(17) #define INFO1_DTRANEND0 BIT(16) /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ +#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) #define INFO2_DTRANERR1 BIT(17) #define INFO2_DTRANERR0 BIT(16) @@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, { struct renesas_sdhi *priv = host_to_priv(host); + /* Disable DMAC interrupts, we don't use them */ + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, + INFO1_MASK_CLEAR); + renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, + INFO2_MASK_CLEAR); + /* Each value is set to non-zero to assume "enabling" each DMA */ host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; -- GitLab From a61d904fd6fa85bfa7e0ae9aeaa8992173322e77 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 22 Aug 2018 14:26:02 +0300 Subject: [PATCH 0206/1692] drm/i915: Simplify condition to keep DMC active during S0ix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For S0ix we want to deinit power domains (and so deactivate the DMC firmware) exactly when the platform supports the DC9 state. To reach S0ix we need DC9 on these platforms (for which the DMC FW needs to be deactivated) while to reach S0ix on the rest of the DMC platforms we need DC6 (which needs the DMC FW to stay active). Simplify the condition accordingly so it will be automatically correct for upcoming DC9 platforms like ICL. 
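Illustrative note (not part of the patch itself): the new check in the hunk below boils down to "keep the DMC firmware active over suspend-to-idle only on platforms that have no DC9 state", roughly:

    /* sketch mirroring the condition in the diff below */
    if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
        suspend_mode == I915_DRM_SUSPEND_IDLE &&
        dev_priv->csr.dmc_payload != NULL) {
            /* firmware-assisted save/restore: skip the manual power domain deinit */
    }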
Cc: Lucas De Marchi Cc: Michel Thierry Cc: Ville Syrjälä Cc: Rodrigo Vivi Cc: Paulo Zanoni Cc: Chris Wilson Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180822112602.27543-1-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index ff3fd8dbd2b4..1b10b7041513 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -3848,13 +3848,14 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv, intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); /* - * In case of firmware assisted context save/restore don't manually - * deinit the power domains. This also means the CSR/DMC firmware will - * stay active, it will power down any HW resources as required and - * also enable deeper system power states that would be blocked if the - * firmware was inactive. + * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 + * support don't manually deinit the power domains. This also means the + * CSR/DMC firmware will stay active, it will power down any HW + * resources as required and also enable deeper system power states + * that would be blocked if the firmware was inactive. */ - if (!IS_GEN9_LP(dev_priv) && suspend_mode == I915_DRM_SUSPEND_IDLE && + if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) && + suspend_mode == I915_DRM_SUSPEND_IDLE && dev_priv->csr.dmc_payload != NULL) { intel_power_domains_verify_state(dev_priv); return; -- GitLab From d4682ba03ef618b6ef4be7cedc7aacaf505d3a58 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 11 Jun 2018 19:24:28 +0100 Subject: [PATCH 0207/1692] Btrfs: sync log after logging new name When we add a new name for an inode which was logged in the current transaction, we update the inode in the log so that its new name and ancestors are added to the log. However when we do this we do not persist the log, so the changes remain in memory only, and as a consequence, any ancestors that were created in the current transaction are updated such that future calls to btrfs_inode_in_log() return true. This leads to a subsequent fsync against such new ancestor directories returning immediately, without persisting the log, therefore after a power failure the new ancestor directories do not exist, despite fsync being called against them explicitly. Example: $ mkfs.btrfs -f /dev/sdb $ mount /dev/sdb /mnt $ mkdir /mnt/A $ mkdir /mnt/B $ mkdir /mnt/A/C $ touch /mnt/B/foo $ xfs_io -c "fsync" /mnt/B/foo $ ln /mnt/B/foo /mnt/A/C/foo $ xfs_io -c "fsync" /mnt/A After the power failure, directory "A" does not exist, despite the explicit fsync on it. Instead of fixing this by changing the behaviour of the explicit fsync on directory "A" to persist the log instead of doing nothing, make the logging of the new file name (which happens when creating a hard link or renaming) persist the log. This approach not only is simpler, not requiring addition of new fields to the inode in memory structure, but also gives us the same behaviour as ext4, xfs and f2fs (possibly other filesystems too). A test case for fstests follows soon. 
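Illustrative note (not part of the patch itself): after this change btrfs_log_new_name() tells its caller what still needs to be done instead of leaving the log unsynced; the link/rename paths in the diff below follow roughly this pattern:

    /* sketch of the caller contract introduced below */
    ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
                             parent, false, &ctx);
    if (ret == BTRFS_NEED_LOG_SYNC)
            sync_log = true;            /* call btrfs_sync_log() before ending the transaction */
    else if (ret == BTRFS_NEED_TRANS_COMMIT)
            commit_transaction = true;  /* fall back to a full transaction commit */
    ret = 0;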
Fixes: 12fcfd22fe5b ("Btrfs: tree logging unlink/rename fixes") Reported-by: Vijay Chidambaram Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/inode.c | 92 +++++++++++++++++++++++++++++++++++++++------ fs/btrfs/tree-log.c | 48 ++++++++++++++++++++--- fs/btrfs/tree-log.h | 10 ++++- 3 files changed, 131 insertions(+), 19 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c6d8c5d19ff0..cb09873ad270 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6634,6 +6634,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, drop_inode = 1; } else { struct dentry *parent = dentry->d_parent; + int ret; + err = btrfs_update_inode(trans, root, inode); if (err) goto fail; @@ -6647,7 +6649,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, goto fail; } d_instantiate(dentry, inode); - btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent); + ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent, + true, NULL); + if (ret == BTRFS_NEED_TRANS_COMMIT) { + err = btrfs_commit_transaction(trans); + trans = NULL; + } } fail: @@ -9386,14 +9393,21 @@ static int btrfs_rename_exchange(struct inode *old_dir, u64 new_idx = 0; u64 root_objectid; int ret; - int ret2; bool root_log_pinned = false; bool dest_log_pinned = false; + struct btrfs_log_ctx ctx_root; + struct btrfs_log_ctx ctx_dest; + bool sync_log_root = false; + bool sync_log_dest = false; + bool commit_transaction = false; /* we only allow rename subvolume link between subvolumes */ if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) return -EXDEV; + btrfs_init_log_ctx(&ctx_root, old_inode); + btrfs_init_log_ctx(&ctx_dest, new_inode); + /* close the race window with snapshot create/destroy ioctl */ if (old_ino == BTRFS_FIRST_FREE_OBJECTID) down_read(&fs_info->subvol_sem); @@ -9540,15 +9554,29 @@ static int btrfs_rename_exchange(struct inode *old_dir, if (root_log_pinned) { parent = new_dentry->d_parent; - btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), - parent); + ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), + BTRFS_I(old_dir), parent, + false, &ctx_root); + if (ret == BTRFS_NEED_LOG_SYNC) + sync_log_root = true; + else if (ret == BTRFS_NEED_TRANS_COMMIT) + commit_transaction = true; + ret = 0; btrfs_end_log_trans(root); root_log_pinned = false; } if (dest_log_pinned) { - parent = old_dentry->d_parent; - btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir), - parent); + if (!commit_transaction) { + parent = old_dentry->d_parent; + ret = btrfs_log_new_name(trans, BTRFS_I(new_inode), + BTRFS_I(new_dir), parent, + false, &ctx_dest); + if (ret == BTRFS_NEED_LOG_SYNC) + sync_log_dest = true; + else if (ret == BTRFS_NEED_TRANS_COMMIT) + commit_transaction = true; + ret = 0; + } btrfs_end_log_trans(dest); dest_log_pinned = false; } @@ -9581,8 +9609,26 @@ static int btrfs_rename_exchange(struct inode *old_dir, dest_log_pinned = false; } } - ret2 = btrfs_end_transaction(trans); - ret = ret ? ret : ret2; + if (!ret && sync_log_root && !commit_transaction) { + ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, + &ctx_root); + if (ret) + commit_transaction = true; + } + if (!ret && sync_log_dest && !commit_transaction) { + ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root, + &ctx_dest); + if (ret) + commit_transaction = true; + } + if (commit_transaction) { + ret = btrfs_commit_transaction(trans); + } else { + int ret2; + + ret2 = btrfs_end_transaction(trans); + ret = ret ? 
ret : ret2; + } out_notrans: if (new_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); @@ -9659,6 +9705,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, int ret; u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); bool log_pinned = false; + struct btrfs_log_ctx ctx; + bool sync_log = false; + bool commit_transaction = false; if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return -EPERM; @@ -9816,8 +9865,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (log_pinned) { struct dentry *parent = new_dentry->d_parent; - btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), - parent); + btrfs_init_log_ctx(&ctx, old_inode); + ret = btrfs_log_new_name(trans, BTRFS_I(old_inode), + BTRFS_I(old_dir), parent, + false, &ctx); + if (ret == BTRFS_NEED_LOG_SYNC) + sync_log = true; + else if (ret == BTRFS_NEED_TRANS_COMMIT) + commit_transaction = true; + ret = 0; btrfs_end_log_trans(root); log_pinned = false; } @@ -9854,7 +9910,19 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, btrfs_end_log_trans(root); log_pinned = false; } - btrfs_end_transaction(trans); + if (!ret && sync_log) { + ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx); + if (ret) + commit_transaction = true; + } + if (commit_transaction) { + ret = btrfs_commit_transaction(trans); + } else { + int ret2; + + ret2 = btrfs_end_transaction(trans); + ret = ret ? ret : ret2; + } out_notrans: if (old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 1650dc44a5e3..3c2ae0e4f25a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -6025,14 +6025,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, * Call this after adding a new name for a file and it will properly * update the log to reflect the new name. * - * It will return zero if all goes well, and it will return 1 if a - * full transaction commit is required. + * @ctx can not be NULL when @sync_log is false, and should be NULL when it's + * true (because it's not used). + * + * Return value depends on whether @sync_log is true or false. + * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be + * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT + * otherwise. + * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to + * to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log, + * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be + * committed (without attempting to sync the log). */ int btrfs_log_new_name(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_inode *old_dir, - struct dentry *parent) + struct dentry *parent, + bool sync_log, struct btrfs_log_ctx *ctx) { struct btrfs_fs_info *fs_info = trans->fs_info; + int ret; /* * this will force the logging code to walk the dentry chain @@ -6047,9 +6058,34 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans, */ if (inode->logged_trans <= fs_info->last_trans_committed && (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed)) - return 0; + return sync_log ? 
BTRFS_DONT_NEED_TRANS_COMMIT : + BTRFS_DONT_NEED_LOG_SYNC; + + if (sync_log) { + struct btrfs_log_ctx ctx2; + + btrfs_init_log_ctx(&ctx2, &inode->vfs_inode); + ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, + LOG_INODE_EXISTS, &ctx2); + if (ret == BTRFS_NO_LOG_SYNC) + return BTRFS_DONT_NEED_TRANS_COMMIT; + else if (ret) + return BTRFS_NEED_TRANS_COMMIT; + + ret = btrfs_sync_log(trans, inode->root, &ctx2); + if (ret) + return BTRFS_NEED_TRANS_COMMIT; + return BTRFS_DONT_NEED_TRANS_COMMIT; + } + + ASSERT(ctx); + ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, + LOG_INODE_EXISTS, ctx); + if (ret == BTRFS_NO_LOG_SYNC) + return BTRFS_DONT_NEED_LOG_SYNC; + else if (ret) + return BTRFS_NEED_TRANS_COMMIT; - return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, - LOG_INODE_EXISTS, NULL); + return BTRFS_NEED_LOG_SYNC; } diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index 122e68b89a5a..7ab9bb88a639 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h @@ -71,8 +71,16 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, int for_rename); void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, struct btrfs_inode *dir); +/* Return values for btrfs_log_new_name() */ +enum { + BTRFS_DONT_NEED_TRANS_COMMIT, + BTRFS_NEED_TRANS_COMMIT, + BTRFS_DONT_NEED_LOG_SYNC, + BTRFS_NEED_LOG_SYNC, +}; int btrfs_log_new_name(struct btrfs_trans_handle *trans, struct btrfs_inode *inode, struct btrfs_inode *old_dir, - struct dentry *parent); + struct dentry *parent, + bool sync_log, struct btrfs_log_ctx *ctx); #endif -- GitLab From de02b9f6bb65a6a1848f346f7a3617b7a9b930c0 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 17 Aug 2018 09:38:59 +0100 Subject: [PATCH 0208/1692] Btrfs: fix data corruption when deduplicating between different files If we deduplicate extents between two different files we can end up corrupting data if the source range ends at the size of the source file, the source file's size is not aligned to the filesystem's block size and the destination range does not go past the size of the destination file size. Example: $ mkfs.btrfs -f /dev/sdb $ mount /dev/sdb /mnt $ xfs_io -f -c "pwrite -S 0x6b 0 2518890" /mnt/foo # The first byte with a value of 0xae starts at an offset (2518890) # which is not a multiple of the sector size. $ xfs_io -c "pwrite -S 0xae 2518890 102398" /mnt/foo # Confirm the file content is full of bytes with values 0x6b and 0xae. $ od -t x1 /mnt/foo 0000000 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b * 11467540 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b ae ae ae ae ae ae 11467560 ae ae ae ae ae ae ae ae ae ae ae ae ae ae ae ae * 11777540 ae ae ae ae ae ae ae ae 11777550 # Create a second file with a length not aligned to the sector size, # whose bytes all have the value 0x6b, so that its extent(s) can be # deduplicated with the first file. $ xfs_io -f -c "pwrite -S 0x6b 0 557771" /mnt/bar # Now deduplicate the entire second file into a range of the first file # that also has all bytes with the value 0x6b. The destination range's # end offset must not be aligned to the sector size and must be less # then the offset of the first byte with the value 0xae (byte at offset # 2518890). $ xfs_io -c "dedupe /mnt/bar 0 1957888 557771" /mnt/foo # The bytes in the range starting at offset 2515659 (end of the # deduplication range) and ending at offset 2519040 (start offset # rounded up to the block size) must all have the value 0xae (and not # replaced with 0x00 values). 
In other words, we should have exactly # the same data we had before we asked for deduplication. $ od -t x1 /mnt/foo 0000000 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b * 11467540 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b ae ae ae ae ae ae 11467560 ae ae ae ae ae ae ae ae ae ae ae ae ae ae ae ae * 11777540 ae ae ae ae ae ae ae ae 11777550 # Unmount the filesystem and mount it again. This guarantees any file # data in the page cache is dropped. $ umount /dev/sdb $ mount /dev/sdb /mnt $ od -t x1 /mnt/foo 0000000 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b * 11461300 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 00 00 00 00 00 11461320 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 11470000 ae ae ae ae ae ae ae ae ae ae ae ae ae ae ae ae * 11777540 ae ae ae ae ae ae ae ae 11777550 # The bytes in range 2515659 to 2519040 have a value of 0x00 and not a # value of 0xae, data corruption happened due to the deduplication # operation. So fix this by rounding down, to the sector size, the length used for the deduplication when the following conditions are met: 1) Source file's range ends at its i_size; 2) Source file's i_size is not aligned to the sector size; 3) Destination range does not cross the i_size of the destination file. Fixes: e1d227a42ea2 ("btrfs: Handle unaligned length in extent_same") CC: stable@vger.kernel.org # 4.2+ Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 85c4284bb2cf..011ddfcc96e2 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3469,6 +3469,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, same_lock_start = min_t(u64, loff, dst_loff); same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start; + } else { + /* + * If the source and destination inodes are different, the + * source's range end offset matches the source's i_size, that + * i_size is not a multiple of the sector size, and the + * destination range does not go past the destination's i_size, + * we must round down the length to the nearest sector size + * multiple. If we don't do this adjustment we end replacing + * with zeroes the bytes in the range that starts at the + * deduplication range's end offset and ends at the next sector + * size multiple. + */ + if (loff + olen == i_size_read(src) && + dst_loff + len < i_size_read(dst)) { + const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize; + + len = round_down(i_size_read(src), sz) - loff; + olen = len; + } } again: -- GitLab From a5b7f4295eeae8b05ca91f6d145cd8773b08de9e Mon Sep 17 00:00:00 2001 From: Lu Fengqi Date: Thu, 9 Aug 2018 09:46:04 +0800 Subject: [PATCH 0209/1692] btrfs: fix qgroup_free wrong num_bytes in btrfs_subvolume_reserve_metadata After btrfs_qgroup_reserve_meta_prealloc(), num_bytes will be assigned again by btrfs_calc_trans_metadata_size(). Once block_rsv fails, we can't properly free the num_bytes of the previous qgroup_reserve. Use a separate variable to store the num_bytes of the qgroup_reserve. Delete the comment for the qgroup_reserved that does not exist and add a comment about use_global_rsv. 
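Illustrative note (not part of the patch itself): the bug is a single variable carrying two different sizes, so the error path frees the wrong amount; the diff below gives the qgroup size its own variable:

    /* sketch of the old, broken flow */
    num_bytes = 3 * fs_info->nodesize;                    /* qgroup reservation */
    ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
    ...
    num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);  /* overwritten here */
    ...
    if (ret && num_bytes)                                 /* frees the block_rsv size, not the qgroup size */
            btrfs_qgroup_free_meta_prealloc(root, num_bytes);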
Fixes: c4c129db5da8 ("btrfs: drop unused parameter qgroup_reserved") CC: stable@vger.kernel.org # 4.18+ Signed-off-by: Lu Fengqi Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index de6f75f5547b..2d9074295d7f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5800,7 +5800,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) * root: the root of the parent directory * rsv: block reservation * items: the number of items that we need do reservation - * qgroup_reserved: used to return the reserved size in qgroup + * use_global_rsv: allow fallback to the global block reservation * * This function is used to reserve the space for snapshot/subvolume * creation and deletion. Those operations are different with the @@ -5810,10 +5810,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) * the space reservation mechanism in start_transaction(). */ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, - struct btrfs_block_rsv *rsv, - int items, + struct btrfs_block_rsv *rsv, int items, bool use_global_rsv) { + u64 qgroup_num_bytes = 0; u64 num_bytes; int ret; struct btrfs_fs_info *fs_info = root->fs_info; @@ -5821,12 +5821,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { /* One for parent inode, two for dir entries */ - num_bytes = 3 * fs_info->nodesize; - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); + qgroup_num_bytes = 3 * fs_info->nodesize; + ret = btrfs_qgroup_reserve_meta_prealloc(root, + qgroup_num_bytes, true); if (ret) return ret; - } else { - num_bytes = 0; } num_bytes = btrfs_calc_trans_metadata_size(fs_info, items); @@ -5838,8 +5837,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, if (ret == -ENOSPC && use_global_rsv) ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1); - if (ret && num_bytes) - btrfs_qgroup_free_meta_prealloc(root, num_bytes); + if (ret && qgroup_num_bytes) + btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes); return ret; } -- GitLab From 801660b040d132f67fac6a95910ad307c5929b49 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Mon, 6 Aug 2018 18:12:37 +0800 Subject: [PATCH 0210/1692] btrfs: btrfs_shrink_device should call commit transaction at the end Test case btrfs/164 reports use-after-free: [ 6712.084324] general protection fault: 0000 [#1] PREEMPT SMP .. [ 6712.195423] btrfs_update_commit_device_size+0x75/0xf0 [btrfs] [ 6712.201424] btrfs_commit_transaction+0x57d/0xa90 [btrfs] [ 6712.206999] btrfs_rm_device+0x627/0x850 [btrfs] [ 6712.211800] btrfs_ioctl+0x2b03/0x3120 [btrfs] Reason for this is that btrfs_shrink_device adds the resized device to the fs_devices::resized_devices after it has called the last commit transaction. So the list fs_devices::resized_devices is not empty when btrfs_shrink_device returns. Now the parent function btrfs_rm_device calls: btrfs_close_bdev(device); call_rcu(&device->rcu, free_device_rcu); and then does the transactio ncommit. It goes through the fs_devices::resized_devices in btrfs_update_commit_device_size and leads to use-after-free. Fix this by making sure btrfs_shrink_device calls the last needed btrfs_commit_transaction before the return. 
This is consistent with what the grow counterpart does and this makes sure the on-disk state is persistent when the function returns. Reported-by: Lu Fengqi Tested-by: Lu Fengqi Signed-off-by: Anand Jain Reviewed-by: David Sterba [ update changelog ] Signed-off-by: David Sterba --- fs/btrfs/volumes.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index da86706123ff..f4405e430da6 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4491,7 +4491,12 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) /* Now btrfs_update_device() will change the on-disk size. */ ret = btrfs_update_device(trans, device); - btrfs_end_transaction(trans); + if (ret < 0) { + btrfs_abort_transaction(trans, ret); + btrfs_end_transaction(trans); + } else { + ret = btrfs_commit_transaction(trans); + } done: btrfs_free_path(path); if (ret) { -- GitLab From b9b8a41adeff5666b402996020b698504c927353 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 20 Aug 2018 11:25:33 +0300 Subject: [PATCH 0211/1692] btrfs: use after free in btrfs_quota_enable The issue here is that btrfs_commit_transaction() frees "trans" on both the error and the success path. So the problem would be if btrfs_commit_transaction() succeeds, and then qgroup_rescan_init() fails. That means that "ret" is non-zero and "trans" is non-NULL and it leads to a use after free inside the btrfs_end_transaction() macro. Fixes: 340f1aa27f36 ("btrfs: qgroups: Move transaction management inside btrfs_quota_enable/disable") Signed-off-by: Dan Carpenter Reviewed-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/qgroup.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 4353bb69bb86..d4917c0cddf5 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1019,10 +1019,9 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info) spin_unlock(&fs_info->qgroup_lock); ret = btrfs_commit_transaction(trans); - if (ret) { - trans = NULL; + trans = NULL; + if (ret) goto out_free_path; - } ret = qgroup_rescan_init(fs_info, 0, 1); if (!ret) { -- GitLab From 4381147df9098706caa5cf9fda37e53b2fe4871f Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 9 Aug 2018 06:28:51 -0700 Subject: [PATCH 0212/1692] ice: Fix multiple static analyser warnings This patch fixes the following smatch errors: 1) Fix "odd binop '0x0 & 0xc'" when performing the bitwise-and with a constant value of zero (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG). Remove a similar bitwise-and with 0 in ice_add_marker_act() and use the right mask ICE_LG_ACT_GENERIC_OFFSET_M in the expression. 2) Fix a similar issue "odd binop '0x0 & 0x1800' in ice_req_irq_msix_misc. 3) Fix "odd binop '0x380000 & 0x7fff8'" in ice_add_marker_act(). Also, use a new define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX instead of magic number '7'. 4) Fix warn: odd binop '0x0 & 0x18' in ice_set_dflt_vsi_ctx() by removing unnecessary logic to explicitly unset bits 3 and 4 in port_vlan_bits. These bits are unset already by the memset on ctxt->info. 
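Illustrative note (not part of the patch itself): the "odd binop" warnings all come from masking a constant zero, which turns the whole statement into a no-op; for example the line dropped from ice_add_marker_act() in the diff below:

    act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
    /* (0 << anything) & mask == 0, so this only ORs in zero and can simply be removed */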
Reported-by: Dan Carpenter Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ice/ice_adminq_cmd.h | 1 + drivers/net/ethernet/intel/ice/ice_common.c | 25 +++++++++++-------- drivers/net/ethernet/intel/ice/ice_main.c | 19 ++++++-------- drivers/net/ethernet/intel/ice/ice_switch.c | 4 +-- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 7541ec2270b3..6d3e11659ba5 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act { #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) +#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7 /* Action = 7 - Set Stat count */ #define ICE_LG_ACT_STAT_COUNT 0x7 diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 71d032cc5fa7..d5300b606d5a 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1619,20 +1619,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut, } /* LUT size is only valid for Global and PF table types */ - if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) { - flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << - ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & - ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; - } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) { + switch (lut_size) { + case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: + break; + case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; - } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) && - (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) { - flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << - ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & - ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; - } else { + break; + case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: + if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { + flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & + ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; + break; + } + /* fall-through */ + default: status = ICE_ERR_PARAM; goto ice_aq_get_set_rss_lut_exit; } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5299caf55a7f..186e764a469a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1352,14 +1352,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; /* Traffic from VSI can be sent to LAN */ ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; - /* Allow all packets untagged/tagged */ + /* By default bits 3 and 4 in port_vlan_flags are 0's which results in + * legacy behavior (show VLAN, DEI, and UP) in descriptor. Also, allow + * all packets untagged/tagged. 
+ */ ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL & ICE_AQ_VSI_PVLAN_MODE_M) >> ICE_AQ_VSI_PVLAN_MODE_S); - /* Show VLAN/UP from packets in Rx descriptors */ - ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH & - ICE_AQ_VSI_PVLAN_EMOD_M) >> - ICE_AQ_VSI_PVLAN_EMOD_S); /* Have 1:1 UP mapping for both ingress/egress tables */ table |= ICE_UP_TABLE_TRANSLATE(0, 0); table |= ICE_UP_TABLE_TRANSLATE(1, 1); @@ -2058,15 +2057,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) skip_req_irq: ice_ena_misc_vector(pf); - val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | - (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) | - PFINT_OICR_CTL_CAUSE_ENA_M; + val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | + PFINT_OICR_CTL_CAUSE_ENA_M); wr32(hw, PFINT_OICR_CTL, val); /* This enables Admin queue Interrupt causes */ - val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | - (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) | - PFINT_FW_CTL_CAUSE_ENA_M; + val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | + PFINT_FW_CTL_CAUSE_ENA_M); wr32(hw, PFINT_FW_CTL, val); itr_gran = hw->itr_gran_200; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 723d15f1e90b..6b7ec2ae5ad6 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); - act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; + act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << + ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; /* Third action Marker value */ act |= ICE_LG_ACT_GENERIC; act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; - act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); /* call the fill switch rule to fill the lookup tx rx structure */ -- GitLab From 6efa6239e7f8777dccddcebb643e6d9a0304bf43 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 9 Aug 2018 06:28:52 -0700 Subject: [PATCH 0213/1692] ice: Remove unnecessary node owner check There is already a check for owner == ICE_SCHED_NODE_OWNER_LAN at the beginning of ice_sched_update_vsi_child_nodes. Remove the additional check to address the static analysis tool smatch issue "warn: we tested 'owner' before and it was 'false'". Signed-off-by: Bruce Allan Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_sched.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 2e6c1d92cc88..eeae199469b6 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, return status; } - if (owner == ICE_SCHED_NODE_OWNER_LAN) - vsi->max_lanq[tc] = new_numqs; + vsi->max_lanq[tc] = new_numqs; return status; } -- GitLab From c0203475765f827e7b2eaf0a87222d0766e2cc4b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 22 Aug 2018 23:49:37 +0200 Subject: [PATCH 0214/1692] bpf: use per htab salt for bucket hash All BPF hash and LRU maps currently have a known and global seed we feed into jhash() which is 0. 
This is suboptimal, thus fix it by generating a random seed upon hashtab setup time which we can later on feed into jhash() on lookup, update and deletions. Fixes: 0f8e4bd8a1fc8 ("bpf: add hashtable type of eBPF maps") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: Song Liu Reviewed-by: Eduardo Valentin --- kernel/bpf/hashtab.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 04b8eda94e7d..03cc59ee9c95 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "percpu_freelist.h" #include "bpf_lru_list.h" @@ -41,6 +42,7 @@ struct bpf_htab { atomic_t count; /* number of elements in this hashtable */ u32 n_buckets; /* number of hash buckets */ u32 elem_size; /* size of each element in bytes */ + u32 hashrnd; }; /* each htab element is struct htab_elem + key + value */ @@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) if (!htab->buckets) goto free_htab; + htab->hashrnd = get_random_int(); for (i = 0; i < htab->n_buckets; i++) { INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); raw_spin_lock_init(&htab->buckets[i].lock); @@ -402,9 +405,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) return ERR_PTR(err); } -static inline u32 htab_map_hash(const void *key, u32 key_len) +static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd) { - return jhash(key, key_len, 0); + return jhash(key, key_len, hashrnd); } static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) @@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) key_size = map->key_size; - hash = htab_map_hash(key, key_size); + hash = htab_map_hash(key, key_size, htab->hashrnd); head = select_bucket(htab, hash); @@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) if (!key) goto find_first_elem; - hash = htab_map_hash(key, key_size); + hash = htab_map_hash(key, key_size, htab->hashrnd); head = select_bucket(htab, hash); @@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, key_size = map->key_size; - hash = htab_map_hash(key, key_size); + hash = htab_map_hash(key, key_size, htab->hashrnd); b = __select_bucket(htab, hash); head = &b->head; @@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, key_size = map->key_size; - hash = htab_map_hash(key, key_size); + hash = htab_map_hash(key, key_size, htab->hashrnd); b = __select_bucket(htab, hash); head = &b->head; @@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, key_size = map->key_size; - hash = htab_map_hash(key, key_size); + hash = htab_map_hash(key, key_size, htab->hashrnd); b = __select_bucket(htab, hash); head = &b->head; @@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, key_size = map->key_size; - hash = htab_map_hash(key, key_size); + hash = htab_map_hash(key, key_size, htab->hashrnd); b = __select_bucket(htab, hash); head = &b->head; @@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key) key_size = map->key_size; - hash = htab_map_hash(key, key_size); + hash = htab_map_hash(key, key_size, htab->hashrnd); b = __select_bucket(htab, hash); head = &b->head; @@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void 
*key) key_size = map->key_size; - hash = htab_map_hash(key, key_size); + hash = htab_map_hash(key, key_size, htab->hashrnd); b = __select_bucket(htab, hash); head = &b->head; -- GitLab From 5ab522443bd1dafa9e32d6f4b029128efda072de Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 9 Aug 2018 06:28:53 -0700 Subject: [PATCH 0215/1692] ice: Cleanup magic number Use define for the unit size shift of the Rx LAN context descriptor base address instead of the magic number 7. Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1 + drivers/net/ethernet/intel/ice/ice_main.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d23a91665b46..068dbc740b76 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits { struct ice_rlan_ctx { u16 head; u16 cpuid; /* bigger than needed, see above for reason */ +#define ICE_RLAN_BASE_S 7 u64 base; u16 qlen; #define ICE_RLAN_CTX_DBUF_S 7 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 186e764a469a..7d65e0ed3588 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3983,7 +3983,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) /* clear the context structure first */ memset(&rlan_ctx, 0, sizeof(rlan_ctx)); - rlan_ctx.base = ring->dma >> 7; + rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; rlan_ctx.qlen = ring->count; -- GitLab From f8ba7db850350319348b6d3c276f8ba19bc098ef Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 9 Aug 2018 06:28:54 -0700 Subject: [PATCH 0216/1692] ice: Report stats for allocated queues via ethtool stats It is not safe to have the string table for statistics change order or size over the lifetime of a given netdevice. This is because of the nature of the 3-step process for obtaining stats. First, user space performs a request for the size of the strings table. Second it performs a separate request for the strings themselves, after allocating space for the table. Third, it requests the stats themselves, also allocating space for the table. If the size decreased, there is potential to see garbage data or stats values. In the worst case, we could potentially see stats values become mis-aligned with their strings, so that it looks like a statistic is being reported differently than it actually is. Even worse, if the size increased, there is potential that the strings table or stats table was not allocated large enough and the stats code could access and write to memory it should not, potentially resulting in undefined behavior and system crashes. It isn't even safe if the size always changes under the RTNL lock. This is because the calls take place over multiple user space commands, so it is not possible to hold the RTNL lock for the entire duration of obtaining strings and stats. Further, not all consumers of the ethtool API are the user space ethtool program, and it is possible that one assumes the strings will not change (valid under the current contract), and thus only requests the stats values when requesting stats in a loop. 
Finally, it's not possible in the general case to detect when the size changes, because it is quite possible that one value which could impact the stat size increased, while another decreased. This would result in the same total number of stats, but reordering them so that stats no longer line up with the strings they belong to. Since only size changes aren't enough, we would need some sort of hash or token to determine when the strings no longer match. This would require extending the ethtool stats commands, but there is no more space in the relevant structures. The real solution to resolve this would be to add a completely new API for stats, probably over netlink. In the ice driver, the only thing impacting the stats that is not constant is the number of queues. Instead of reporting stats for each used queue, report stats for each allocated queue. We do not change the number of queues allocated for a given netdevice, as we pass this into the alloc_etherdev_mq() function to set the num_tx_queues and num_rx_queues. This resolves the potential bugs at the slight cost of displaying many queue statistics which will not be activated. Signed-off-by: Jacob Keller Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 7 +++ drivers/net/ethernet/intel/ice/ice_ethtool.c | 52 +++++++++++++++----- 2 files changed, 46 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index d8b5fff581e7..ed071ea75f20 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -89,6 +89,13 @@ extern const char ice_drv_ver[]; #define ice_for_each_rxq(vsi, i) \ for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) +/* Macros for each allocated tx/rx ring whether used or not in a VSI */ +#define ice_for_each_alloc_txq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++) + +#define ice_for_each_alloc_rxq(vsi, i) \ + for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++) + struct ice_tc_info { u16 qoffset; u16 qcount; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 1db304c01d10..c71a9b528d6d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); - return ((np->vsi->num_txq + np->vsi->num_rxq) * + return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) * (sizeof(struct ice_q_stats) / sizeof(u64))); } @@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) p += ETH_GSTRING_LEN; } - ice_for_each_txq(vsi, i) { + ice_for_each_alloc_txq(vsi, i) { snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_packets", i); p += ETH_GSTRING_LEN; @@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) p += ETH_GSTRING_LEN; } - ice_for_each_rxq(vsi, i) { + ice_for_each_alloc_rxq(vsi, i) { snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_packets", i); p += ETH_GSTRING_LEN; @@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: + /* The number (and order) of strings reported *must* remain + * constant for a given netdevice. This function must not + * report a different number based on run time parameters + * (such as the number of queues in use, or the setting of + * a private ethtool flag). 
This is due to the nature of the + * ethtool stats API. + * + * User space programs such as ethtool must make 3 separate + * ioctl requests, one for size, one for the strings, and + * finally one for the stats. Since these cross into + * user space, changes to the number or size could result in + * undefined memory access or incorrect string<->value + * correlations for statistics. + * + * Even if it appears to be safe, changes to the size or + * order of strings will suffer from race conditions and are + * not safe. + */ return ICE_ALL_STATS_LEN(netdev); default: return -EOPNOTSUPP; @@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev, /* populate per queue stats */ rcu_read_lock(); - ice_for_each_txq(vsi, j) { + ice_for_each_alloc_txq(vsi, j) { ring = READ_ONCE(vsi->tx_rings[j]); - if (!ring) - continue; - data[i++] = ring->stats.pkts; - data[i++] = ring->stats.bytes; + if (ring) { + data[i++] = ring->stats.pkts; + data[i++] = ring->stats.bytes; + } else { + data[i++] = 0; + data[i++] = 0; + } } - ice_for_each_rxq(vsi, j) { + ice_for_each_alloc_rxq(vsi, j) { ring = READ_ONCE(vsi->rx_rings[j]); - data[i++] = ring->stats.pkts; - data[i++] = ring->stats.bytes; + if (ring) { + data[i++] = ring->stats.pkts; + data[i++] = ring->stats.bytes; + } else { + data[i++] = 0; + data[i++] = 0; + } } rcu_read_unlock(); @@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) goto done; } - for (i = 0; i < vsi->num_txq; i++) { + for (i = 0; i < vsi->alloc_txq; i++) { /* clone ring and setup updated count */ tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_cnt; @@ -551,7 +577,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) goto done; } - for (i = 0; i < vsi->num_rxq; i++) { + for (i = 0; i < vsi->alloc_rxq; i++) { /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_cnt; -- GitLab From b29bc220e2c7bd494a4605defcd93b18d5a8cf86 Mon Sep 17 00:00:00 2001 From: Preethi Banala Date: Thu, 9 Aug 2018 06:28:55 -0700 Subject: [PATCH 0217/1692] ice: Clean control queues only when they are initialized Clean control queues only when they are initialized. One of the ways to validate if the basic initialization is done is by checking value of cq->sq.head and cq->rq.head variables that specify the register address. This patch adds a check to avoid NULL pointer dereference crash when tried to shutdown uninitialized control queue. 
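Illustrative note (not part of the patch itself): cq->sq.head and cq->rq.head hold register addresses that are only filled in by a successful init, so they double as "was this queue set up" flags; the shutdown paths in the diff below become:

    if (cq->sq.head) {                  /* only tear down what was initialized */
            ice_shutdown_sq(hw, cq);
            mutex_destroy(&cq->sq_lock);
    }
    if (cq->rq.head) {
            ice_shutdown_rq(hw, cq);
            mutex_destroy(&cq->rq_lock);
    }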
Signed-off-by: Preethi Banala Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_controlq.c | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 7c511f144ed6..c064416080e7 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) return 0; init_ctrlq_free_rq: - ice_shutdown_rq(hw, cq); - ice_shutdown_sq(hw, cq); - mutex_destroy(&cq->sq_lock); - mutex_destroy(&cq->rq_lock); + if (cq->rq.head) { + ice_shutdown_rq(hw, cq); + mutex_destroy(&cq->rq_lock); + } + if (cq->sq.head) { + ice_shutdown_sq(hw, cq); + mutex_destroy(&cq->sq_lock); + } return status; } @@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) return; } - ice_shutdown_sq(hw, cq); - ice_shutdown_rq(hw, cq); - mutex_destroy(&cq->sq_lock); - mutex_destroy(&cq->rq_lock); + if (cq->sq.head) { + ice_shutdown_sq(hw, cq); + mutex_destroy(&cq->sq_lock); + } + if (cq->rq.head) { + ice_shutdown_rq(hw, cq); + mutex_destroy(&cq->rq_lock); + } } /** -- GitLab From 3d6b640efcc1b07709b42dd2e9609401c6f88633 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 9 Aug 2018 06:28:56 -0700 Subject: [PATCH 0218/1692] ice: Fix bugs in control queue processing This patch is a consolidation of multiple bug fixes for control queue processing. 1) In ice_clean_adminq_subtask() remove unnecessary reads/writes to registers. The bits PFINT_FW_CTL, PFINT_MBX_CTL and PFINT_SB_CTL are not set when an interrupt arrives, which means that clearing them again can be omitted. 2) Get an accurate value in "pending" by re-reading the control queue head register from the hardware. 3) Fix a corner case involving lost control queue messages by checking for new control messages (using ice_ctrlq_pending) before exiting the cleanup routine. Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_controlq.c | 5 +++- drivers/net/ethernet/intel/ice/ice_main.c | 26 ++++++++++++++++--- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index c064416080e7..62be72fdc8f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -1065,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, clean_rq_elem_out: /* Set pending if needed, unlock and return */ - if (pending) + if (pending) { + /* re-read HW head to calculate actual pending messages */ + ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); *pending = (u16)((ntc > ntu ? 
cq->rq.count : 0) + (ntu - ntc)); + } clean_rq_elem_err: mutex_unlock(&cq->rq_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 7d65e0ed3588..f3ba4f76b6cb 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -916,6 +916,21 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) return pending && (i == ICE_DFLT_IRQ_WORK); } +/** + * ice_ctrlq_pending - check if there is a difference between ntc and ntu + * @hw: pointer to hardware info + * @cq: control queue information + * + * returns true if there are pending messages in a queue, false if there aren't + */ +static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) +{ + u16 ntu; + + ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); + return cq->rq.next_to_clean != ntu; +} + /** * ice_clean_adminq_subtask - clean the AdminQ rings * @pf: board private structure @@ -923,7 +938,6 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) static void ice_clean_adminq_subtask(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - u32 val; if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) return; @@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); - /* re-enable Admin queue interrupt causes */ - val = rd32(hw, PFINT_FW_CTL); - wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M)); + /* There might be a situation where new messages arrive to a control + * queue between processing the last message and clearing the + * EVENT_PENDING bit. So before exiting, check queue head again (using + * ice_ctrlq_pending) and process new messages if any. + */ + if (ice_ctrlq_pending(hw, &hw->adminq)) + __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); ice_flush(hw); } -- GitLab From 1eb43fc754485d772b1165118a8fb80c385f0492 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 9 Aug 2018 06:28:57 -0700 Subject: [PATCH 0219/1692] ice: Use order_base_2 to calculate higher power of 2 Currently, we use a combination of ilog2 and is_power_of_2() to calculate the next power of 2 for the qcount. This appears to be causing a warning on some combinations of GCC and the Linux kernel: MODPOST 1 modules WARNING: "____ilog2_NaN" [ice.ko] undefined! This appears to because because GCC realizes that qcount could be zero in some circumstances and thus attempts to link against the intentionally undefined ___ilog2_NaN function. The order_base_2 function is intentionally defined to return 0 when passed 0 as an argument, and thus will be safe to use here. This not only fixes the warning but makes the resulting code slightly cleaner, and is really what we should have used originally. Also update the comment to make it more clear that we are rounding up, not just incrementing the ilog2 of qcount unconditionally. 
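Illustrative note (not part of the patch itself): the hunk below replaces the two-step rounding with a single helper that is well defined for zero:

    /* before: ilog2(0) is undefined, hence the ____ilog2_NaN link error */
    pow = ilog2(qcount);
    if (!is_power_of_2(qcount))
            pow++;

    /* after: order_base_2(0) == 0; otherwise it returns log2 of qcount rounded up to a power of 2 */
    pow = order_base_2(qcount);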
Signed-off-by: Jacob Keller Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f3ba4f76b6cb..3eff1d2d1543 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1313,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) qcount = numq_tc; } - /* find higher power-of-2 of qcount */ - pow = ilog2(qcount); - - if (!is_power_of_2(qcount)) - pow++; + /* find the (rounded up) power-of-2 of qcount */ + pow = order_base_2(qcount); for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { -- GitLab From 5d8778d803e21f235e9bc727b5bd619f02abb88b Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Thu, 9 Aug 2018 06:28:58 -0700 Subject: [PATCH 0220/1692] ice: Set VLAN flags correctly In the struct ice_aqc_vsi_props the field port_vlan_flags is an overloaded term because it is used for both port VLANs (PVLANs) and regular VLANs. This is an issue and is very confusing especially when dealing with VFs because normal VLANs and port VLANs are not the same. To fix this the field was renamed to vlan_flags and all of the #define's labeled *_PVLAN_* were renamed to *_VLAN_* if they are not specific to port VLANs. Also in ice_vsi_manage_vlan_stripping, set the ICE_AQ_VSI_VLAN_MODE_ALL bit to allow the driver to add a VLAN tag to all packets it sends. Signed-off-by: Brett Creeley Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ice/ice_adminq_cmd.h | 24 +++++++------- drivers/net/ethernet/intel/ice/ice_main.c | 31 +++++++++++-------- 2 files changed, 30 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 6d3e11659ba5..a0614f472658 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -329,19 +329,19 @@ struct ice_aqc_vsi_props { /* VLAN section */ __le16 pvid; /* VLANS include priority bits */ u8 pvlan_reserved[2]; - u8 port_vlan_flags; -#define ICE_AQ_VSI_PVLAN_MODE_S 0 -#define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S) -#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1 -#define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2 -#define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3 + u8 vlan_flags; +#define ICE_AQ_VSI_VLAN_MODE_S 0 +#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S) +#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1 +#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2 +#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3 #define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) -#define ICE_AQ_VSI_PVLAN_EMOD_S 3 -#define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) -#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S) -#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S) -#define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S) -#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) +#define ICE_AQ_VSI_VLAN_EMOD_S 3 +#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) +#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S) +#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S) +#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S) +#define 
ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) u8 pvlan_reserved2[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 3eff1d2d1543..68003fad33d1 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1367,13 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; /* Traffic from VSI can be sent to LAN */ ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; - /* By default bits 3 and 4 in port_vlan_flags are 0's which results in - * legacy behavior (show VLAN, DEI, and UP) in descriptor. Also, allow - * all packets untagged/tagged. + + /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy + * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all + * packets untagged/tagged. */ - ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL & - ICE_AQ_VSI_PVLAN_MODE_M) >> - ICE_AQ_VSI_PVLAN_MODE_S); + ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & + ICE_AQ_VSI_VLAN_MODE_M) >> + ICE_AQ_VSI_VLAN_MODE_S); + /* Have 1:1 UP mapping for both ingress/egress tables */ table |= ICE_UP_TABLE_TRANSLATE(0, 0); table |= ICE_UP_TABLE_TRANSLATE(1, 1); @@ -3732,10 +3734,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) enum ice_status status; /* Here we are configuring the VSI to let the driver add VLAN tags by - * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN - * tag insertion happens in the Tx hot path, in ice_tx_map. + * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag + * insertion happens in the Tx hot path, in ice_tx_map. */ - ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL; + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); ctxt.vsi_num = vsi->vsi_num; @@ -3747,7 +3749,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) return -EIO; } - vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; + vsi->info.vlan_flags = ctxt.info.vlan_flags; return 0; } @@ -3769,12 +3771,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) */ if (ena) { /* Strip VLAN tag from Rx packet and put it in the desc */ - ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH; + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; } else { /* Disable stripping. Leave tag in packet */ - ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING; + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; } + /* Allow all packets untagged/tagged */ + ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); ctxt.vsi_num = vsi->vsi_num; @@ -3785,7 +3790,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) return -EIO; } - vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; + vsi->info.vlan_flags = ctxt.info.vlan_flags; return 0; } -- GitLab From 785e76d7a2051a9e28b9134d5388a45b16f5eb72 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 23 Aug 2018 17:46:25 +0100 Subject: [PATCH 0221/1692] tools: bpftool: return from do_event_pipe() on bad arguments When command line parsing fails in the while loop in do_event_pipe() because the number of arguments is incorrect or because the keyword is unknown, an error message is displayed, but bpftool remains stuck in the loop. 
Make sure we exit the loop upon failure. Fixes: f412eed9dfde ("tools: bpftool: add simple perf event output reader") Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/map_perf_ring.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c index 1832100d1b27..6d41323be291 100644 --- a/tools/bpf/bpftool/map_perf_ring.c +++ b/tools/bpf/bpftool/map_perf_ring.c @@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv) } while (argc) { - if (argc < 2) + if (argc < 2) { BAD_ARG(); + goto err_close_map; + } if (is_prefix(*argv, "cpu")) { char *endptr; @@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv) NEXT_ARG(); } else { BAD_ARG(); + goto err_close_map; } do_all = false; -- GitLab From 444c8263151afc06c01ac8ddcd1204624a7d4bb3 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 21 Aug 2018 14:03:04 +0000 Subject: [PATCH 0222/1692] netfilter: conntrack: remove duplicated include from nf_conntrack_proto_udp.c Remove duplicated include. Fixes: c779e849608a ("netfilter: conntrack: remove get_timeout() indirection") Signed-off-by: Yue Haibing Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_proto_udp.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 7a1b8988a931..9272a2c525a8 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -393,4 +393,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); #endif -#include -- GitLab From 3bcd7fa37f33cda8c5639a908e9eb42d856e5d8b Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 9 Aug 2018 06:28:59 -0700 Subject: [PATCH 0223/1692] ice: Update to interrupts enabled in OICR Remove the following interrupt causes that are not applicable or not handled: - PFINT_OICR_HLP_RDY_M - PFINT_OICR_CPM_RDY_M - PFINT_OICR_GPIO_M - PFINT_OICR_STORM_DETECT_M Add the following interrupt cause that's actually handled in ice_misc_intr: - PFINT_OICR_PE_CRITERR_M Signed-off-by: Bruce Allan Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 8 -------- drivers/net/ethernet/intel/ice/ice_main.c | 9 +++------ 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 499904874b3f..6076fc87df9d 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -121,10 +121,6 @@ #define PFINT_FW_CTL_CAUSE_ENA_S 30 #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) #define PFINT_OICR 0x0016CA00 -#define PFINT_OICR_HLP_RDY_S 14 -#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) -#define PFINT_OICR_CPM_RDY_S 15 -#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S) #define PFINT_OICR_ECC_ERR_S 16 #define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) #define PFINT_OICR_MAL_DETECT_S 19 @@ -133,10 +129,6 @@ #define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) #define PFINT_OICR_PCI_EXCEPTION_S 21 #define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) -#define PFINT_OICR_GPIO_S 22 -#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S) -#define PFINT_OICR_STORM_DETECT_S 24 -#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S) #define 
PFINT_OICR_HMC_ERR_S 26 #define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) #define PFINT_OICR_PE_CRITERR_S 28 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 68003fad33d1..34be94a30a60 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1704,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ rd32(hw, PFINT_OICR); /* read to clear */ - val = (PFINT_OICR_HLP_RDY_M | - PFINT_OICR_CPM_RDY_M | - PFINT_OICR_ECC_ERR_M | + val = (PFINT_OICR_ECC_ERR_M | PFINT_OICR_MAL_DETECT_M | PFINT_OICR_GRST_M | PFINT_OICR_PCI_EXCEPTION_M | - PFINT_OICR_GPIO_M | - PFINT_OICR_STORM_DETECT_M | - PFINT_OICR_HMC_ERR_M); + PFINT_OICR_HMC_ERR_M | + PFINT_OICR_PE_CRITERR_M); wr32(hw, PFINT_OICR_ENA, val); -- GitLab From c1dc2912059901f97345d9e10c96b841215fdc0f Mon Sep 17 00:00:00 2001 From: Martin Willi Date: Wed, 22 Aug 2018 10:27:17 +0200 Subject: [PATCH 0224/1692] netfilter: xt_cluster: add dependency on conntrack module The cluster match requires conntrack for matching packets. If the netns does not have conntrack hooks registered, the match does not work at all. Implicitly load the conntrack hook for the family, exactly as many other extensions do. This ensures that the match works even if the hooks have not been registered by other means. Signed-off-by: Martin Willi Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_cluster.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index dfbdbb2fc0ed..51d0c257e7a5 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c @@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par) static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) { struct xt_cluster_match_info *info = par->matchinfo; + int ret; if (info->total_nodes > XT_CLUSTER_NODES_MAX) { pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", @@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); return -EDOM; } - return 0; + + ret = nf_ct_netns_get(par->net, par->family); + if (ret < 0) + pr_info_ratelimited("cannot load conntrack support for proto=%u\n", + par->family); + return ret; +} + +static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par) +{ + nf_ct_netns_put(par->net, par->family); } static struct xt_match xt_cluster_match __read_mostly = { @@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = { .match = xt_cluster_mt, .checkentry = xt_cluster_mt_checkentry, .matchsize = sizeof(struct xt_cluster_match_info), + .destroy = xt_cluster_mt_destroy, .me = THIS_MODULE, }; -- GitLab From c7f2c42b80ed6009f44e355aefc1e40db9485a9d Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 9 Aug 2018 06:29:00 -0700 Subject: [PATCH 0225/1692] ice: Fix a few null pointer dereference issues 1) When ice_ena_msix_range() fails to reserve vectors, a devm_kfree() warning was seen in the error flow path. So check pf->irq_tracker before use in ice_clear_interrupt_scheme(). 2) In ice_vsi_cfg(), check vsi->netdev before use. 3) In ice_get_link_status, check link_up before use. 
Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 2 +- drivers/net/ethernet/intel/ice/ice_main.c | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index d5300b606d5a..ebd701ac9428 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1483,7 +1483,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) struct ice_phy_info *phy_info; enum ice_status status = 0; - if (!pi) + if (!pi || !link_up) return ICE_ERR_PARAM; phy_info = &pi->phy; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 34be94a30a60..d5d83c8848f8 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3257,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) ice_dis_msix(pf); - devm_kfree(&pf->pdev->dev, pf->irq_tracker); - pf->irq_tracker = NULL; + if (pf->irq_tracker) { + devm_kfree(&pf->pdev->dev, pf->irq_tracker); + pf->irq_tracker = NULL; + } } /** @@ -4112,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) { int err; - ice_set_rx_mode(vsi->netdev); - - err = ice_restore_vlan(vsi); - if (err) - return err; + if (vsi->netdev) { + ice_set_rx_mode(vsi->netdev); + err = ice_restore_vlan(vsi); + if (err) + return err; + } err = ice_vsi_cfg_txqs(vsi); if (!err) -- GitLab From dab0588fb616c1774bbf108eab1749dda4ac6942 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 9 Aug 2018 06:29:01 -0700 Subject: [PATCH 0226/1692] ice: Fix potential return of uninitialized value In ice_vsi_setup_[tx|rx]_rings, err is uninitialized which can result in a garbage value return to the caller. Fix that. Signed-off-by: Jesse Brandeburg Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index d5d83c8848f8..e23156515186 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4885,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi) */ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) { - int i, err; + int i, err = 0; if (!vsi->num_txq) { dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", @@ -4910,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) */ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) { - int i, err; + int i, err = 0; if (!vsi->num_rxq) { dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", -- GitLab From 43f8b22450f0a721915f1d2c7e3db6dd9573e76b Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 9 Aug 2018 06:29:02 -0700 Subject: [PATCH 0227/1692] ice: Change struct members from bool to u8 Recent versions of checkpatch have a new warning based on a documented preference of Linus to not use bool in structures due to wasted space and the size of bool is implementation dependent. For more information, see the email thread at https://lkml.org/lkml/2017/11/21/384. 
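As a side note, the layout concern is easy to see outside the kernel with a few lines of standalone C (this snippet is illustrative only, not part of the patch): the C standard leaves the size of _Bool up to the implementation, while u8 is guaranteed to be exactly one byte, so structures built from u8 have a predictable layout on every ABI.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flags_bool { bool a, b, c; };	/* sizeof(bool) is implementation-defined */
struct flags_u8 { uint8_t a, b, c; };	/* members are fixed at one byte each */

int main(void)
{
	printf("sizeof(bool)=%zu struct flags_bool=%zu struct flags_u8=%zu\n",
	       sizeof(bool), sizeof(struct flags_bool), sizeof(struct flags_u8));
	return 0;
}

On the common x86-64 ABI both structures happen to come out at three bytes; the point of the checkpatch preference is that nothing in the language guarantees that for bool.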
Signed-off-by: Bruce Allan Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice.h | 8 ++++---- drivers/net/ethernet/intel/ice/ice_switch.h | 6 +++--- drivers/net/ethernet/intel/ice/ice_txrx.h | 2 +- drivers/net/ethernet/intel/ice/ice_type.h | 16 ++++++++-------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index ed071ea75f20..868f4a1d0f72 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -196,9 +196,9 @@ struct ice_vsi { struct list_head tmp_sync_list; /* MAC filters to be synced */ struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ - bool irqs_ready; - bool current_isup; /* Sync 'link up' logging */ - bool stat_offsets_loaded; + u8 irqs_ready; + u8 current_isup; /* Sync 'link up' logging */ + u8 stat_offsets_loaded; /* queue information */ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ @@ -269,7 +269,7 @@ struct ice_pf { struct ice_hw_port_stats stats; struct ice_hw_port_stats stats_prev; struct ice_hw hw; - bool stat_prev_loaded; /* has previous stats been loaded */ + u8 stat_prev_loaded; /* has previous stats been loaded */ char int_name[ICE_INT_NAME_STR_LEN]; }; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 6f4a0d159dbf..9b8ec128ee31 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -17,7 +17,7 @@ struct ice_vsi_ctx { u16 vsis_unallocated; u16 flags; struct ice_aqc_vsi_props info; - bool alloc_from_pool; + u8 alloc_from_pool; }; enum ice_sw_fwd_act_type { @@ -94,8 +94,8 @@ struct ice_fltr_info { u8 qgrp_size; /* Rule creations populate these indicators basing on the switch type */ - bool lb_en; /* Indicate if packet can be looped back */ - bool lan_en; /* Indicate if packet can be forwarded to the uplink */ + u8 lb_en; /* Indicate if packet can be looped back */ + u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ }; /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 567067b650c4..31bc998fe200 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -143,7 +143,7 @@ struct ice_ring { u16 next_to_use; u16 next_to_clean; - bool ring_active; /* is ring online or not */ + u8 ring_active; /* is ring online or not */ /* stats structs */ struct ice_q_stats stats; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 99c8a9a71b5e..97c366e0ca59 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -83,7 +83,7 @@ struct ice_link_status { u64 phy_type_low; u16 max_frame_size; u16 link_speed; - bool lse_ena; /* Link Status Event notification */ + u8 lse_ena; /* Link Status Event notification */ u8 link_info; u8 an_info; u8 ext_info; @@ -101,7 +101,7 @@ struct ice_phy_info { struct ice_link_status link_info_old; u64 phy_type_low; enum ice_media_type media_type; - bool get_link_info; + u8 get_link_info; }; /* Common HW capabilities for SW use */ @@ -167,7 +167,7 @@ struct ice_nvm_info { u32 oem_ver; /* OEM version info */ u16 sr_words; /* Shadow RAM size in words */ u16 ver; /* NVM package version */ - bool blank_nvm_mode; /* is NVM empty (no 
FW present) */ + u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; /* Max number of port to queue branches w.r.t topology */ @@ -181,7 +181,7 @@ struct ice_sched_node { struct ice_aqc_txsched_elem_data info; u32 agg_id; /* aggregator group id */ u16 vsi_id; - bool in_use; /* suspended or in use */ + u8 in_use; /* suspended or in use */ u8 tx_sched_layer; /* Logical Layer (1-9) */ u8 num_children; u8 tc_num; @@ -218,7 +218,7 @@ struct ice_sched_vsi_info { struct ice_sched_tx_policy { u16 max_num_vsis; u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; - bool rdma_ena; + u8 rdma_ena; }; struct ice_port_info { @@ -243,7 +243,7 @@ struct ice_port_info { struct list_head agg_list; /* lists all aggregator */ u8 lport; #define ICE_LPORT_MASK 0xff - bool is_vf; + u8 is_vf; }; struct ice_switch_info { @@ -287,7 +287,7 @@ struct ice_hw { u8 max_cgds; u8 sw_entry_point_layer; - bool evb_veb; /* true for VEB, false for VEPA */ + u8 evb_veb; /* true for VEB, false for VEPA */ struct ice_bus_info bus; struct ice_nvm_info nvm; struct ice_hw_dev_caps dev_caps; /* device capabilities */ @@ -318,7 +318,7 @@ struct ice_hw { u8 itr_gran_100; u8 itr_gran_50; u8 itr_gran_25; - bool ucast_shared; /* true if VSIs can share unicast addr */ + u8 ucast_shared; /* true if VSIs can share unicast addr */ }; -- GitLab From 3968540ba61e9a19a0c4bda733db70952708d264 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Thu, 9 Aug 2018 06:29:03 -0700 Subject: [PATCH 0228/1692] ice: Trivial formatting fixes 1) Add missing "\n" when printing link event error message. 2) Update dev_err statement in probe. 3) Add function description for ice_clear_pf_cfg. 4) Fix coding style for ice_acquire_nvm. 5) netdev->mtu is unsigned so use %u. Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 3 +++ drivers/net/ethernet/intel/ice/ice_main.c | 6 +++--- drivers/net/ethernet/intel/ice/ice_nvm.c | 5 ++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index ebd701ac9428..661beea6af79 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) /** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure + * + * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port + * configuration, flow director filters, etc.). 
*/ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) { diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index e23156515186..f1e80eed2fd6 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) case ice_aqc_opc_get_link_status: if (ice_handle_link_event(pf)) dev_err(&pf->pdev->dev, - "Could not handle link event"); + "Could not handle link event\n"); break; default: dev_dbg(&pf->pdev->dev, @@ -3284,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev, err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); if (err) { - dev_err(&pdev->dev, "I/O map error %d\n", err); + dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err); return err; } @@ -5252,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) u8 count = 0; if (new_mtu == netdev->mtu) { - netdev_warn(netdev, "mtu is already %d\n", netdev->mtu); + netdev_warn(netdev, "mtu is already %u\n", netdev->mtu); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 92da0a626ce0..295a8cd87fc1 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) * * This function will request NVM ownership. */ -static enum -ice_status ice_acquire_nvm(struct ice_hw *hw, - enum ice_aq_res_access_type access) +static enum ice_status +ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) { if (hw->nvm.blank_nvm_mode) return 0; -- GitLab From 62d3a8deaa10b8346d979d0dabde56c33b742afa Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 23 Aug 2018 13:51:36 -0700 Subject: [PATCH 0229/1692] drm/i915: Free write_buf that we allocated with kzalloc. We use kzalloc to allocate the write_buf that we use for i2c transfer on hdcp write. But it seems that we are forgetting to free the memory that is not needed after i2c transfer is completed. Reported-by: Brian J Wood Fixes: 2320175feb74 ("drm/i915: Implement HDCP for HDMI") Cc: Ramalingam C Cc: Sean Paul Cc: Jani Nikula Cc: Rodrigo Vivi Cc: # v4.17+ Signed-off-by: Rodrigo Vivi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180823205136.31310-1-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_hdmi.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 8363fbd18ee8..a1799b5c12bb 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, ret = i2c_transfer(adapter, &msg, 1); if (ret == 1) - return 0; - return ret >= 0 ? -EIO : ret; + ret = 0; + else if (ret >= 0) + ret = -EIO; + + kfree(write_buf); + return ret; } static -- GitLab From 10568f6c5761db24249c610c94d6e44d5505a0ba Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 22 Aug 2018 11:33:27 +0200 Subject: [PATCH 0230/1692] netfilter: xt_checksum: ignore gso skbs Satish Patel reports a skb_warn_bad_offload() splat caused by -j CHECKSUM rules: -A POSTROUTING -p tcp -m tcp --sport 80 -j CHECKSUM The CHECKSUM target has never worked with GSO skbs, and the above rule makes no sense as kernel will handle checksum updates on transmit. 
Unfortunately, there are 3rd party tools that install such rules, so we cannot reject this from the config plane without potential breakage. Amend Kconfig text to clarify that the CHECKSUM target is only useful in virtualized environments, where old dhcp clients that use AF_PACKET used to discard UDP packets with a 'bad' header checksum and add a one-time warning in case such rule isn't restricted to UDP. v2: check IP6T_F_PROTO flag before cmp (Michal Kubecek) Reported-by: Satish Patel Reported-by: Markos Chandras Reported-by: Michal Kubecek Signed-off-by: Florian Westphal Reviewed-by: Michal Kubecek Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 12 ++++++------ net/netfilter/xt_CHECKSUM.c | 22 +++++++++++++++++++++- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 71709c104081..f61c306de1d0 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM depends on NETFILTER_ADVANCED ---help--- This option adds a `CHECKSUM' target, which can be used in the iptables mangle - table. + table to work around buggy DHCP clients in virtualized environments. - You can use this target to compute and fill in the checksum in - a packet that lacks a checksum. This is particularly useful, - if you need to work around old applications such as dhcp clients, - that do not work well with checksum offloads, but don't want to disable - checksum offload in your device. + Some old DHCP clients drop packets because they are not aware + that the checksum would normally be offloaded to hardware and + thus should be considered valid. + This target can be used to fill in the checksum using iptables + when such packets are sent via a virtual network device. To compile it as a module, choose M here. If unsure, say N. diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c index 9f4151ec3e06..6c7aa6a0a0d2 100644 --- a/net/netfilter/xt_CHECKSUM.c +++ b/net/netfilter/xt_CHECKSUM.c @@ -16,6 +16,9 @@ #include #include +#include +#include + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael S. Tsirkin "); MODULE_DESCRIPTION("Xtables: checksum modification"); @@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM"); static unsigned int checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) { - if (skb->ip_summed == CHECKSUM_PARTIAL) + if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb)) skb_checksum_help(skb); return XT_CONTINUE; @@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) static int checksum_tg_check(const struct xt_tgchk_param *par) { const struct xt_CHECKSUM_info *einfo = par->targinfo; + const struct ip6t_ip6 *i6 = par->entryinfo; + const struct ipt_ip *i4 = par->entryinfo; if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { pr_info_ratelimited("unsupported CHECKSUM operation %x\n", @@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par) if (!einfo->operation) return -EINVAL; + switch (par->family) { + case NFPROTO_IPV4: + if (i4->proto == IPPROTO_UDP && + (i4->invflags & XT_INV_PROTO) == 0) + return 0; + break; + case NFPROTO_IPV6: + if ((i6->flags & IP6T_F_PROTO) && + i6->proto == IPPROTO_UDP && + (i6->invflags & XT_INV_PROTO) == 0) + return 0; + break; + } + + pr_warn_once("CHECKSUM should be avoided. 
If really needed, restrict with \"-p udp\" and only use in OUTPUT\n"); return 0; } -- GitLab From 82c82ab658655befcb6aa47cbdb98dadce1a0cfe Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 21 Aug 2018 12:00:08 +0200 Subject: [PATCH 0231/1692] udf: Remove dead code from udf_find_fileset() Remove dead code and slightly simplify code in udf_find_fileset(). Signed-off-by: Jan Kara --- fs/udf/super.c | 62 +------------------------------------------------- 1 file changed, 1 insertion(+), 61 deletions(-) diff --git a/fs/udf/super.c b/fs/udf/super.c index 3040dc2a32f6..68d57b61f3af 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb, struct kernel_lb_addr *root) { struct buffer_head *bh = NULL; - long lastblock; uint16_t ident; - struct udf_sb_info *sbi; if (fileset->logicalBlockNum != 0xFFFFFFFF || fileset->partitionReferenceNum != 0xFFFF) { @@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb, return 1; } - } - - sbi = UDF_SB(sb); - if (!bh) { - /* Search backwards through the partitions */ - struct kernel_lb_addr newfileset; - -/* --> cvg: FIXME - is it reasonable? */ - return 1; - - for (newfileset.partitionReferenceNum = sbi->s_partitions - 1; - (newfileset.partitionReferenceNum != 0xFFFF && - fileset->logicalBlockNum == 0xFFFFFFFF && - fileset->partitionReferenceNum == 0xFFFF); - newfileset.partitionReferenceNum--) { - lastblock = sbi->s_partmaps - [newfileset.partitionReferenceNum] - .s_partition_len; - newfileset.logicalBlockNum = 0; - - do { - bh = udf_read_ptagged(sb, &newfileset, 0, - &ident); - if (!bh) { - newfileset.logicalBlockNum++; - continue; - } - - switch (ident) { - case TAG_IDENT_SBD: - { - struct spaceBitmapDesc *sp; - sp = (struct spaceBitmapDesc *) - bh->b_data; - newfileset.logicalBlockNum += 1 + - ((le32_to_cpu(sp->numOfBytes) + - sizeof(struct spaceBitmapDesc) - - 1) >> sb->s_blocksize_bits); - brelse(bh); - break; - } - case TAG_IDENT_FSD: - *fileset = newfileset; - break; - default: - newfileset.logicalBlockNum++; - brelse(bh); - bh = NULL; - break; - } - } while (newfileset.logicalBlockNum < lastblock && - fileset->logicalBlockNum == 0xFFFFFFFF && - fileset->partitionReferenceNum == 0xFFFF); - } - } - - if ((fileset->logicalBlockNum != 0xFFFFFFFF || - fileset->partitionReferenceNum != 0xFFFF) && bh) { udf_debug("Fileset at block=%u, partition=%u\n", fileset->logicalBlockNum, fileset->partitionReferenceNum); - sbi->s_partition = fileset->partitionReferenceNum; + UDF_SB(sb)->s_partition = fileset->partitionReferenceNum; udf_load_fileset(sb, bh, root); brelse(bh); return 0; -- GitLab From ee4af50ca94f58afc3532662779b9cf80bbe27c8 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 21 Aug 2018 14:52:34 +0200 Subject: [PATCH 0232/1692] udf: Fix mounting of Win7 created UDF filesystems Win7 is creating UDF filesystems with single partition with number 8192. Current partition descriptor scanning code does not handle this well as it incorrectly assumes that partition numbers will form mostly contiguous space of small numbers. This results in unmountable media due to errors like: UDF-fs: error (device dm-1): udf_read_tagged: tag version 0x0000 != 0x0002 || 0x0003, block 0 UDF-fs: warning (device dm-1): udf_fill_super: No fileset found Fix the problem by handling partition descriptors in a way that sparse partition numbering does not matter. 
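Reduced to a sketch, the approach is to keep (partition number, record) pairs and scan them, rather than indexing an array by the raw partition number, so a lone partition numbered 8192 costs a single slot. The names below are simplified and the table-growing step from the patch is omitted; this is not the actual fs/udf code.

struct part_rec {
	u32 partnum;
	struct udf_vds_record rec;
};

/* Return the record slot for @partnum: reuse an existing entry if this
 * partition was already seen (so later, prevailing descriptors replace
 * earlier ones), otherwise append a new one.  Assumes @tab has spare room.
 */
static struct udf_vds_record *get_part_rec(struct part_rec *tab,
					   unsigned int *num, u32 partnum)
{
	unsigned int i;

	for (i = 0; i < *num; i++)
		if (tab[i].partnum == partnum)
			return &tab[i].rec;

	tab[*num].partnum = partnum;
	return &tab[(*num)++].rec;
}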
Reported-and-tested-by: jean-luc malet CC: stable@vger.kernel.org Fixes: 7b78fd02fb19530fd101ae137a1f46aa466d9bb6 Signed-off-by: Jan Kara --- fs/udf/super.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/fs/udf/super.c b/fs/udf/super.c index 68d57b61f3af..6f515651a2c2 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1510,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ */ #define PART_DESC_ALLOC_STEP 32 +struct part_desc_seq_scan_data { + struct udf_vds_record rec; + u32 partnum; +}; + struct desc_seq_scan_data { struct udf_vds_record vds[VDS_POS_LENGTH]; unsigned int size_part_descs; - struct udf_vds_record *part_descs_loc; + unsigned int num_part_descs; + struct part_desc_seq_scan_data *part_descs_loc; }; static struct udf_vds_record *handle_partition_descriptor( @@ -1522,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor( { struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; int partnum; + int i; partnum = le16_to_cpu(desc->partitionNumber); - if (partnum >= data->size_part_descs) { - struct udf_vds_record *new_loc; + for (i = 0; i < data->num_part_descs; i++) + if (partnum == data->part_descs_loc[i].partnum) + return &(data->part_descs_loc[i].rec); + if (data->num_part_descs >= data->size_part_descs) { + struct part_desc_seq_scan_data *new_loc; unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); @@ -1537,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor( data->part_descs_loc = new_loc; data->size_part_descs = new_size; } - return &(data->part_descs_loc[partnum]); + return &(data->part_descs_loc[data->num_part_descs++].rec); } @@ -1587,6 +1597,7 @@ static noinline int udf_process_sequence( memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); data.size_part_descs = PART_DESC_ALLOC_STEP; + data.num_part_descs = 0; data.part_descs_loc = kcalloc(data.size_part_descs, sizeof(*data.part_descs_loc), GFP_KERNEL); @@ -1598,7 +1609,6 @@ static noinline int udf_process_sequence( * are in it. */ for (; (!done && block <= lastblock); block++) { - bh = udf_read_tagged(sb, block, block, &ident); if (!bh) break; @@ -1670,13 +1680,10 @@ static noinline int udf_process_sequence( } /* Now handle prevailing Partition Descriptors */ - for (i = 0; i < data.size_part_descs; i++) { - if (data.part_descs_loc[i].block) { - ret = udf_load_partdesc(sb, - data.part_descs_loc[i].block); - if (ret < 0) - return ret; - } + for (i = 0; i < data.num_part_descs; i++) { + ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block); + if (ret < 0) + return ret; } return 0; -- GitLab From 5e2e2f9f76e157063a656351728703cb02b068f1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 23 Aug 2018 16:59:25 +0300 Subject: [PATCH 0233/1692] PM / clk: signedness bug in of_pm_clk_add_clks() "count" needs to be signed for the error handling to work. I made "i" signed as well so they match. Fixes: 02113ba93ea4 (PM / clk: Add support for obtaining clocks from device-tree) Cc: 4.6+ # 4.6+ Signed-off-by: Dan Carpenter Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/clock_ops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 8e2e4757adcb..5a42ae4078c2 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); int of_pm_clk_add_clks(struct device *dev) { struct clk **clks; - unsigned int i, count; + int i, count; int ret; if (!dev || !dev->of_node) -- GitLab From b6fdfbff078975c53383fc146a2a54985eab6b6d Mon Sep 17 00:00:00 2001 From: Misono Tomohiro Date: Fri, 24 Aug 2018 11:35:28 +0900 Subject: [PATCH 0234/1692] btrfs: Fix suspicious RCU usage warning in btrfs_debug_in_rcu Commit 672d599041c8 ("btrfs: Use wrapper macro for rcu string to remove duplicate code") replaces some open coded RCU string handling with macro. It turns out that btrfs_debug_in_rcu() is used for the first time and the macro lacks lock/unlock of RCU string for non-debug case (i.e. when the message is not printed), leading to suspicious RCU usage warning when CONFIG_PROVE_RCU is on. Fix this by adding a wrapper to call lock/unlock for the non-debug case too. Fixes: 672d599041c8 ("btrfs: Use wrapper macro for rcu string to remove duplicate code") Reported-by: David Howells Tested-by: David Howells Signed-off-by: Misono Tomohiro Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a67cc190a84b..0b856da2fd3b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3390,9 +3390,9 @@ do { \ #define btrfs_debug(fs_info, fmt, args...) \ btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ - btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) + btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ - btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) + btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) #define btrfs_debug_rl(fs_info, fmt, args...) \ btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) #endif @@ -3404,6 +3404,13 @@ do { \ rcu_read_unlock(); \ } while (0) +#define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \ +do { \ + rcu_read_lock(); \ + btrfs_no_printk(fs_info, fmt, ##args); \ + rcu_read_unlock(); \ +} while (0) + #define btrfs_printk_ratelimited(fs_info, fmt, args...) \ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ -- GitLab From a9910c0886470b659a6c3629d6467d5639c327e9 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 19 Jul 2018 22:04:07 +0800 Subject: [PATCH 0235/1692] ixgb: use dma_zalloc_coherent instead of allocator/memset Use dma_zalloc_coherent instead of dma_alloc_coherent followed by memset 0. 
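Sketched generically (this is not the ixgb code), and assuming dma_zalloc_coherent() keeps its usual contract of returning coherent memory that is already cleared, the transformation looks like this:

/* One call allocates and zeroes the ring; the explicit memset() that used to
 * follow dma_alloc_coherent() becomes unnecessary.
 */
static void *demo_alloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
{
	return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
}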
Signed-off-by: YueHaibing Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 43664adf7a3c..d3e72d0f66ef 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); rxdr->size = ALIGN(rxdr->size, 4096); - rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, - GFP_KERNEL); + rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); if (!rxdr->desc) { vfree(rxdr->buffer_info); return -ENOMEM; } - memset(rxdr->desc, 0, rxdr->size); rxdr->next_to_clean = 0; rxdr->next_to_use = 0; -- GitLab From cf1acec008f8d7761aa3fd7c4bca7e17b2d2512d Mon Sep 17 00:00:00 2001 From: Bo Chen Date: Mon, 23 Jul 2018 09:01:29 -0700 Subject: [PATCH 0236/1692] e1000: check on netif_running() before calling e1000_up() When the device is not up, the call to 'e1000_up()' from the error handling path of 'e1000_set_ringparam()' causes a kernel oops with a null-pointer dereference. The null-pointer dereference is triggered in function 'e1000_alloc_rx_buffers()' at line 'buffer_info = &rx_ring->buffer_info[i]'. This bug was reported by COD, a tool for testing kernel module binaries I am building. This bug was also detected by KFI from Dr. Kai Cong. This patch fixes the bug by checking on 'netif_running()' before calling 'e1000_up()' in 'e1000_set_ringparam()'. Signed-off-by: Bo Chen Acked-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index bdb3f8e65ed4..c1e4e94f100f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -644,7 +644,8 @@ static int e1000_set_ringparam(struct net_device *netdev, err_alloc_rx: kfree(txdr); err_alloc_tx: - e1000_up(adapter); + if (netif_running(adapter->netdev)) + e1000_up(adapter); err_setup: clear_bit(__E1000_RESETTING, &adapter->flags); return err; -- GitLab From ee400a3f1bfe7004a3e14b81c38ccc5583c26295 Mon Sep 17 00:00:00 2001 From: Bo Chen Date: Mon, 23 Jul 2018 09:01:30 -0700 Subject: [PATCH 0237/1692] e1000: ensure to free old tx/rx rings in set_ringparam() In 'e1000_set_ringparam()', the tx_ring and rx_ring are updated with new value and the old tx/rx rings are freed only when the device is up. There are resource leaks on old tx/rx rings when the device is not up. This bug is reported by COD, a tool for testing kernel module binaries I am building. This patch fixes the bug by always calling 'kfree()' on old tx/rx rings in 'e1000_set_ringparam()'. 
Signed-off-by: Bo Chen Reviewed-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index c1e4e94f100f..2569a168334c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev, adapter->tx_ring = tx_old; e1000_free_all_rx_resources(adapter); e1000_free_all_tx_resources(adapter); - kfree(tx_old); - kfree(rx_old); adapter->rx_ring = rxdr; adapter->tx_ring = txdr; err = e1000_up(adapter); if (err) goto err_setup; } + kfree(tx_old); + kfree(rx_old); clear_bit(__E1000_RESETTING, &adapter->flags); return 0; -- GitLab From a798fbac33c4cbfe7d539298623b7af6c3f9525a Mon Sep 17 00:00:00 2001 From: Jesus Sanchez-Palencia Date: Thu, 26 Jul 2018 10:20:38 -0700 Subject: [PATCH 0238/1692] igb: Use an advanced ctx descriptor for launchtime On i210, Launchtime (TxTime) requires the usage of an "Advanced Transmit Context Descriptor" for retrieving the timestamp of a packet. The igb driver correctly builds such descriptor on the segmentation flow (i.e. igb_tso()) or on the checksum one (i.e. igb_tx_csum()), but the feature is broken for AF_PACKET if the IGB_TX_FLAGS_VLAN is not set, which happens due to an early return on igb_tx_csum(). This flag is only set by the kernel when a VLAN interface is used, thus we can't just rely on it. Here we are fixing this issue by checking if launchtime is enabled for the current tx_ring before performing the early return. Signed-off-by: Jesus Sanchez-Palencia Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index d03c2f0d7592..74416f8a9446 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) if (skb->ip_summed != CHECKSUM_PARTIAL) { csum_failed: - if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) && + !tx_ring->launchtime_enable) return; goto no_csum; } -- GitLab From 151356270b0761e455ed82bba3353fb494451555 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:04:53 +0800 Subject: [PATCH 0239/1692] igb: Replace GFP_ATOMIC with GFP_KERNEL in igb_sw_init() igb_sw_init() is never called in atomic context. It calls kzalloc() and kcalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. 
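The rule of thumb behind the change, shown with a made-up adapter structure rather than the igb one: GFP_KERNEL may sleep, so it is the right flag whenever the caller runs in process context (probe and other init paths qualify), while GFP_ATOMIC is reserved for contexts that must not sleep, such as interrupt handlers or sections holding a spinlock, and puts more strain on the allocator.

struct demo_adapter {
	u32 *shadow_vfta;
};

/* Called from probe, i.e. process context, so a sleeping allocation is fine. */
static int demo_sw_init(struct demo_adapter *adapter, unsigned int entries)
{
	adapter->shadow_vfta = kcalloc(entries, sizeof(*adapter->shadow_vfta),
				       GFP_KERNEL);
	if (!adapter->shadow_vfta)
		return -ENOMEM;
	return 0;
}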
Signed-off-by: Jia-Ju Bai Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 74416f8a9446..a32c576c1e65 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter) adapter->mac_table = kcalloc(hw->mac.rar_entry_count, sizeof(struct igb_mac_addr), - GFP_ATOMIC); + GFP_KERNEL); if (!adapter->mac_table) return -ENOMEM; @@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter) /* Setup and initialize a copy of the hw vlan table array */ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), - GFP_ATOMIC); + GFP_KERNEL); if (!adapter->shadow_vfta) return -ENOMEM; -- GitLab From 69a64658de502c8ca383fe2c5a5208f00b5844cd Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:07:38 +0800 Subject: [PATCH 0240/1692] igb: Replace mdelay() with msleep() in igb_integrated_phy_loopback() igb_integrated_phy_loopback() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index f92f7918112d..5acf3b743876 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) if (hw->phy.type == e1000_phy_m88) igb_phy_disable_receiver(adapter); - mdelay(500); + msleep(500); return 0; } -- GitLab From 374f78f75be98c72876a4d0311b278cd226effba Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Fri, 27 Jul 2018 16:22:31 +0800 Subject: [PATCH 0241/1692] ixgbe: Replace GFP_ATOMIC with GFP_KERNEL ixgbe_fcoe_ddp_setup(), ixgbe_setup_fcoe_ddp_resources() and ixgbe_sw_init() are never called in atomic context. They call kmalloc(), dma_pool_alloc() and kzalloc() with GFP_ATOMIC, which is not necessary. GFP_ATOMIC can be replaced with GFP_KERNEL. This is found by a static analysis tool named DCNS written by myself. 
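For context (paraphrasing the kernel's timers-howto documentation): mdelay() busy-spins the CPU for the whole interval and is meant for atomic context where sleeping is impossible, whereas msleep() puts the task to sleep and may only be used where sleeping is allowed - which is why a half-second busy-wait in a loopback test path is pure waste. A hypothetical helper, not igb code:

/* Wait for a PHY self-test to settle. */
static void demo_phy_settle(bool atomic_ctx)
{
	if (atomic_ctx)
		mdelay(500);	/* spins the CPU for the full 500 ms */
	else
		msleep(500);	/* yields the CPU; may sleep slightly longer */
}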
Signed-off-by: Jia-Ju Bai Acked-by: Sebastian Basierski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 94b3165ff543..ccd852ad62a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* alloc the udl from per cpu ddp pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; @@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) return 0; /* Extra buffer to be shared by all DDPs for HW work around */ - buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL); if (!buffer) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index af4c9ae7f432..663d59ba527a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, adapter->mac_table = kcalloc(hw->mac.num_rar_entries, sizeof(struct ixgbe_mac_addr), - GFP_ATOMIC); + GFP_KERNEL); if (!adapter->mac_table) return -ENOMEM; -- GitLab From fabf1bce103aa8e3db27ff2cc55f8e0fb0abcc30 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Mon, 30 Jul 2018 15:52:48 -0700 Subject: [PATCH 0242/1692] ixgbe: Prevent unsupported configurations with XDP These changes address comments by Jakub Kicinski on commit 38b7e7f8ae82 ("ixgbe: Do not allow LRO or MTU change with XDP"). Change the MTU check with XDP to allow any supported value and only reject those outside of the range as opposed to rejecting any change when XDP is active. In situations where MTU size is not supported, return -EINVAL instead of -EPERM. Add checks when enabling SRIOV, DCB, or adding L2FW offloaded device as they are not supported with XDP. 
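To make the new bound concrete with a worked example (the 3 KiB figure below is an assumption for the arithmetic, chosen as a typical ixgbe Rx buffer size when an XDP program is attached):

	new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN
	               = new_mtu + 14 + 4 + 4
	               = new_mtu + 22

so with a 3072-byte Rx buffer any MTU up to 3072 - 22 = 3050 is now accepted while XDP is active, and only larger requests fail - with -EINVAL rather than the blanket -EPERM that used to reject every change.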
CC: Jakub Kicinski Signed-off-by: Tony Nguyen Acked-by: Jakub Kicinski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 28 +++++++++++++++++-- .../net/ethernet/intel/ixgbe/ixgbe_sriov.c | 5 ++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 663d59ba527a..9a23d33a47ed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) struct ixgbe_adapter *adapter = netdev_priv(netdev); if (adapter->xdp_prog) { - e_warn(probe, "MTU cannot be changed while XDP program is loaded\n"); - return -EPERM; + int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (new_frame_size > ixgbe_rx_bufsz(ring)) { + e_warn(probe, "Requested MTU size is not supported with XDP\n"); + return -EINVAL; + } + } } /* @@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) #ifdef CONFIG_IXGBE_DCB if (tc) { + if (adapter->xdp_prog) { + e_warn(probe, "DCB is not supported with XDP\n"); + + ixgbe_init_interrupt_scheme(adapter); + if (netif_running(dev)) + ixgbe_open(dev); + return -EINVAL; + } + netdev_set_num_tc(dev, tc); ixgbe_set_prio_tc_map(adapter); @@ -9934,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) int tcs = adapter->hw_tcs ? : 1; int pool, err; + if (adapter->xdp_prog) { + e_warn(probe, "L2FW offload is not supported with XDP\n"); + return ERR_PTR(-EINVAL); + } + /* The hardware supported by ixgbe only filters on the destination MAC * address. In order to avoid issues we only support offloading modes * where the hardware can actually provide the functionality. diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 6f59933cdff7..9264a5f8a5d0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; int i; + if (adapter->xdp_prog) { + e_warn(probe, "SRIOV is not supported with XDP\n"); + return -EINVAL; + } + /* Enable VMDq flag so device will be set in VM mode */ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_VMDQ_ENABLED; -- GitLab From 939b701ad63314f5aa90dcd3d866f73954945209 Mon Sep 17 00:00:00 2001 From: Sebastian Basierski Date: Tue, 31 Jul 2018 18:16:00 +0200 Subject: [PATCH 0243/1692] ixgbe: fix driver behaviour after issuing VFLR Since VFLR doesn't clear VFMBMEM (VF Mailbox Memory) and is not re-enabling queues correctly we should fix this behavior. 
Signed-off-by: Sebastian Basierski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbe/ixgbe_sriov.c | 26 +++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 + 2 files changed, 27 insertions(+) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 9264a5f8a5d0..3c6f01c41b78 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -693,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); u8 num_tcs = adapter->hw_tcs; + u32 reg_val; + u32 queue; + u32 word; /* remove VLAN filters beloning to this VF */ ixgbe_clear_vf_vlans(adapter, vf); @@ -731,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) /* reset VF api back to unknown */ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; + + /* Restart each queue for given VF */ + for (queue = 0; queue < q_per_pool; queue++) { + unsigned int reg_idx = (vf * q_per_pool) + queue; + + reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); + + /* Re-enabling only configured queues */ + if (reg_val) { + reg_val |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); + reg_val &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); + } + } + + /* Clear VF's mailbox memory */ + for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); + + IXGBE_WRITE_FLUSH(hw); } static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 44cfb2021145..41bcbb337e83 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2518,6 +2518,7 @@ enum { /* Translated register #defines */ #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) +#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) -- GitLab From fa38e30ac73fbb01d7e5d0fd1b12d412fa3ac3ee Mon Sep 17 00:00:00 2001 From: Martyna Szapar Date: Tue, 7 Aug 2018 17:11:23 -0700 Subject: [PATCH 0244/1692] i40e: Fix for Tx timeouts when interface is brought up if DCB is enabled If interface is connected to switch port configured for DCB there are TX timeouts when bringing up interface. Problem started appearing after adding in i40e driver code mqprio hardware offload mode. In function i40e_vsi_configure_bw_alloc was added resetting BW rate which should be executing when mqprio qdisc is removed but was also when there was no mqprio qdisc added and DCB was enabled. In this patch was added additional check for DCB flag so now when DCB is enabled the correct DCB configs from before mqprio patch are restored. 
Signed-off-by: Martyna Szapar Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f2c622e78802..ac685ad4d877 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; + struct i40e_pf *pf = vsi->back; i40e_status ret; int i; - if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) + /* There is no need to reset BW when mqprio mode is on. */ + if (pf->flags & I40E_FLAG_TC_MQPRIO) return 0; - if (!vsi->mqprio_qopt.qopt.hw) { + if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) - dev_info(&vsi->back->pdev->dev, + dev_info(&pf->pdev->dev, "Failed to reset tx rate for vsi->seid %u\n", vsi->seid); return ret; @@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; - ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, - NULL); + ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); if (ret) { - dev_info(&vsi->back->pdev->dev, + dev_info(&pf->pdev->dev, "AQ command Config VSI BW allocation per TC failed = %d\n", - vsi->back->hw.aq.asq_last_status); + pf->hw.aq.asq_last_status); return -EINVAL; } -- GitLab From 07f3701387dcab3a4fb0166098fb2754a1b927e1 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 20 Aug 2018 08:12:27 -0700 Subject: [PATCH 0245/1692] i40e: fix condition of WARN_ONCE for stat strings Commit 9b10df596bd4 ("i40e: use WARN_ONCE to replace the commented BUG_ON size check") introduced a warning check to make sure that the size of the stat strings was always the expected value. This code accidentally inverted the check of the data pointer. Fix this so that we accurately count the size of the stats we copied in. This fixes an erroneous WARN kernel splat that occurs when requesting ethtool statistics. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Tested-by: Mauro S M Rodrigues Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index abcd096ede14..5ff6caa83948 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); - WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, + WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, "stat strings count mismatch!"); } -- GitLab From a296b16270ab8d3b1c2a41ca1dd4d2fc34b598d9 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 16 Aug 2018 11:36:38 +0800 Subject: [PATCH 0246/1692] drm/amd/display: Fix bug use wrong pp interface Used wrong pp interface, the original interface is exposed by dpm on SI and paritial CI. Pointed out by Francis David v2: dal only need to set min_dcefclk and min_fclk to smu. 
so use display_clock_voltage_request interface, instand of update all display configuration. Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, { struct dc_context *ctx = pp->ctx; struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + struct pp_display_clock_request clock = {0}; - if (!pp_funcs || !pp_funcs->display_configuration_changed) + if (!pp_funcs || !pp_funcs->display_clock_voltage_request) return; - amdgpu_dpm_display_configuration_changed(adev); + clock.clock_type = amd_pp_dcf_clock; + clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; + pp_funcs->display_clock_voltage_request(pp_handle, &clock); + + clock.clock_type = amd_pp_f_clock; + clock.clock_freq_in_khz = req->hard_min_fclk_khz; + pp_funcs->display_clock_voltage_request(pp_handle, &clock); } void pp_rv_set_wm_ranges(struct pp_smu *pp, -- GitLab From 39d1e234e1e13f65f4d53715d34aadfb6249eeaf Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Wed, 1 Aug 2018 10:34:41 -0700 Subject: [PATCH 0247/1692] drm/i915/icl: implement the tc/legacy HPD {dis,}connect flows Unlike the other ports, TC ports are not available to use as soon as we get a hotplug. The TC PHYs can be shared between multiple controllers: display, USB, etc. As a result, handshaking through FIA is required around connect and disconnect to cleanly transfer ownership with the controller and set the type-C power state. This patch implements the flow sequences described by our specification. We opt to grab ownership of the ports as soon as we get the hotplugs in order to simplify the interactions and avoid surprises in the user space side. We may consider changing this in the future, once we improve our testing capabilities on this area. v2: * This unifies the DP and HDMI patches so we can discuss everything at once so people looking at random single patches can actually understand the direction. * I found out the spec was updated a while ago. There's a small difference in the connect flow and the patch was updated for that. * Our spec also now gives a good explanation on what is really happening. As a result, comments were added. * Add some more comments as requested by Rodrigo (Rodrigo). v3: * Downgrade a DRM_ERROR that shouldn't ever happen but we can't act on in case it does (Chris). BSpec: 21750, 4250. 
Cc: Animesh Manna Cc: Rodrigo Vivi Reviewed-by: Rodrigo Vivi Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180801173441.9789-1-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 6 ++ drivers/gpu/drm/i915/intel_dp.c | 110 +++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_hdmi.c | 11 ++- 3 files changed, 123 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a338aaa2b313..8534f88a60f6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10691,4 +10691,10 @@ enum skl_power_gate { #define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) #define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) +#define PORT_TX_DFLEXDPPMS _MMIO(0x163890) +#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) + +#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894) +#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8e0e14ba534f..b3f6f04c3c7d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4799,6 +4799,104 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, type_str); } +/* + * This function implements the first part of the Connect Flow described by our + * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading + * lanes, EDID, etc) is done as needed in the typical places. + * + * Unlike the other ports, type-C ports are not available to use as soon as we + * get a hotplug. The type-C PHYs can be shared between multiple controllers: + * display, USB, etc. As a result, handshaking through FIA is required around + * connect and disconnect to cleanly transfer ownership with the controller and + * set the type-C power state. + * + * We could opt to only do the connect flow when we actually try to use the AUX + * channels or do a modeset, then immediately run the disconnect flow after + * usage, but there are some implications on this for a dynamic environment: + * things may go away or change behind our backs. So for now our driver is + * always trying to acquire ownership of the controller as soon as it gets an + * interrupt (or polls state and sees a port is connected) and only gives it + * back when it sees a disconnect. Implementation of a more fine-grained model + * will require a lot of coordination with user space and thorough testing for + * the extra possible cases. + */ +static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port) +{ + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; + + if (dig_port->tc_type != TC_PORT_LEGACY && + dig_port->tc_type != TC_PORT_TYPEC) + return true; + + val = I915_READ(PORT_TX_DFLEXDPPMS); + if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { + DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); + return false; + } + + /* + * This function may be called many times in a row without an HPD event + * in between, so try to avoid the write when we can. + */ + val = I915_READ(PORT_TX_DFLEXDPCSSS); + if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) { + val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + } + + /* + * Now we have to re-check the live state, in case the port recently + * became disconnected. Not necessary for legacy mode. 
+ */ + if (dig_port->tc_type == TC_PORT_TYPEC && + !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { + DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); + val = I915_READ(PORT_TX_DFLEXDPCSSS); + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + return false; + } + + return true; +} + +/* + * See the comment at the connect function. This implements the Disconnect + * Flow. + */ +static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port) +{ + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; + + if (dig_port->tc_type != TC_PORT_LEGACY && + dig_port->tc_type != TC_PORT_TYPEC) + return; + + /* + * This function may be called many times in a row without an HPD event + * in between, so try to avoid the write when we can. + */ + val = I915_READ(PORT_TX_DFLEXDPCSSS); + if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) { + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + } +} + +/* + * The type-C ports are different because even when they are connected, they may + * not be available/usable by the graphics driver: see the comment on + * icl_tc_phy_connect(). So in our driver instead of adding the additional + * concept of "usable" and make everything check for "connected and usable" we + * define a port as "connected" when it is not only connected, but also when it + * is usable by the rest of the driver. That maintains the old assumption that + * connected ports are usable, and avoids exposing to the users objects they + * can't really use. + */ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, struct intel_digital_port *intel_dig_port) { @@ -4817,12 +4915,17 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, is_typec = dpsp & TC_LIVE_STATE_TC(tc_port); is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port); - if (!is_legacy && !is_typec && !is_tbt) + if (!is_legacy && !is_typec && !is_tbt) { + icl_tc_phy_disconnect(dev_priv, intel_dig_port); return false; + } icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec, is_tbt); + if (!icl_tc_phy_connect(dev_priv, intel_dig_port)) + return false; + return true; } @@ -4850,6 +4953,11 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder) * intel_digital_port_connected - is the specified port connected? * @encoder: intel_encoder * + * In cases where there's a connector physically connected but it can't be used + * by our hardware we also return false, since the rest of the driver should + * pretty much treat the port as disconnected. This is relevant for type-C + * (starting on ICL) where there's ownership involved. + * * Return %true if port is connected, %false otherwise. 
*/ bool intel_digital_port_connected(struct intel_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a1799b5c12bb..02faa2cf4a85 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1909,21 +1909,26 @@ intel_hdmi_set_edid(struct drm_connector *connector) static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { - enum drm_connector_status status; + enum drm_connector_status status = connector_status_disconnected; struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + if (IS_ICELAKE(dev_priv) && + !intel_digital_port_connected(encoder)) + goto out; + intel_hdmi_unset_edid(connector); if (intel_hdmi_set_edid(connector)) status = connector_status_connected; - else - status = connector_status_disconnected; +out: intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); return status; -- GitLab From 99da0b35396f3907fa8594b554bf81904389c48c Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 20 Aug 2018 16:31:36 -0700 Subject: [PATCH 0248/1692] drm/i915: WARN() if we can't lookup_power_well() None of the current lookup_power_well() callers are actually checking for NULL return values, they all just use the pointer right away. The first idea was to replace these theoretical segfaults with a BUG() since this would at least make our code a little more explicit to the reader. It was suggested that just converting the BUG() to a WARN() and returning any power well would probably be better since it would still keep the system running while at the same time exposing the driver bug. We can only hit this NULL/BUG()/WARN() condition if we try to lookup a power well that isn't defined on a given platform. If that ever happens, we have to fix our code, making it lookup the correct power well. Because of this, I don't think it's worth trying to implement error checking in every caller. Improving our CI system will be a better use of our time once a bug is found in the wild. v2: Avoid the BUG() with a WARN() return a random PW (Michal). Cc: Michal Wajdeczko Cc: Imre Deak Reviewed-by: Imre Deak Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180820233139.11936-2-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 1b10b7041513..dd5d1ba4a7f1 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1098,7 +1098,15 @@ lookup_power_well(struct drm_i915_private *dev_priv, return power_well; } - return NULL; + /* + * It's not feasible to add error checking code to the callers since + * this condition really shouldn't happen and it doesn't even make sense + * to abort things like display initialization sequences. Just return + * the first power well and hope the WARN gets reported so we can fix + * our driver. 
+ */ + WARN(1, "Power well %d not defined for this platform\n", power_well_id); + return &power_domains->power_wells[0]; } #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) -- GitLab From 0229bfd42bbe7fd49d23b5f696c22241096b8847 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 20 Aug 2018 16:31:37 -0700 Subject: [PATCH 0249/1692] drm/i915: use for_each_power_well in lookup_power_well() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the nice helper function to make the implementation simpler. v2: Rebase. Cc: Imre Deak Reviewed-by: José Roberto de Souza (v1) Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180820233139.11936-3-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index dd5d1ba4a7f1..33dbbd9efe65 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1087,16 +1087,11 @@ static struct i915_power_well * lookup_power_well(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; - int i; - - for (i = 0; i < power_domains->power_well_count; i++) { - struct i915_power_well *power_well; + struct i915_power_well *power_well; - power_well = &power_domains->power_wells[i]; + for_each_power_well(dev_priv, power_well) if (power_well->desc->id == power_well_id) return power_well; - } /* * It's not feasible to add error checking code to the callers since @@ -1106,7 +1101,7 @@ lookup_power_well(struct drm_i915_private *dev_priv, * our driver. */ WARN(1, "Power well %d not defined for this platform\n", power_well_id); - return &power_domains->power_wells[0]; + return &dev_priv->power_domains.power_wells[0]; } #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) -- GitLab From f7480b2f65919d104ee41c2b609bc4ae3a3e6d60 Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 20 Aug 2018 16:31:38 -0700 Subject: [PATCH 0250/1692] drm/i915: move lookup_power_well() up There's no need for that forward declaration. 
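The reasoning is just C's ordering rule: a static function needs a separate forward declaration only if it is called before its definition appears in the translation unit, so once the definition is moved above its first caller the declaration becomes dead weight. A trivial illustration of the general pattern, unrelated to the i915 code:

#include <stdio.h>

/* Defined before its first caller, so no "static int square(int);" declaration is needed. */
static int square(int x)
{
	return x * x;
}

int main(void)
{
	printf("%d\n", square(4));
	return 0;
}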
Cc: Imre Deak Reviewed-by: Imre Deak Signed-off-by: Paulo Zanoni Link: https://patchwork.freedesktop.org/patch/msgid/20180820233139.11936-4-paulo.r.zanoni@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 46 +++++++++++-------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 33dbbd9efe65..2852395125cd 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -52,10 +52,6 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); -static struct i915_power_well * -lookup_power_well(struct drm_i915_private *dev_priv, - enum i915_power_well_id power_well_id); - const char * intel_display_power_domain_str(enum intel_display_power_domain domain) { @@ -652,6 +648,27 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv) WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); } +static struct i915_power_well * +lookup_power_well(struct drm_i915_private *dev_priv, + enum i915_power_well_id power_well_id) +{ + struct i915_power_well *power_well; + + for_each_power_well(dev_priv, power_well) + if (power_well->desc->id == power_well_id) + return power_well; + + /* + * It's not feasible to add error checking code to the callers since + * this condition really shouldn't happen and it doesn't even make sense + * to abort things like display initialization sequences. Just return + * the first power well and hope the WARN gets reported so we can fix + * our driver. + */ + WARN(1, "Power well %d not defined for this platform\n", power_well_id); + return &dev_priv->power_domains.power_wells[0]; +} + static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) { bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, @@ -1083,27 +1100,6 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) -static struct i915_power_well * -lookup_power_well(struct drm_i915_private *dev_priv, - enum i915_power_well_id power_well_id) -{ - struct i915_power_well *power_well; - - for_each_power_well(dev_priv, power_well) - if (power_well->desc->id == power_well_id) - return power_well; - - /* - * It's not feasible to add error checking code to the callers since - * this condition really shouldn't happen and it doesn't even make sense - * to abort things like display initialization sequences. Just return - * the first power well and hope the WARN gets reported so we can fix - * our driver. - */ - WARN(1, "Power well %d not defined for this platform\n", power_well_id); - return &dev_priv->power_domains.power_wells[0]; -} - #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) static void assert_chv_phy_status(struct drm_i915_private *dev_priv) -- GitLab From 757ab15c3f4968b5a29caf3fe8b67660ce84c3cd Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 21 Aug 2018 10:44:10 +0200 Subject: [PATCH 0251/1692] cpuidle: menu: Retain tick when shallow state is selected The case addressed by commit 5ef499cd571c (cpuidle: menu: Handle stopped tick more aggressively) in the stopped tick case is present when the tick has not been stopped yet too. 
Namely, if only two CPU idle states, shallow state A with target residency significantly below the tick boundary and deep state B with target residency significantly above it, are available and the predicted idle duration is above the tick boundary, but below the target residency of state B, state A will be selected and the CPU may spend indefinite amount of time in it, which is not quite energy-efficient. However, if the tick has not been stopped yet and the governor is about to select a shallow idle state for the CPU even though the idle duration predicted by it is above the tick boundary, it should be fine to wake up the CPU early, so the tick can be retained then and the governor will have a chance to select a deeper state when it runs next time. [Note that when this really happens, it will make the idle duration predictor believe that the CPU might be idle longer than predicted, which will make it more likely to predict longer idle durations going forward, but that will also cause deeper idle states to be selected going forward, on average, which is what's needed here.] Fixes: 87c9fe6ee495 (cpuidle: menu: Avoid selecting shallow states with stopped tick) Reported-by: Leo Yan Cc: 4.17+ # 4.17+: 5ef499cd571c (cpuidle: menu: Handle ...) Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/governors/menu.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 110483f0e3fb..e26a40971b26 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, if (idx == -1) idx = i; /* first enabled state */ if (s->target_residency > data->predicted_us) { - if (!tick_nohz_tick_stopped()) + if (data->predicted_us < TICK_USEC) break; + if (!tick_nohz_tick_stopped()) { + /* + * If the state selected so far is shallow, + * waking up early won't hurt, so retain the + * tick in that case and let the governor run + * again in the next iteration of the loop. + */ + expected_interval = drv->states[idx].target_residency; + break; + } + /* * If the state selected so far is shallow and this * state's target residency matches the time till the -- GitLab From cc98963dbaaea93d17608641b8d6942a5327fc31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Mon, 6 Aug 2018 15:29:09 +0300 Subject: [PATCH 0252/1692] crypto: caam/jr - fix descriptor DMA unmapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Descriptor address needs to be swapped to CPU endianness before being DMA unmapped. 
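The underlying bug class is treating a value stored in device byte order as if it were already in CPU byte order; the fix below routes the descriptor address through caam_dma_to_cpu() before handing it to dma_unmap_single(). A rough userspace analogue of the same conversion, assuming purely for illustration that the device side is big-endian (this uses glibc's <endian.h> helpers, not CAAM code):

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Pretend the device wrote this 64-bit descriptor address in big-endian form. */
	uint64_t raw = htobe64(0x00000000deadbeefULL);

	/* Using the raw value on a little-endian CPU would see the bytes reversed. */
	printf("raw value as stored : 0x%016" PRIx64 "\n", raw);

	/* Convert to CPU endianness first, then use it (e.g. as the unmap address). */
	printf("converted for CPU   : 0x%016" PRIx64 "\n", be64toh(raw));
	return 0;
}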
Cc: # 4.8+ Fixes: 261ea058f016 ("crypto: caam - handle core endianness != caam endianness") Reported-by: Laurentiu Tudor Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index f4f258075b89..acdd72016ffe 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg) BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); /* Unmap just-run descriptor so we can post-process */ - dma_unmap_single(dev, jrp->outring[hw_idx].desc, + dma_unmap_single(dev, + caam_dma_to_cpu(jrp->outring[hw_idx].desc), jrp->entinfo[sw_idx].desc_size, DMA_TO_DEVICE); -- GitLab From ad876a18048f43b1f66f5d474b7598538668c5de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Mon, 6 Aug 2018 15:29:39 +0300 Subject: [PATCH 0253/1692] crypto: caam/qi - fix error path in xts setkey MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit xts setkey callback returns 0 on some error paths. Fix this by returning -EINVAL. Cc: # 4.12+ Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 6e61cc93c2b0..d7aa7d7ff102 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, int ret = 0; if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { - crypto_ablkcipher_set_flags(ablkcipher, - CRYPTO_TFM_RES_BAD_KEY_LEN); dev_err(jrdev, "key size mismatch\n"); - return -EINVAL; + goto badkey; } ctx->cdata.keylen = keylen; @@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, return ret; badkey: crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); - return 0; + return -EINVAL; } /* -- GitLab From f1bf9e60a0779ec97de9ecdc353e1d01cdd73f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Horia=20Geant=C4=83?= Date: Mon, 6 Aug 2018 15:29:55 +0300 Subject: [PATCH 0254/1692] crypto: caam - fix DMA mapping direction for RSA forms 2 & 3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Crypto engine needs some temporary locations in external memory for running RSA decrypt forms 2 and 3 (CRT). These are named "tmp1" and "tmp2" in the PDB. Update DMA mapping direction of tmp1 and tmp2 from TO_DEVICE to BIDIRECTIONAL, since engine needs r/w access. 
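The general DMA-API rule behind the change: the direction passed to dma_map_single() must cover every access the device will make to the buffer, and the unmap must use the same direction. A schematic, non-caam example of mapping a scratch buffer that the hardware both reads and writes:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Schematic only: map a scratch buffer that the device both reads and writes. */
static int map_scratch(struct device *dev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	/* DMA_TO_DEVICE would be wrong here because the engine also writes buf. */
	*handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}

static void unmap_scratch(struct device *dev, dma_addr_t handle, size_t len)
{
	/* The direction must match the one used at map time. */
	dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
}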
Cc: # 4.13+ Fixes: 52e26d77b8b3 ("crypto: caam - add support for RSA key form 2") Fixes: 4a651b122adb ("crypto: caam - add support for RSA key form 3") Signed-off-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caampkc.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 578ea63a3109..f26d62e5533a 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); } static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, @@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); - dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); } /* RSA Job Completion handler */ @@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, goto unmap_p; } - pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp1_dma)) { dev_err(dev, "Unable to map RSA tmp1 memory\n"); goto unmap_q; } - pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp2_dma)) { dev_err(dev, "Unable to map RSA tmp2 memory\n"); goto unmap_tmp1; @@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, return 0; unmap_tmp1: - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); unmap_q: dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); unmap_p: @@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, goto unmap_dq; } - pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp1_dma)) { dev_err(dev, "Unable to map RSA tmp1 memory\n"); goto unmap_qinv; } - pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp2_dma)) { dev_err(dev, "Unable to map RSA tmp2 memory\n"); goto unmap_tmp1; @@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, return 0; unmap_tmp1: - dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); unmap_qinv: dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); unmap_dq: -- GitLab From 7fa885e2a22fd0f91a2c23d9275f5021f618ff5a Mon Sep 17 
00:00:00 2001 From: Ard Biesheuvel Date: Tue, 7 Aug 2018 23:18:36 +0200 Subject: [PATCH 0255/1692] crypto: arm64/sm4-ce - check for the right CPU feature bit ARMv8.2 specifies special instructions for the SM3 cryptographic hash and the SM4 symmetric cipher. While it is unlikely that a core would implement one and not the other, we should only use SM4 instructions if the SM4 CPU feature bit is set, and we currently check the SM3 feature bit instead. So fix that. Fixes: e99ce921c468 ("crypto: arm64 - add support for SM4...") Cc: Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/sm4-ce-glue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index b7fb5274b250..0c4fc223f225 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c @@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void) crypto_unregister_alg(&sm4_ce_alg); } -module_cpu_feature_match(SM3, sm4_ce_mod_init); +module_cpu_feature_match(SM4, sm4_ce_mod_init); module_exit(sm4_ce_mod_fini); -- GitLab From 65b2c12dcdb883fc015c0ec65d6c2f857e0456ac Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Fri, 10 Aug 2018 18:27:41 +0530 Subject: [PATCH 0256/1692] crypto: chtls - fix null dereference chtls_free_uld() call chtls_free_uld() only for the initialized cdev, this fixes NULL dereference in chtls_free_uld() Signed-off-by: Ganesh Goudar Signed-off-by: Atul Gupta Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chtls/chtls.h | 5 +++++ drivers/crypto/chelsio/chtls/chtls_main.c | 7 +++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index a53a0e6ba024..7725b6ee14ef 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h @@ -96,6 +96,10 @@ enum csk_flags { CSK_CONN_INLINE, /* Connection on HW */ }; +enum chtls_cdev_state { + CHTLS_CDEV_STATE_UP = 1 +}; + struct listen_ctx { struct sock *lsk; struct chtls_dev *cdev; @@ -146,6 +150,7 @@ struct chtls_dev { unsigned int send_page_order; int max_host_sndbuf; struct key_map kmap; + unsigned int cdev_state; }; struct chtls_hws { diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 9b07f9165658..f59b044ebd25 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev) tlsdev->hash = chtls_create_hash; tlsdev->unhash = chtls_destroy_hash; tls_register_device(&cdev->tlsdev); + cdev->cdev_state = CHTLS_CDEV_STATE_UP; } static void chtls_unregister_dev(struct chtls_dev *cdev) @@ -281,8 +282,10 @@ static void chtls_free_all_uld(void) struct chtls_dev *cdev, *tmp; mutex_lock(&cdev_mutex); - list_for_each_entry_safe(cdev, tmp, &cdev_list, list) - chtls_free_uld(cdev); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list) { + if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) + chtls_free_uld(cdev); + } mutex_unlock(&cdev_mutex); } -- GitLab From e5b954e8d11fdde55eed35017370a3a0d8837754 Mon Sep 17 00:00:00 2001 From: Dave Watson Date: Wed, 15 Aug 2018 10:29:42 -0700 Subject: [PATCH 0257/1692] crypto: aesni - Use unaligned loads from gcm_context_data A regression was reported bisecting to 1476db2d12 "Move HashKey computation from stack to gcm_context". 
That diff moved HashKey computation from the stack, which was explicitly aligned in the asm, to a struct provided from the C code, depending on AESNI_ALIGN_ATTR for alignment. It appears some compilers may not align this struct correctly, resulting in a crash on the movdqa instruction when attempting to encrypt or decrypt data. Fix by using unaligned loads for the HashKeys. On modern hardware there is no perf difference between the unaligned and aligned loads. All other accesses to gcm_context_data already use unaligned loads. Reported-by: Mauro Rossi Fixes: 1476db2d12 ("Move HashKey computation from stack to gcm_context") Cc: Signed-off-by: Dave Watson Signed-off-by: Herbert Xu --- arch/x86/crypto/aesni-intel_asm.S | 66 +++++++++++++++---------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index e762ef417562..d27a50656aa1 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff pcmpeqd TWOONE(%rip), \TMP2 pand POLY(%rip), \TMP2 pxor \TMP2, \TMP3 - movdqa \TMP3, HashKey(%arg2) + movdqu \TMP3, HashKey(%arg2) movdqa \TMP3, \TMP5 pshufd $78, \TMP3, \TMP1 pxor \TMP3, \TMP1 - movdqa \TMP1, HashKey_k(%arg2) + movdqu \TMP1, HashKey_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^2<<1 (mod poly) - movdqa \TMP5, HashKey_2(%arg2) + movdqu \TMP5, HashKey_2(%arg2) # HashKey_2 = HashKey^2<<1 (mod poly) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 - movdqa \TMP1, HashKey_2_k(%arg2) + movdqu \TMP1, HashKey_2_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^3<<1 (mod poly) - movdqa \TMP5, HashKey_3(%arg2) + movdqu \TMP5, HashKey_3(%arg2) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 - movdqa \TMP1, HashKey_3_k(%arg2) + movdqu \TMP1, HashKey_3_k(%arg2) GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 # TMP5 = HashKey^3<<1 (mod poly) - movdqa \TMP5, HashKey_4(%arg2) + movdqu \TMP5, HashKey_4(%arg2) pshufd $78, \TMP5, \TMP1 pxor \TMP5, \TMP1 - movdqa \TMP1, HashKey_4_k(%arg2) + movdqu \TMP1, HashKey_4_k(%arg2) .endm # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. 
@@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, - movdqa HashKey(%arg2), %xmm13 + movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ %xmm4, %xmm5, %xmm6 @@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pshufd $78, \XMM5, \TMP6 pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT - movdqa HashKey_4(%arg2), \TMP5 + movdqu HashKey_4(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT @@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 - movdqa HashKey_4_k(%arg2), \TMP5 + movdqu HashKey_4_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 AESENC \TMP1, \XMM1 # Round 1 @@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 - movdqa HashKey_3(%arg2), \TMP5 + movdqu HashKey_3(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 3 @@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation AESENC \TMP3, \XMM2 AESENC \TMP3, \XMM3 AESENC \TMP3, \XMM4 - movdqa HashKey_3_k(%arg2), \TMP5 + movdqu HashKey_3_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 5 @@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM7, \TMP1 pshufd $78, \XMM7, \TMP2 pxor \XMM7, \TMP2 - movdqa HashKey_2(%arg2), \TMP5 + movdqu HashKey_2(%arg2), \TMP5 # Multiply TMP5 * HashKey using karatsuba @@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation AESENC \TMP3, \XMM2 AESENC \TMP3, \XMM3 AESENC \TMP3, \XMM4 - movdqa HashKey_2_k(%arg2), \TMP5 + movdqu HashKey_2_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 8 @@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM8, \TMP1 pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 - movdqa HashKey(%arg2), \TMP5 + movdqu HashKey(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 9 @@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@: AESENCLAST \TMP3, \XMM2 AESENCLAST \TMP3, \XMM3 AESENCLAST \TMP3, \XMM4 - movdqa HashKey_k(%arg2), \TMP5 + movdqu HashKey_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK @@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pshufd $78, \XMM5, \TMP6 pxor \XMM5, \TMP6 paddd ONE(%rip), \XMM0 # INCR CNT - movdqa HashKey_4(%arg2), \TMP5 + movdqu HashKey_4(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 movdqa \XMM0, \XMM1 paddd ONE(%rip), \XMM0 # INCR CNT @@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation pxor (%arg1), \XMM2 pxor (%arg1), \XMM3 pxor (%arg1), \XMM4 - movdqa HashKey_4_k(%arg2), \TMP5 + movdqu HashKey_4_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) movaps 0x10(%arg1), \TMP1 AESENC \TMP1, \XMM1 # Round 1 @@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation 
movdqa \XMM6, \TMP1 pshufd $78, \XMM6, \TMP2 pxor \XMM6, \TMP2 - movdqa HashKey_3(%arg2), \TMP5 + movdqu HashKey_3(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 movaps 0x30(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 3 @@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation AESENC \TMP3, \XMM2 AESENC \TMP3, \XMM3 AESENC \TMP3, \XMM4 - movdqa HashKey_3_k(%arg2), \TMP5 + movdqu HashKey_3_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x50(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 5 @@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM7, \TMP1 pshufd $78, \XMM7, \TMP2 pxor \XMM7, \TMP2 - movdqa HashKey_2(%arg2), \TMP5 + movdqu HashKey_2(%arg2), \TMP5 # Multiply TMP5 * HashKey using karatsuba @@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation AESENC \TMP3, \XMM2 AESENC \TMP3, \XMM3 AESENC \TMP3, \XMM4 - movdqa HashKey_2_k(%arg2), \TMP5 + movdqu HashKey_2_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movaps 0x80(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 8 @@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM8, \TMP1 pshufd $78, \XMM8, \TMP2 pxor \XMM8, \TMP2 - movdqa HashKey(%arg2), \TMP5 + movdqu HashKey(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 movaps 0x90(%arg1), \TMP3 AESENC \TMP3, \XMM1 # Round 9 @@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@: AESENCLAST \TMP3, \XMM2 AESENCLAST \TMP3, \XMM3 AESENCLAST \TMP3, \XMM4 - movdqa HashKey_k(%arg2), \TMP5 + movdqu HashKey_k(%arg2), \TMP5 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqu (%arg4,%r11,1), \TMP3 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK @@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst movdqa \XMM1, \TMP6 pshufd $78, \XMM1, \TMP2 pxor \XMM1, \TMP2 - movdqa HashKey_4(%arg2), \TMP5 + movdqu HashKey_4(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 - movdqa HashKey_4_k(%arg2), \TMP4 + movdqu HashKey_4_k(%arg2), \TMP4 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) movdqa \XMM1, \XMMDst movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 @@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst movdqa \XMM2, \TMP1 pshufd $78, \XMM2, \TMP2 pxor \XMM2, \TMP2 - movdqa HashKey_3(%arg2), \TMP5 + movdqu HashKey_3(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 - movdqa HashKey_3_k(%arg2), \TMP4 + movdqu HashKey_3_k(%arg2), \TMP4 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM2, \XMMDst @@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst movdqa \XMM3, \TMP1 pshufd $78, \XMM3, \TMP2 pxor \XMM3, \TMP2 - movdqa HashKey_2(%arg2), \TMP5 + movdqu HashKey_2(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 - movdqa HashKey_2_k(%arg2), \TMP4 + movdqu HashKey_2_k(%arg2), \TMP4 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM3, \XMMDst @@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst movdqa \XMM4, \TMP1 pshufd $78, \XMM4, \TMP2 pxor \XMM4, \TMP2 - movdqa HashKey(%arg2), \TMP5 + movdqu HashKey(%arg2), \TMP5 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 - movdqa HashKey_k(%arg2), \TMP4 + movdqu HashKey_k(%arg2), \TMP4 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) pxor \TMP1, \TMP6 pxor \XMM4, \XMMDst -- GitLab From 
c2b24c36e0a30ebd8fc7d068da7f0451f2c05c76 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 20 Aug 2018 16:58:34 +0200 Subject: [PATCH 0258/1692] crypto: arm64/aes-gcm-ce - fix scatterwalk API violation Commit 71e52c278c54 ("crypto: arm64/aes-ce-gcm - operate on two input blocks at a time") modified the granularity at which the AES/GCM code processes its input to allow subsequent changes to be applied that improve performance by using aggregation to process multiple input blocks at once. For this reason, it doubled the algorithm's 'chunksize' property to 2 x AES_BLOCK_SIZE, but retained the non-SIMD fallback path that processes a single block at a time. In some cases, this violates the skcipher scatterwalk API, by calling skcipher_walk_done() with a non-zero residue value for a chunk that is expected to be handled in its entirety. This results in a WARN_ON() to be hit by the TLS self test code, but is likely to break other user cases as well. Unfortunately, none of the current test cases exercises this exact code path at the moment. Fixes: 71e52c278c54 ("crypto: arm64/aes-ce-gcm - operate on two ...") Reported-by: Vakul Garg Signed-off-by: Ard Biesheuvel Tested-by: Vakul Garg Signed-off-by: Herbert Xu --- arch/arm64/crypto/ghash-ce-glue.c | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 6e9f33d14930..067d8937d5af 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req) __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); put_unaligned_be32(2, iv + GCM_IV_SIZE); - while (walk.nbytes >= AES_BLOCK_SIZE) { + while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { int blocks = walk.nbytes / AES_BLOCK_SIZE; u8 *dst = walk.dst.virt.addr; u8 *src = walk.src.virt.addr; @@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req) NULL); err = skcipher_walk_done(&walk, - walk.nbytes % AES_BLOCK_SIZE); + walk.nbytes % (2 * AES_BLOCK_SIZE)); } - if (walk.nbytes) + if (walk.nbytes) { __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, nrounds); + if (walk.nbytes > AES_BLOCK_SIZE) { + crypto_inc(iv, AES_BLOCK_SIZE); + __aes_arm64_encrypt(ctx->aes_key.key_enc, + ks + AES_BLOCK_SIZE, iv, + nrounds); + } + } } /* handle the tail */ @@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req) __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); put_unaligned_be32(2, iv + GCM_IV_SIZE); - while (walk.nbytes >= AES_BLOCK_SIZE) { + while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) { int blocks = walk.nbytes / AES_BLOCK_SIZE; u8 *dst = walk.dst.virt.addr; u8 *src = walk.src.virt.addr; @@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req) } while (--blocks > 0); err = skcipher_walk_done(&walk, - walk.nbytes % AES_BLOCK_SIZE); + walk.nbytes % (2 * AES_BLOCK_SIZE)); } - if (walk.nbytes) + if (walk.nbytes) { + if (walk.nbytes > AES_BLOCK_SIZE) { + u8 *iv2 = iv + AES_BLOCK_SIZE; + + memcpy(iv2, iv, AES_BLOCK_SIZE); + crypto_inc(iv2, AES_BLOCK_SIZE); + + __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2, + iv2, nrounds); + } __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, nrounds); + } } /* handle the tail */ -- GitLab From 0522236d4f9c5ab2e79889cb020d1acbe5da416e Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Wed, 22 Aug 2018 08:26:31 +0200 Subject: [PATCH 0259/1692] crypto: vmx - Fix sleep-in-atomic bugs This patch fixes sleep-in-atomic bugs in AES-CBC and 
AES-XTS VMX implementations. The problem is that the blkcipher_* functions should not be called in atomic context. The bugs can be reproduced via the AF_ALG interface by trying to encrypt/decrypt sufficiently large buffers (at least 64 KiB) using the VMX implementations of 'cbc(aes)' or 'xts(aes)'. Such operations then trigger BUG in crypto_yield(): [ 891.863680] BUG: sleeping function called from invalid context at include/crypto/algapi.h:424 [ 891.864622] in_atomic(): 1, irqs_disabled(): 0, pid: 12347, name: kcapi-enc [ 891.864739] 1 lock held by kcapi-enc/12347: [ 891.864811] #0: 00000000f5d42c46 (sk_lock-AF_ALG){+.+.}, at: skcipher_recvmsg+0x50/0x530 [ 891.865076] CPU: 5 PID: 12347 Comm: kcapi-enc Not tainted 4.19.0-0.rc0.git3.1.fc30.ppc64le #1 [ 891.865251] Call Trace: [ 891.865340] [c0000003387578c0] [c000000000d67ea4] dump_stack+0xe8/0x164 (unreliable) [ 891.865511] [c000000338757910] [c000000000172a58] ___might_sleep+0x2f8/0x310 [ 891.865679] [c000000338757990] [c0000000006bff74] blkcipher_walk_done+0x374/0x4a0 [ 891.865825] [c0000003387579e0] [d000000007e73e70] p8_aes_cbc_encrypt+0x1c8/0x260 [vmx_crypto] [ 891.865993] [c000000338757ad0] [c0000000006c0ee0] skcipher_encrypt_blkcipher+0x60/0x80 [ 891.866128] [c000000338757b10] [c0000000006ec504] skcipher_recvmsg+0x424/0x530 [ 891.866283] [c000000338757bd0] [c000000000b00654] sock_recvmsg+0x74/0xa0 [ 891.866403] [c000000338757c10] [c000000000b00f64] ___sys_recvmsg+0xf4/0x2f0 [ 891.866515] [c000000338757d90] [c000000000b02bb8] __sys_recvmsg+0x68/0xe0 [ 891.866631] [c000000338757e30] [c00000000000bbe4] system_call+0x5c/0x70 Fixes: 8c755ace357c ("crypto: vmx - Adding CBC routines for VMX module") Fixes: c07f5d3da643 ("crypto: vmx - Adding support for XTS") Cc: stable@vger.kernel.org Signed-off-by: Ondrej Mosnacek Signed-off-by: Herbert Xu --- drivers/crypto/vmx/aes_cbc.c | 30 ++++++++++++++---------------- drivers/crypto/vmx/aes_xts.c | 21 ++++++++++++++------- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 5285ece4f33a..b71895871be3 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, ret = crypto_skcipher_encrypt(req); skcipher_request_zero(req); } else { - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; @@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, ret = crypto_skcipher_decrypt(req); skcipher_request_zero(req); } else { - preempt_disable(); - pagefault_disable(); - enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); ret = blkcipher_walk_virt(desc, &walk); while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + nbytes &= AES_BLOCK_SIZE - 1; ret 
= blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8bd9aff0f55f..e9954a7d4694 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); skcipher_request_zero(req); } else { + blkcipher_walk_init(&walk, dst, src, nbytes); + + ret = blkcipher_walk_virt(desc, &walk); + preempt_disable(); pagefault_disable(); enable_kernel_vsx(); - blkcipher_walk_init(&walk, dst, src, nbytes); - - ret = blkcipher_walk_virt(desc, &walk); iv = walk.iv; memset(tweak, 0, AES_BLOCK_SIZE); aes_p8_encrypt(iv, tweak, &ctx->tweak_key); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); + while ((nbytes = walk.nbytes)) { + preempt_disable(); + pagefault_disable(); + enable_kernel_vsx(); if (enc) aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); else aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); + disable_kernel_vsx(); + pagefault_enable(); + preempt_enable(); nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - - disable_kernel_vsx(); - pagefault_enable(); - preempt_enable(); } return ret; } -- GitLab From 3d7c82060d1fe65bde4023aac41a0b1bd7718e07 Mon Sep 17 00:00:00 2001 From: Srikanth Jampala Date: Wed, 22 Aug 2018 12:40:52 +0530 Subject: [PATCH 0260/1692] crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions. Earlier used to post the current command without checking queue full after backlog submissions. So, post the current command only after confirming the space in queue after backlog submissions. Maintain host write index instead of reading device registers to get the next free slot to post the command. Return -ENOSPC in queue full case. 
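The host-side write index introduced below is plain wrap-around arithmetic over a fixed-depth command ring, so no device register read is needed to find the next free slot; the driver's helper is incr_index() in the diff. A tiny standalone illustration of the same increment (the names here are made up):

#include <stdio.h>

/* Advance a ring index by count slots, wrapping at max without a modulo. */
static int ring_advance(int index, int count, int max)
{
	index += count;
	if (index >= max)
		index -= max;

	return index;
}

int main(void)
{
	int idx = 0;

	for (int i = 0; i < 10; i++) {
		printf("%d ", idx);
		idx = ring_advance(idx, 1, 8); /* 8-entry ring: 0..7, then back to 0 */
	}
	printf("\n");
	return 0;
}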
Signed-off-by: Srikanth Jampala Reviewed-by: Gadam Sreerama Tested-by: Jha, Chandan Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_dev.h | 3 +- drivers/crypto/cavium/nitrox/nitrox_lib.c | 1 + drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 57 +++++++++++--------- 3 files changed, 35 insertions(+), 26 deletions(-) diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h index 9a476bb6d4c7..af596455b420 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_dev.h +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h @@ -35,6 +35,7 @@ struct nitrox_cmdq { /* requests in backlog queues */ atomic_t backlog_count; + int write_idx; /* command size 32B/64B */ u8 instr_size; u8 qno; @@ -87,7 +88,7 @@ struct nitrox_bh { struct bh_data *slc; }; -/* NITROX-5 driver state */ +/* NITROX-V driver state */ #define NITROX_UCODE_LOADED 0 #define NITROX_READY 1 diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c index ebe267379ac9..4d31df07777f 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_lib.c +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c @@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq) cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); cmdq->qsize = (qsize + PKT_IN_ALIGN); + cmdq->write_idx = 0; spin_lock_init(&cmdq->response_lock); spin_lock_init(&cmdq->cmdq_lock); diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index deaefd532aaa..4a362fc22f62 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -42,6 +42,16 @@ * Invalid flag options in AES-CCM IV. */ +static inline int incr_index(int index, int count, int max) +{ + if ((index + count) >= max) + index = index + count - max; + else + index += count; + + return index; +} + /** * dma_free_sglist - unmap and free the sg lists. 
* @ndev: N5 device @@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr, struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = sr->ndev; - union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; - u64 offset; + int idx; u8 *ent; spin_lock_bh(&cmdq->cmdq_lock); - /* get the next write offset */ - offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno); - pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset); + idx = cmdq->write_idx; /* copy the instruction */ - ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; + ent = cmdq->head + (idx * cmdq->instr_size); memcpy(ent, &sr->instr, cmdq->instr_size); - /* flush the command queue updates */ - dma_wmb(); - sr->tstamp = jiffies; atomic_set(&sr->status, REQ_POSTED); response_list_add(sr, cmdq); + sr->tstamp = jiffies; + /* flush the command queue updates */ + dma_wmb(); /* Ring doorbell with count 1 */ writeq(1, cmdq->dbell_csr_addr); /* orders the doorbell rings */ mmiowb(); + cmdq->write_idx = incr_index(idx, 1, ndev->qlen); + spin_unlock_bh(&cmdq->cmdq_lock); } @@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) struct nitrox_softreq *sr, *tmp; int ret = 0; + if (!atomic_read(&cmdq->backlog_count)) + return 0; + spin_lock_bh(&cmdq->backlog_lock); list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { @@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq) /* submit until space available */ if (unlikely(cmdq_full(cmdq, ndev->qlen))) { - ret = -EBUSY; + ret = -ENOSPC; break; } /* delete from backlog list */ @@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr) { struct nitrox_cmdq *cmdq = sr->cmdq; struct nitrox_device *ndev = sr->ndev; - int ret = -EBUSY; + + /* try to post backlog requests */ + post_backlog_cmds(cmdq); if (unlikely(cmdq_full(cmdq, ndev->qlen))) { if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EAGAIN; - + return -ENOSPC; + /* add to backlog list */ backlog_list_add(sr, cmdq); - } else { - ret = post_backlog_cmds(cmdq); - if (ret) { - backlog_list_add(sr, cmdq); - return ret; - } - post_se_instr(sr, cmdq); - ret = -EINPROGRESS; + return -EBUSY; } - return ret; + post_se_instr(sr, cmdq); + + return -EINPROGRESS; } /** @@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev, */ sr->instr.fdata[0] = *((u64 *)&req->gph); sr->instr.fdata[1] = 0; - /* flush the soft_req changes before posting the cmd */ - wmb(); ret = nitrox_enqueue_request(sr); - if (ret == -EAGAIN) + if (ret == -ENOSPC) goto send_fail; return ret; -- GitLab From 3cf71bc9904d7ee4a25a822c5dcb54c7804ea388 Mon Sep 17 00:00:00 2001 From: Jan-Marek Glogowski Date: Sat, 25 Aug 2018 15:10:35 -0400 Subject: [PATCH 0261/1692] drm/i915: Re-apply "Perform link quality check, unconditionally during long pulse" This re-applies the workaround for "some DP sinks, [which] are a little nuts" from commit 1a36147bb939 ("drm/i915: Perform link quality check unconditionally during long pulse"). It makes the secondary AOC E2460P monitor connected via DP to an acer Veriton N4640G usable again. 
This hunk was dropped in commit c85d200e8321 ("drm/i915: Move SST DP link retraining into the ->post_hotplug() hook") Fixes: c85d200e8321 ("drm/i915: Move SST DP link retraining into the ->post_hotplug() hook") [Cleaned up commit message, added stable cc] Signed-off-by: Lyude Paul Signed-off-by: Jan-Marek Glogowski Cc: stable@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20180825191035.3945-1-lyude@redhat.com --- drivers/gpu/drm/i915/intel_dp.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index b3f6f04c3c7d..db8515171270 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4333,18 +4333,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); } -/* - * If display is now connected check links status, - * there has been known issues of link loss triggering - * long pulse. - * - * Some sinks (eg. ASUS PB287Q) seem to perform some - * weird HPD ping pong during modesets. So we can apparently - * end up with HPD going low during a modeset, and then - * going back up soon after. And once that happens we must - * retrain the link to get a picture. That's in case no - * userspace component reacted to intermittent HPD dip. - */ int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { @@ -5031,7 +5019,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) } static int -intel_dp_long_pulse(struct intel_connector *connector) +intel_dp_long_pulse(struct intel_connector *connector, + struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_dp *intel_dp = intel_attached_dp(&connector->base); @@ -5090,6 +5079,22 @@ intel_dp_long_pulse(struct intel_connector *connector) */ status = connector_status_disconnected; goto out; + } else { + /* + * If display is now connected check links status, + * there has been known issues of link loss triggering + * long pulse. + * + * Some sinks (eg. ASUS PB287Q) seem to perform some + * weird HPD ping pong during modesets. So we can apparently + * end up with HPD going low during a modeset, and then + * going back up soon after. And once that happens we must + * retrain the link to get a picture. That's in case no + * userspace component reacted to intermittent HPD dip. + */ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + intel_dp_retrain_link(encoder, ctx); } /* @@ -5151,7 +5156,7 @@ intel_dp_detect(struct drm_connector *connector, return ret; } - status = intel_dp_long_pulse(intel_dp->attached_connector); + status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); } intel_dp->detect_done = false; -- GitLab From 602b74eda81311dbdb5dbab08c30f789f648ebdc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 24 Aug 2018 15:41:35 +0300 Subject: [PATCH 0262/1692] mlxsw: spectrum_switchdev: Do not leak RIFs when removing bridge When a bridge device is removed, the VLANs are flushed from each configured port. This causes the ports to decrement the reference count on the associated FIDs (filtering identifier). If the reference count of a FID is 1 and it has a RIF (router interface), then this RIF is destroyed. However, if no port is member in the VLAN for which a RIF exists, then the RIF will continue to exist after the removal of the bridge. 
To reproduce: # ip link add name br0 type bridge vlan_filtering 1 # ip link set dev swp1 master br0 # ip link add link br0 name br0.10 type vlan id 10 # ip address add 192.0.2.0/24 dev br0.10 # ip link del dev br0 The RIF associated with br0.10 continues to exist. Fix this by iterating over all the bridge device uppers when it is destroyed and take care of destroying their RIFs. Fixes: 99f44bb3527b ("mlxsw: spectrum: Enable L3 interfaces on top of bridge devices") Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 2 ++ .../ethernet/mellanox/mlxsw/spectrum_router.c | 11 ++++++++++ .../mellanox/mlxsw/spectrum_switchdev.c | 20 +++++++++++++++++++ 3 files changed, 33 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3ae930196741..3cdb7aca90b7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); +void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev); /* spectrum_kvdl.c */ enum mlxsw_sp_kvdl_entry_type { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3a96307f51b0..2ab9cf25a08a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) mlxsw_sp_vr_put(mlxsw_sp, vr); } +void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev) +{ + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) + return; + mlxsw_sp_rif_destroy(rif); +} + static void mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0d8444aaba01..db715da7bab7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); } +static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev, + void *data) +{ + struct mlxsw_sp *mlxsw_sp = data; + + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); + return 0; +} + +static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev) +{ + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); + netdev_walk_all_upper_dev_rcu(dev, + mlxsw_sp_bridge_device_upper_rif_destroy, + mlxsw_sp); +} + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, struct net_device *br_dev) @@ -165,6 +183,8 @@ static void mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, struct mlxsw_sp_bridge_device *bridge_device) { + mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp, + bridge_device->dev); list_del(&bridge_device->list); if (bridge_device->vlan_enabled) bridge->vlan_enabled_exists = false; -- GitLab From ab5f11055fdf8dfc3ddbd89e8e3cc550de41d1d3 Mon Sep 17 
00:00:00 2001 From: Ahmad Fatoum Date: Tue, 21 Aug 2018 17:35:48 +0200 Subject: [PATCH 0263/1692] net: macb: Fix regression breaking non-MDIO fixed-link PHYs commit 739de9a1563a ("net: macb: Reorganize macb_mii bringup") broke initializing macb on the EVB-KSZ9477 eval board. There, of_mdiobus_register was called even for the fixed-link representing the RGMII-link to the switch with the result that the driver attempts to enumerate PHYs on a non-existent MDIO bus: libphy: MACB_mii_bus: probed mdio_bus f0028000.ethernet-ffffffff: fixed-link has invalid PHY address mdio_bus f0028000.ethernet-ffffffff: scan phy fixed-link at address 0 [snip] mdio_bus f0028000.ethernet-ffffffff: scan phy fixed-link at address 31 The "MDIO" bus registration succeeds regardless, having claimed the reset GPIO, and calling of_phy_register_fixed_link later on fails because it tries to claim the same GPIO: macb f0028000.ethernet: broken fixed-link specification Fix this by registering the fixed-link before calling mdiobus_register. Fixes: 739de9a1563a ("net: macb: Reorganize macb_mii bringup") Signed-off-by: Ahmad Fatoum Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 27 +++++++++++++++--------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index dc09f9a8a49b..f46b854d34b5 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev) if (np) { if (of_phy_is_fixed_link(np)) { - if (of_phy_register_fixed_link(np) < 0) { - dev_err(&bp->pdev->dev, - "broken fixed-link specification\n"); - return -ENODEV; - } bp->phy_node = of_node_get(np); } else { bp->phy_node = of_parse_phandle(np, "phy-handle", 0); @@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp) { struct macb_platform_data *pdata; struct device_node *np; - int err; + int err = -ENXIO; /* Enable management port */ macb_writel(bp, NCR, MACB_BIT(MPE)); @@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp) dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; - if (pdata) - bp->mii_bus->phy_mask = pdata->phy_mask; + if (np && of_phy_is_fixed_link(np)) { + if (of_phy_register_fixed_link(np) < 0) { + dev_err(&bp->pdev->dev, + "broken fixed-link specification %pOF\n", np); + goto err_out_free_mdiobus; + } + + err = mdiobus_register(bp->mii_bus); + } else { + if (pdata) + bp->mii_bus->phy_mask = pdata->phy_mask; + + err = of_mdiobus_register(bp->mii_bus, np); + } - err = of_mdiobus_register(bp->mii_bus, np); if (err) - goto err_out_free_mdiobus; + goto err_out_free_fixed_link; err = macb_mii_probe(bp->dev); if (err) @@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); +err_out_free_fixed_link: if (np && of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); err_out_free_mdiobus: -- GitLab From f7b9e8e111e0ce04ed7d1a1cb5b01b6e57775708 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 22 Aug 2018 15:27:23 +0200 Subject: [PATCH 0264/1692] Revert "net: stmmac: fix build failure due to missing COMMON_CLK dependency" This reverts commit bde4975310eb1982bd0bbff673989052d92fd481. All legacy clock implementations now implement clk_set_rate() (Some implementations may be dummies, though). Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Acked-by: Jose Abreu Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/Kconfig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index edf20361ea5f..bf4acebb6bcd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH select PHYLIB select CRC32 select MII - depends on OF && COMMON_CLK && HAS_DMA + depends on OF && HAS_DMA help Support for chips using the snps,dwc-qos-ethernet.txt DT binding. @@ -57,7 +57,7 @@ config DWMAC_ANARION config DWMAC_IPQ806X tristate "QCA IPQ806x DWMAC support" default ARCH_QCOM - depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST) + depends on OF && (ARCH_QCOM || COMPILE_TEST) select MFD_SYSCON help Support for QCA IPQ806X DWMAC Ethernet. @@ -100,7 +100,7 @@ config DWMAC_OXNAS config DWMAC_ROCKCHIP tristate "Rockchip dwmac support" default ARCH_ROCKCHIP - depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST) + depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST) select MFD_SYSCON help Support for Ethernet controller on Rockchip RK3288 SoC. @@ -123,7 +123,7 @@ config DWMAC_SOCFPGA config DWMAC_STI tristate "STi GMAC support" default ARCH_STI - depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST) + depends on OF && (ARCH_STI || COMPILE_TEST) select MFD_SYSCON ---help--- Support for ethernet controller on STi SOCs. @@ -147,7 +147,7 @@ config DWMAC_STM32 config DWMAC_SUNXI tristate "Allwinner GMAC support" default ARCH_SUNXI - depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST) + depends on OF && (ARCH_SUNXI || COMPILE_TEST) ---help--- Support for Allwinner A20/A31 GMAC ethernet controllers. -- GitLab From 0da70f808029476001109b6cb076737bc04cea2e Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Thu, 23 Aug 2018 10:45:22 +0300 Subject: [PATCH 0265/1692] net: macb: do not disable MDIO bus at open/close time macb_reset_hw() is called from macb_close() and indirectly from macb_open(). macb_reset_hw() zeroes the NCR register, including the MPE (Management Port Enable) bit. This will prevent accessing any other PHYs for other Ethernet MACs on the MDIO bus, which remains registered at macb_reset_hw() time, until macb_init_hw() is called from macb_open() which sets the MPE bit again. I.e. currently the MDIO bus has a short disruption at open time and is disabled at close time until the interface is opened again. Fix that by only touching the RE and TE bits when enabling and disabling RX/TX. v2: Make macb_init_hw() NCR write a single statement. Fixes: 6c36a7074436 ("macb: Use generic PHY layer") Signed-off-by: Anssi Hannula Reviewed-by: Claudiu Beznea Tested-by: Claudiu Beznea Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index f46b854d34b5..c6707ea2d751 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -2035,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp) { struct macb_queue *queue; unsigned int q; + u32 ctrl = macb_readl(bp, NCR); /* Disable RX and TX (XXX: Should we halt the transmission * more gracefully?) */ - macb_writel(bp, NCR, 0); + ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); /* Clear the stats registers (XXX: Update stats first?) 
*/ - macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); + ctrl |= MACB_BIT(CLRSTAT); + + macb_writel(bp, NCR, ctrl); /* Clear all status flags */ macb_writel(bp, TSR, -1); @@ -2230,7 +2233,7 @@ static void macb_init_hw(struct macb *bp) } /* Enable TX and RX */ - macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); } /* The hash address register is 64 bits long and takes up two -- GitLab From 6750c87074c5b534d82fdaabb1deb45b8f1f57de Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 23 Aug 2018 13:20:52 -0700 Subject: [PATCH 0266/1692] qlge: Fix netdev features configuration. qlge_fix_features() is not supposed to modify hardware or driver state; rather, it is supposed to only fix the requested features bits. Currently qlge_fix_features() also brings the interface down and up unnecessarily even when there is no change in the feature set. This patch changes/fixes the following - 1) Move reload of interface or device re-config from qlge_fix_features() to qlge_set_features(). 2) Reload of interface in qlge_set_features() only if relevant feature bit (NETIF_F_HW_VLAN_CTAG_RX) is changed. 3) Get rid of qlge_fix_features() since the driver is not really required to fix any feature bits. Signed-off-by: Manish Reviewed-by: Benjamin Poirier Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 23 +++++++------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 353f1c129af1..059ba9429e51 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, return status; } -static netdev_features_t qlge_fix_features(struct net_device *ndev, - netdev_features_t features) -{ - int err; - - /* Update the behavior of vlan accel in the adapter */ - err = qlge_update_hw_vlan_features(ndev, features); - if (err) - return err; - - return features; -} - static int qlge_set_features(struct net_device *ndev, netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; + int err; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + /* Update the behavior of vlan accel in the adapter */ + err = qlge_update_hw_vlan_features(ndev, features); + if (err) + return err; - if (changed & NETIF_F_HW_VLAN_CTAG_RX) qlge_vlan_mode(ndev, features); + } return 0; } @@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = { .ndo_set_mac_address = qlge_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = qlge_tx_timeout, - .ndo_fix_features = qlge_fix_features, .ndo_set_features = qlge_set_features, .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, -- GitLab From 2d66f997f0545c8f7fc5cf0b49af1decb35170e7 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 24 Aug 2018 16:53:13 +0800 Subject: [PATCH 0267/1692] vhost: correctly check the iova range when waking virtqueue We don't wake up the virtqueue if the first byte of the pending iova range is the last byte of the range we just updated. This will lead a virtqueue to wait for an IOTLB update forever. Fix this by correcting the check and waking up the virtqueue in this case. Fixes: 6b1e6cc7855b ("vhost: new device IOTLB API") Reported-by: Peter Xu Signed-off-by: Jason Wang Reviewed-by: Peter Xu Tested-by: Peter Xu Acked-by: Michael S.
Tsirkin Signed-off-by: David S. Miller --- drivers/vhost/vhost.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 96c1d8400822..b13c6b4b2c66 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d, list_for_each_entry_safe(node, n, &d->pending_list, node) { struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; if (msg->iova <= vq_msg->iova && - msg->iova + msg->size - 1 > vq_msg->iova && + msg->iova + msg->size - 1 >= vq_msg->iova && vq_msg->type == VHOST_IOTLB_MISS) { vhost_poll_queue(&node->vq->poll); list_del(&node->node); -- GitLab From e75d039a54090470b7a42e803fd4f9398390f907 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 24 Aug 2018 12:18:30 +0100 Subject: [PATCH 0268/1692] qed: fix spelling mistake "comparsion" -> "comparison" Trivial fix to spelling mistake in DP_ERR error message Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d9ab5add27a8..34193c2f1699 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, if (i == QED_INIT_MAX_POLL_COUNT) { DP_ERR(p_hwfn, - "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", + "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", addr, le32_to_cpu(cmd->expected_val), val, le32_to_cpu(cmd->op_data)); } -- GitLab From 98c8f125fd8a6240ea343c1aa50a1be9047791b8 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sat, 25 Aug 2018 22:58:01 -0700 Subject: [PATCH 0269/1692] net: sched: Fix memory exposure from short TCA_U32_SEL Via u32_change(), TCA_U32_SEL has an unspecified type in the netlink policy, so max length isn't enforced, only minimum. This means nkeys (from userspace) was being trusted without checking the actual size of nla_len(), which could lead to a memory over-read, and ultimately an exposure via a call to u32_dump(). Reachability is CAP_NET_ADMIN within a namespace. Reported-by: Al Viro Cc: Jamal Hadi Salim Cc: Cong Wang Cc: Jiri Pirko Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- net/sched/cls_u32.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d5d2a6dc3921..f218ccf1e2d9 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_U32_MAX + 1]; u32 htid, flags = 0; + size_t sel_size; int err; #ifdef CONFIG_CLS_U32_PERF size_t size; @@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, } s = nla_data(tb[TCA_U32_SEL]); + sel_size = struct_size(s, keys, s->nkeys); + if (nla_len(tb[TCA_U32_SEL]) < sel_size) { + err = -EINVAL; + goto erridr; + } - n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); + n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL); if (n == NULL) { err = -ENOBUFS; goto erridr; @@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, } #endif - memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); + memcpy(&n->sel, s, sel_size); RCU_INIT_POINTER(n->ht_up, ht); n->handle = handle; n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; -- GitLab From 3ad867001c91657c46dcf6656d52eb6080286fd5 Mon Sep 17 00:00:00 2001 From: Lothar Felten Date: Tue, 14 Aug 2018 09:09:37 +0200 Subject: [PATCH 0270/1692] hwmon: (ina2xx) fix sysfs shunt resistor read access fix the sysfs shunt resistor read access: return the shunt resistor value, not the calibration register contents. update email address Signed-off-by: Lothar Felten Signed-off-by: Guenter Roeck --- Documentation/hwmon/ina2xx | 2 +- drivers/hwmon/ina2xx.c | 13 +++++++++++-- include/linux/platform_data/ina2xx.h | 2 +- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index 72d16f08e431..b8df81f6d6bc 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx @@ -32,7 +32,7 @@ Supported chips: Datasheet: Publicly available at the Texas Instruments website http://www.ti.com/ -Author: Lothar Felten +Author: Lothar Felten Description ----------- diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e9e6aeabbf84..71d3445ba869 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -17,7 +17,7 @@ * Bi-directional Current/Power Monitor with I2C Interface * Datasheet: http://www.ti.com/product/ina230 * - * Copyright (C) 2012 Lothar Felten + * Copyright (C) 2012 Lothar Felten * Thanks to Jan Volkering * * This program is free software; you can redistribute it and/or modify @@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val) return 0; } +static ssize_t ina2xx_show_shunt(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct ina2xx_data *data = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt); +} + static ssize_t ina2xx_store_shunt(struct device *dev, struct device_attribute *da, const char *buf, size_t count) @@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, /* shunt resistance */ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, - ina2xx_show_value, ina2xx_store_shunt, + ina2xx_show_shunt, ina2xx_store_shunt, INA2XX_CALIBRATION); /* update interval (ina226 only) */ diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h index 9abc0ca7259b..9f0aa1b48c78 100644 --- a/include/linux/platform_data/ina2xx.h +++ 
b/include/linux/platform_data/ina2xx.h @@ -1,7 +1,7 @@ /* * Driver for Texas Instruments INA219, INA226 power monitor chips * - * Copyright (C) 2012 Lothar Felten + * Copyright (C) 2012 Lothar Felten * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as -- GitLab From 9d19371df50a73301aec66a479b490587e889055 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Aug 2018 12:12:36 +0300 Subject: [PATCH 0271/1692] hwmon: (adt7475) Potential error pointer dereferences The adt7475_update_device() function returns error pointers. The problem is that in show_pwmfreq() we dereference it before the check. And then in pwm_use_point2_pwm_at_crit_show() there isn't a check at all. I don't know if it's required, but it silences a static checker warning and it's doesn't hurt anything to check. Signed-off-by: Dan Carpenter Reviewed-by: Tokunori Ikegami Signed-off-by: Guenter Roeck --- drivers/hwmon/adt7475.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 90837f7c7d0f..16045149f3db 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -962,13 +962,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr, { struct adt7475_data *data = adt7475_update_device(dev); struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); - int i = clamp_val(data->range[sattr->index] & 0xf, 0, - ARRAY_SIZE(pwmfreq_table) - 1); + int idx; if (IS_ERR(data)) return PTR_ERR(data); + idx = clamp_val(data->range[sattr->index] & 0xf, 0, + ARRAY_SIZE(pwmfreq_table) - 1); - return sprintf(buf, "%d\n", pwmfreq_table[i]); + return sprintf(buf, "%d\n", pwmfreq_table[idx]); } static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, @@ -1004,6 +1005,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev, char *buf) { struct adt7475_data *data = adt7475_update_device(dev); + + if (IS_ERR(data)) + return PTR_ERR(data); + return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); } -- GitLab From f196dec6d50abb2e65fb54a0621b2f1b4d922995 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Aug 2018 13:07:47 +0300 Subject: [PATCH 0272/1692] hwmon: (adt7475) Make adt7475_read_word() return errors The adt7475_read_word() function was meant to return negative error codes on failure. 
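As an illustration only (this sketch is not part of the patch, and the caller below is hypothetical), once adt7475_read_word() returns an int, callers are expected to check for a negative errno before using the 16-bit value:

/* Hypothetical caller sketch, assuming the driver's usual SMBus context. */
static int adt7475_get_word_checked(struct i2c_client *client, int reg, u16 *out)
{
	int ret;

	ret = adt7475_read_word(client, reg);
	if (ret < 0)
		return ret;	/* propagates the i2c_smbus_read_byte_data() error */

	*out = ret;		/* low byte from reg, high byte from reg + 1 */
	return 0;
}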
Signed-off-by: Dan Carpenter Reviewed-by: Tokunori Ikegami Signed-off-by: Guenter Roeck --- drivers/hwmon/adt7475.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 16045149f3db..f4c7516eb989 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn) return clamp_val(reg, 0, 1023) & (0xff << 2); } -static u16 adt7475_read_word(struct i2c_client *client, int reg) +static int adt7475_read_word(struct i2c_client *client, int reg) { - u16 val; + int val1, val2; - val = i2c_smbus_read_byte_data(client, reg); - val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); + val1 = i2c_smbus_read_byte_data(client, reg); + if (val1 < 0) + return val1; + val2 = i2c_smbus_read_byte_data(client, reg + 1); + if (val2 < 0) + return val2; - return val; + return val1 | (val2 << 8); } static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) -- GitLab From d49dbfade96d5b0863ca8a90122a805edd5ef50a Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 15 Aug 2018 08:14:37 -0500 Subject: [PATCH 0273/1692] hwmon: (nct6775) Fix potential Spectre v1 val can be indirectly controlled by user-space, hence leading to a potential exploitation of the Spectre variant 1 vulnerability. This issue was detected with the help of Smatch: vers/hwmon/nct6775.c:2698 store_pwm_weight_temp_sel() warn: potential spectre issue 'data->temp_src' [r] Fix this by sanitizing val before using it to index data->temp_src Notice that given that speculation windows are large, the policy is to kill the speculation on the first load and not worry if it can be completed with a dependent load/store [1]. [1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2 Cc: stable@vger.kernel.org Signed-off-by: Gustavo A. R. Silva Signed-off-by: Guenter Roeck --- drivers/hwmon/nct6775.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c6bd61e4695a..944f5b63aecd 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "lm75.h" #define USE_ALTERNATE @@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr, return err; if (val > NUM_TEMP) return -EINVAL; + val = array_index_nospec(val, NUM_TEMP + 1); if (val && (!(data->have_temp & BIT(val - 1)) || !data->temp_src[val - 1])) return -EINVAL; -- GitLab From c7c09dc187f0323ad40b5b6c57a6db673a386a7f Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Fri, 24 Aug 2018 11:29:27 +0800 Subject: [PATCH 0274/1692] nios2: kconfig: remove duplicate DEBUG_STACK_USAGE symbol defintions DEBUG_STACK_USAGE is already defined in lib/Kconfig.debug Signed-off-by: Tobias Klauser Signed-off-by: Ley Foon Tan --- arch/nios2/Kconfig.debug | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug index 7a49f0d28d14..f1da8a7b17ff 100644 --- a/arch/nios2/Kconfig.debug +++ b/arch/nios2/Kconfig.debug @@ -3,15 +3,6 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y -config DEBUG_STACK_USAGE - bool "Enable stack utilization instrumentation" - depends on DEBUG_KERNEL - help - Enables the display of the minimum amount of free stack which each - task has ever had available in the sysrq-T and sysrq-P debug output. - - This option will slow down process creation somewhat. 
- config EARLY_PRINTK bool "Activate early kernel debugging" default y -- GitLab From 2b7bd20d5605e0314d677ea21b462543e73e466c Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Sat, 28 Jul 2018 19:08:45 +0530 Subject: [PATCH 0275/1692] drm/mediatek: Convert drm_atomic_helper_suspend/resume() convert drm_atomic_helper_suspend/resume() to use drm_mode_config_helper_suspend/resume(). Signed-off-by: Souptick Joarder Signed-off-by: Ajit Negi Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 39721119713b..b68922a793cb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); struct drm_device *drm = private->drm; + int ret; - drm_kms_helper_poll_disable(drm); - - private->suspend_state = drm_atomic_helper_suspend(drm); - if (IS_ERR(private->suspend_state)) { - drm_kms_helper_poll_enable(drm); - return PTR_ERR(private->suspend_state); - } - + ret = drm_mode_config_helper_suspend(drm); DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); - return 0; + + return ret; } static int mtk_drm_sys_resume(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); struct drm_device *drm = private->drm; + int ret; - drm_atomic_helper_resume(drm, private->suspend_state); - drm_kms_helper_poll_enable(drm); - + ret = drm_mode_config_helper_resume(drm); DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); - return 0; + + return ret; } #endif -- GitLab From 8272806d21bf2a7fd74602cc7bade7d12f73ac4b Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 17 Jul 2018 10:35:18 +0200 Subject: [PATCH 0276/1692] drm/mediatek: Replace drm_dev_unref with drm_dev_put This patch unifies the naming of DRM functions for reference counting of struct drm_device. The resulting code is more aligned with the rest of the Linux kernel interfaces. 
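As a minimal sketch (assumed context, not taken from this patch): the rename is purely mechanical, since drm_dev_unref() was a compatibility alias for drm_dev_put(); both drop one reference on the struct drm_device. A typical error path touched by this kind of conversion looks like:

/* Hedged illustration only; the function name here is made up. */
static int example_bind(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret) {
		drm_dev_put(drm);	/* previously spelled drm_dev_unref(drm) */
		return ret;
	}

	return 0;
}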
Signed-off-by: Thomas Zimmermann Reviewed-by: Philipp Zabel Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index b68922a793cb..47ec604289b7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev) err_deinit: mtk_drm_kms_deinit(drm); err_free: - drm_dev_unref(drm); + drm_dev_put(drm); return ret; } @@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev) struct mtk_drm_private *private = dev_get_drvdata(dev); drm_dev_unregister(private->drm); - drm_dev_unref(private->drm); + drm_dev_put(private->drm); private->drm = NULL; } @@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev) drm_dev_unregister(drm); mtk_drm_kms_deinit(drm); - drm_dev_unref(drm); + drm_dev_put(drm); component_master_del(&pdev->dev, &mtk_drm_ops); pm_runtime_disable(&pdev->dev); -- GitLab From 29d32e466e98e03378878e95339334971d0fdaf4 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:36 +0800 Subject: [PATCH 0277/1692] drm/mediatek: add connection from RDMA0 to DPI1 This patch add connection from RDMA0 to DPI1 Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 87e4191c250e..03e3628b5b0d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -106,6 +106,7 @@ #define OVL1_MOUT_EN_COLOR1 0x1 #define GAMMA_MOUT_EN_RDMA1 0x1 #define RDMA0_SOUT_DPI0 0x2 +#define RDMA0_SOUT_DPI1 0x3 #define RDMA0_SOUT_DSI2 0x4 #define RDMA0_SOUT_DSI3 0x5 #define RDMA1_SOUT_DPI0 0x2 @@ -224,6 +225,9 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; value = RDMA0_SOUT_DPI0; + } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { + *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; + value = RDMA0_SOUT_DPI1; } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; value = RDMA0_SOUT_DSI2; -- GitLab From 48d25d243bfb20a7230e9f226c560b71d989d962 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:37 +0800 Subject: [PATCH 0278/1692] drm/mediatek: add connection from RDMA0 to DSI1 This patch add connection from RDMA0 to DSI1 Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 03e3628b5b0d..310d8482d5a0 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -107,6 +107,7 @@ #define GAMMA_MOUT_EN_RDMA1 0x1 #define RDMA0_SOUT_DPI0 0x2 #define RDMA0_SOUT_DPI1 0x3 +#define RDMA0_SOUT_DSI1 0x1 #define RDMA0_SOUT_DSI2 0x4 #define RDMA0_SOUT_DSI3 0x5 #define RDMA1_SOUT_DPI0 0x2 @@ -228,6 +229,9 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; value = RDMA0_SOUT_DPI1; + } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) { + *addr = 
DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; + value = RDMA0_SOUT_DSI1; } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; value = RDMA0_SOUT_DSI2; -- GitLab From 0a14785ee32ad40458657edaf1025f71ebbc1147 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:38 +0800 Subject: [PATCH 0279/1692] drm/mediatek: add connection from RDMA1 to DSI0 This patch add connection from RDMA1 to DSI0 Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 310d8482d5a0..31189fad8d4e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -124,6 +124,7 @@ #define DPI0_SEL_IN_RDMA2 0x3 #define DPI1_SEL_IN_RDMA1 (0x1 << 8) #define DPI1_SEL_IN_RDMA2 (0x3 << 8) +#define DSI0_SEL_IN_RDMA1 0x1 #define DSI1_SEL_IN_RDMA1 0x1 #define DSI1_SEL_IN_RDMA2 0x4 #define DSI2_SEL_IN_RDMA1 (0x1 << 16) @@ -290,6 +291,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { *addr = DISP_REG_CONFIG_DPI_SEL_IN; value = DPI1_SEL_IN_RDMA1; + } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) { + *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + value = DSI0_SEL_IN_RDMA1; } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { *addr = DISP_REG_CONFIG_DSIO_SEL_IN; value = DSI1_SEL_IN_RDMA1; -- GitLab From 85186efc2a5975801cd4ba03a9143305e474b645 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:39 +0800 Subject: [PATCH 0280/1692] drm/mediatek: add connection from RDMA2 to DSI0 This patch add connection from RDMA2 to DSI0 Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 31189fad8d4e..3239f22785fd 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -125,6 +125,7 @@ #define DPI1_SEL_IN_RDMA1 (0x1 << 8) #define DPI1_SEL_IN_RDMA2 (0x3 << 8) #define DSI0_SEL_IN_RDMA1 0x1 +#define DSI0_SEL_IN_RDMA2 0x4 #define DSI1_SEL_IN_RDMA1 0x1 #define DSI1_SEL_IN_RDMA2 0x4 #define DSI2_SEL_IN_RDMA1 (0x1 << 16) @@ -309,6 +310,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { *addr = DISP_REG_CONFIG_DPI_SEL_IN; value = DPI1_SEL_IN_RDMA2; + } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) { + *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + value = DSI0_SEL_IN_RDMA2; } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { *addr = DISP_REG_CONFIG_DSIE_SEL_IN; value = DSI1_SEL_IN_RDMA2; -- GitLab From 182add0b1b9170a1f8f2a049fe2e298222cf405a Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:40 +0800 Subject: [PATCH 0281/1692] drm/mediatek: add memory mode and layer_config for RDMA This patch add memory mode for RDMA and layer_config for RDMA If use RDMA to read data from memory, it should set memory mode to RDMA Layer config set the data address and pitch to RDMA from plane setting. 
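To illustrate why memory mode matters (a sketch under assumptions, not code from this patch): when the first component of a display path is an RDMA rather than an OVL, the RDMA has no upstream engine feeding it pixels, so it has to be switched from its default direct-link mode to memory mode and pointed at the framebuffer itself. A hypothetical path layout of that shape:

/* Hypothetical example; the real path tables live in mtk_drm_drv.c. */
static const enum mtk_ddp_comp_id example_rdma_first_path[] = {
	DDP_COMPONENT_RDMA2,	/* fetches the plane from DRAM (RDMA_MODE_MEMORY) */
	DDP_COMPONENT_DSI3,	/* sink of the path */
};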
Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 585943c81e1f..08866550740f 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -31,14 +31,20 @@ #define RDMA_REG_UPDATE_INT BIT(0) #define DISP_REG_RDMA_GLOBAL_CON 0x0010 #define RDMA_ENGINE_EN BIT(0) +#define RDMA_MODE_MEMORY BIT(1) #define DISP_REG_RDMA_SIZE_CON_0 0x0014 #define DISP_REG_RDMA_SIZE_CON_1 0x0018 #define DISP_REG_RDMA_TARGET_LINE 0x001c +#define DISP_RDMA_MEM_SRC_PITCH 0x002c +#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 #define DISP_REG_RDMA_FIFO_CON 0x0040 #define RDMA_FIFO_UNDERFLOW_EN BIT(31) #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) +#define DISP_RDMA_MEM_START_ADDR 0x0f00 + +#define RDMA_MEM_GMC 0x40402020 struct mtk_disp_rdma_data { unsigned int fifo_size; @@ -138,12 +144,27 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); } +static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, + struct mtk_plane_state *state) +{ + struct mtk_plane_pending_state *pending = &state->pending; + unsigned int addr = pending->addr; + unsigned int pitch = pending->pitch & 0xffff; + + writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); + writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); + writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); + rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, + RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); +} + static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { .config = mtk_rdma_config, .start = mtk_rdma_start, .stop = mtk_rdma_stop, .enable_vblank = mtk_rdma_enable_vblank, .disable_vblank = mtk_rdma_disable_vblank, + .layer_config = mtk_rdma_layer_config, }; static int mtk_disp_rdma_bind(struct device *dev, struct device *master, -- GitLab From b428391ed6bd5e3cb8ea9d1738ef4bd16af6cdb2 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:41 +0800 Subject: [PATCH 0282/1692] drm/mediatek: add RGB color format support for RDMA This patch add RGB color format support for RDMA, including RGB565, RGB888, RGBA8888 and ARGB8888. 
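For reference (a usage-level sketch only; the array name is hypothetical and not part of the patch), these are the DRM fourcc codes covered by the mapping added below, i.e. the formats a plane scanned out by RDMA can now use:

/* Hypothetical format list; every entry appears in the conversion below. */
static const u32 example_rdma_formats[] = {
	DRM_FORMAT_RGB565,   DRM_FORMAT_BGR565,
	DRM_FORMAT_RGB888,   DRM_FORMAT_BGR888,
	DRM_FORMAT_RGBX8888, DRM_FORMAT_RGBA8888,
	DRM_FORMAT_BGRX8888, DRM_FORMAT_BGRA8888,
	DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888, DRM_FORMAT_ABGR8888,
};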
Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 45 ++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 08866550740f..091e48e51501 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -35,6 +35,12 @@ #define DISP_REG_RDMA_SIZE_CON_0 0x0014 #define DISP_REG_RDMA_SIZE_CON_1 0x0018 #define DISP_REG_RDMA_TARGET_LINE 0x001c +#define DISP_RDMA_MEM_CON 0x0024 +#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) +#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) +#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) +#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) +#define MEM_MODE_INPUT_SWAP BIT(8) #define DISP_RDMA_MEM_SRC_PITCH 0x002c #define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 #define DISP_REG_RDMA_FIFO_CON 0x0040 @@ -144,12 +150,51 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); } +static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, + unsigned int fmt) +{ + /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" + * is defined in mediatek HW data sheet. + * The alphabet order in XXX is no relation to data + * arrangement in memory. + */ + switch (fmt) { + default: + case DRM_FORMAT_RGB565: + return MEM_MODE_INPUT_FORMAT_RGB565; + case DRM_FORMAT_BGR565: + return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_RGB888: + return MEM_MODE_INPUT_FORMAT_RGB888; + case DRM_FORMAT_BGR888: + return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + return MEM_MODE_INPUT_FORMAT_ARGB8888; + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA8888: + return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return MEM_MODE_INPUT_FORMAT_RGBA8888; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; + } +} + static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state) { + struct mtk_disp_rdma *rdma = comp_to_rdma(comp); struct mtk_plane_pending_state *pending = &state->pending; unsigned int addr = pending->addr; unsigned int pitch = pending->pitch & 0xffff; + unsigned int fmt = pending->format; + unsigned int con; + + con = rdma_fmt_convert(rdma, fmt); + writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); -- GitLab From 55b53f6f7ccf0990ad83acf4fdb0436ff79fdfb6 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:42 +0800 Subject: [PATCH 0283/1692] drm/mediatek: add the comment about color format setting for OVL This patch add the comment about color format setting for OVL Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 978782a77629..0facd823c552 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -157,6 +157,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) { + /* The return value in switch 
"MEM_MODE_INPUT_FORMAT_XXX" + * is defined in mediatek HW data sheet. + * The alphabet order in XXX is no relation to data + * arrangement in memory. + */ switch (fmt) { default: case DRM_FORMAT_RGB565: -- GitLab From 94420a63cf784945061b7b5f38511b7a48f034eb Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:43 +0800 Subject: [PATCH 0284/1692] drm/mediatek: add YUYV/UYVY color format support for RDMA This patch add YUYV/UYVY color format support for RDMA and transform matrix for YUYV/UYVY. Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 091e48e51501..2d27e15445d2 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -33,6 +33,9 @@ #define RDMA_ENGINE_EN BIT(0) #define RDMA_MODE_MEMORY BIT(1) #define DISP_REG_RDMA_SIZE_CON_0 0x0014 +#define RDMA_MATRIX_ENABLE BIT(17) +#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) +#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) #define DISP_REG_RDMA_SIZE_CON_1 0x0018 #define DISP_REG_RDMA_TARGET_LINE 0x001c #define DISP_RDMA_MEM_CON 0x0024 @@ -40,6 +43,8 @@ #define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) #define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) #define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) +#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) +#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) #define MEM_MODE_INPUT_SWAP BIT(8) #define DISP_RDMA_MEM_SRC_PITCH 0x002c #define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 @@ -180,6 +185,10 @@ static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_UYVY: + return MEM_MODE_INPUT_FORMAT_UYVY; + case DRM_FORMAT_YUYV: + return MEM_MODE_INPUT_FORMAT_YUYV; } } @@ -196,6 +205,17 @@ static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, con = rdma_fmt_convert(rdma, fmt); writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); + if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_INT_MTX_SEL, + RDMA_MATRIX_INT_MTX_BT601_to_RGB); + } else { + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE, 0); + } + writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); -- GitLab From 650afd49572b56a5c58134d4acfeb77acc69d622 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:44 +0800 Subject: [PATCH 0285/1692] drm/mediatek: add function to get layer number for component This patch add function to get layer number for component Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 7413ffeb3c9d..8399229e6ad2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs { void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); void 
(*disable_vblank)(struct mtk_ddp_comp *comp); + unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, @@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) comp->funcs->disable_vblank(comp); } +static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->layer_nr) + return comp->funcs->layer_nr(comp); + + return 0; +} + static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) { -- GitLab From 1cbcb763ea5035e7ef01010ea68eb3b5143ad7cb Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:45 +0800 Subject: [PATCH 0286/1692] drm/mediatek: add function to return OVL layer number This patch add function to return OVL layer number For now, MT8173, MT2712, MT2701 OVL all has 4 layer. Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 0facd823c552..28d191192945 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, writel(0x0, comp->regs + DISP_REG_OVL_RST); } +static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) +{ + return 4; +} + static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) { unsigned int reg; @@ -226,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { .stop = mtk_ovl_stop, .enable_vblank = mtk_ovl_enable_vblank, .disable_vblank = mtk_ovl_disable_vblank, + .layer_nr = mtk_ovl_layer_nr, .layer_on = mtk_ovl_layer_on, .layer_off = mtk_ovl_layer_off, .layer_config = mtk_ovl_layer_config, -- GitLab From 98b6d76f957ba80017a3118fe0e33030b4bc017b Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:46 +0800 Subject: [PATCH 0287/1692] drm/mediatek: add function to return RDMA layer number This patch add function to return RDMA layer number RDMA always has one layer. 
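A minimal sketch of how a caller consumes the new hook (this mirrors what a later patch in this series wires up; the fragment is illustrative and uses devm_kcalloc() for brevity, whereas the actual patch allocates with devm_kzalloc() and an explicit size multiplication):

/* Illustrative fragment: size the plane array from the path's first component. */
unsigned int layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); /* 4 for OVL, 1 for RDMA */
struct drm_plane *planes = devm_kcalloc(dev, layer_nr, sizeof(*planes), GFP_KERNEL);

if (!planes)
	return -ENOMEM;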
Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 2d27e15445d2..b0a5cffe345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -192,6 +192,11 @@ static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, } } +static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) +{ + return 1; +} + static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state) { @@ -229,6 +234,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { .stop = mtk_rdma_stop, .enable_vblank = mtk_rdma_enable_vblank, .disable_vblank = mtk_rdma_disable_vblank, + .layer_nr = mtk_rdma_layer_nr, .layer_config = mtk_rdma_layer_config, }; -- GitLab From 66b2cf9623facfad790b335fcfd717258a00896b Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:47 +0800 Subject: [PATCH 0288/1692] drm/mediatek: use layer_nr function to get layer number to init plane This patch use layer_nr function to get layer number to init plane When plane init in crtc create, it use the number of OVL layer to init plane. That's OVL can read 4 memory address. For mt2712 third ddp, it use RDMA to read memory. RDMA can read 1 memory address, so it just init one plane. For compatibility, this patch use mtk_ddp_comp_layer_nr function to get layer number from their HW component in ddp for plane init. Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 21 ++++++++++++++------- drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 1 - 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 2d6aa150a9ff..845d1608465e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -45,7 +45,8 @@ struct mtk_drm_crtc { bool pending_needs_vblank; struct drm_pending_vblank_event *event; - struct drm_plane planes[OVL_LAYER_NR]; + struct drm_plane *planes; + unsigned int layer_nr; bool pending_planes; void __iomem *config_regs; @@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) } /* Initially configure all planes */ - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -351,7 +352,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) } if (mtk_crtc->pending_planes) { - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, return; /* Set all pending plane state to disabled */ - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, if (mtk_crtc->event) mtk_crtc->pending_needs_vblank = true; - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->ddp_comp[i] = comp; } - 
for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { + mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); + mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * + sizeof(struct drm_plane), + GFP_KERNEL); + + for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : DRM_PLANE_TYPE_OVERLAY; @@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, } ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], - &mtk_crtc->planes[1], pipe); + mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] : + NULL, pipe); if (ret < 0) goto unprepare; drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 9d9410c67ae9..60bcc8aba8e3 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -18,7 +18,6 @@ #include "mtk_drm_ddp_comp.h" #include "mtk_drm_plane.h" -#define OVL_LAYER_NR 4 #define MTK_LUT_SIZE 512 #define MTK_MAX_BPC 10 #define MTK_MIN_BPC 3 -- GitLab From f265905c939e21a0c9e83540d4c2776c3e43c310 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:48 +0800 Subject: [PATCH 0289/1692] drm/mediatek: update some variable name from ovl to comp This patch update some variable name from ovl to comp Because RDMA would be first HW in ddp, the naming ovl should be change to comp. Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 26 ++++++++++++------------- drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 845d1608465e..0b976dfd04df 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -172,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); + mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base); return 0; } @@ -182,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - mtk_ddp_comp_disable_vblank(ovl); + mtk_ddp_comp_disable_vblank(comp); } static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) @@ -335,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; unsigned int i; /* @@ -344,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) * queue update module registers on vblank. 
*/ if (state->pending_config) { - mtk_ddp_comp_config(ovl, state->pending_width, + mtk_ddp_comp_config(comp, state->pending_width, state->pending_height, state->pending_vrefresh, 0); @@ -359,7 +359,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) plane_state = to_mtk_plane_state(plane->state); if (plane_state->pending.config) { - mtk_ddp_comp_layer_config(ovl, i, plane_state); + mtk_ddp_comp_layer_config(comp, i, plane_state); plane_state->pending.config = false; } } @@ -371,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; int ret; DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); - ret = mtk_smi_larb_get(ovl->larb_dev); + ret = mtk_smi_larb_get(comp->larb_dev); if (ret) { DRM_ERROR("Failed to get larb: %d\n", ret); return; @@ -384,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, ret = mtk_crtc_ddp_hw_init(mtk_crtc); if (ret) { - mtk_smi_larb_put(ovl->larb_dev); + mtk_smi_larb_put(comp->larb_dev); return; } @@ -396,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; int i; DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); @@ -419,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); mtk_crtc_ddp_hw_fini(mtk_crtc); - mtk_smi_larb_put(ovl->larb_dev); + mtk_smi_larb_put(comp->larb_dev); mtk_crtc->enabled = false; } @@ -517,7 +517,7 @@ static int mtk_drm_crtc_init(struct drm_device *drm, return ret; } -void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) +void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_drm_private *priv = crtc->dev->dev_private; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 60bcc8aba8e3..091adb2087eb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -23,7 +23,7 @@ #define MTK_MIN_BPC 3 void mtk_drm_crtc_commit(struct drm_crtc *crtc); -void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); +void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); int mtk_drm_crtc_create(struct drm_device *drm_dev, const enum mtk_ddp_comp_id *path, unsigned int path_len); -- GitLab From 08bcbed747eb87f00d2e2590b49607af1a9f4fe9 Mon Sep 17 00:00:00 2001 From: Stu Hsieh Date: Thu, 9 Aug 2018 10:15:49 +0800 Subject: [PATCH 0290/1692] drm/mediatek: fix connection from RDMA2 to DSI1 This patch fix connection from RDMA2 to DSI1 Signed-off-by: Stu Hsieh Signed-off-by: CK Hu --- drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 3239f22785fd..546b3e3b300b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -314,7 +314,7 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, *addr = DISP_REG_CONFIG_DSIE_SEL_IN; value = DSI0_SEL_IN_RDMA2; } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { - *addr = DISP_REG_CONFIG_DSIE_SEL_IN; + *addr = 
DISP_REG_CONFIG_DSIO_SEL_IN; value = DSI1_SEL_IN_RDMA2; } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { *addr = DISP_REG_CONFIG_DSIE_SEL_IN; -- GitLab From 072ebb3bffe67d71d1f1e52add799f4491eab691 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 27 Aug 2018 01:15:11 -0400 Subject: [PATCH 0291/1692] ext4: add nonstring annotations to ext4.h This suppresses some false positives in gcc 8's -Wstringop-truncation Suggested by Miguel Ojeda (hopefully the __nonstring definition will eventually get accepted in the compiler-gcc.h header file). Signed-off-by: Theodore Ts'o Cc: Miguel Ojeda --- fs/ext4/ext4.h | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 1fc013f3d944..249bcee4d7b2 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -43,6 +43,17 @@ #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION) #include +#include + +/* Until this gets included into linux/compiler-gcc.h */ +#ifndef __nonstring +#if defined(GCC_VERSION) && (GCC_VERSION >= 80000) +#define __nonstring __attribute__((nonstring)) +#else +#define __nonstring +#endif +#endif + /* * The fourth extended filesystem constants/structures */ @@ -1226,7 +1237,7 @@ struct ext4_super_block { __le32 s_feature_ro_compat; /* readonly-compatible feature set */ /*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ /*78*/ char s_volume_name[16]; /* volume name */ -/*88*/ char s_last_mounted[64]; /* directory where last mounted */ +/*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */ /*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */ /* * Performance hints. Directory preallocation should only @@ -1277,13 +1288,13 @@ struct ext4_super_block { __le32 s_first_error_time; /* first time an error happened */ __le32 s_first_error_ino; /* inode involved in first error */ __le64 s_first_error_block; /* block involved of first error */ - __u8 s_first_error_func[32]; /* function where the error happened */ + __u8 s_first_error_func[32] __nonstring; /* function where the error happened */ __le32 s_first_error_line; /* line number where error happened */ __le32 s_last_error_time; /* most recent time of an error */ __le32 s_last_error_ino; /* inode involved in last error */ __le32 s_last_error_line; /* line number where error happened */ __le64 s_last_error_block; /* block involved of last error */ - __u8 s_last_error_func[32]; /* function where the error happened */ + __u8 s_last_error_func[32] __nonstring; /* function where the error happened */ #define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts) __u8 s_mount_opts[64]; __le32 s_usr_quota_inum; /* inode for tracking user quota */ -- GitLab From 111b009f7e8bcdfc8d565b1f0e3ee5072bb7490b Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Wed, 22 Aug 2018 10:40:27 +0800 Subject: [PATCH 0292/1692] dmaengine: mic_x100_dma: use devm_kzalloc to fix an issue The following patch introduced an issue. commit f6206f00d8c5 ("dmaengine: mic_x100_dma: use the new helper to simplify the code") This issue is : kfree(mic_dma_dev) ..... dma_async_device_unregister(mic_dma_dev->device); Free the memory, and use it again. So use devm_kzalloc to allocate mic_dma_dev to fix it. When the Devres try to release the resources, it will call release at the following order: dma_async_device_unregister(mic_dma_dev->device); ..... 
kfree(mic_dma_dev) Fixes: f6206f00d8c5 ("dmaengine: mic_x100_dma: use the new helper to simplify the code") Signed-off-by: Huang Shijie Signed-off-by: Vinod Koul --- drivers/dma/mic_x100_dma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index b76cb17d879c..adfd316db1a8 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev, int ret; struct device *dev = &mbdev->dev; - mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL); + mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL); if (!mic_dma_dev) { ret = -ENOMEM; goto alloc_error; @@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev, reg_error: mic_dma_uninit(mic_dma_dev); init_error: - kfree(mic_dma_dev); mic_dma_dev = NULL; alloc_error: dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret); @@ -674,7 +673,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev, static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) { mic_dma_uninit(mic_dma_dev); - kfree(mic_dma_dev); } /* DEBUGFS CODE */ -- GitLab From b50282f3241acee880514212d88b6049fb5039c8 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 27 Aug 2018 01:47:09 -0400 Subject: [PATCH 0293/1692] ext4: check to make sure the rename(2)'s destination is not freed If the destination of the rename(2) system call exists, the inode's link count (i_nlinks) must be non-zero. If it is, the inode can end up on the orphan list prematurely, leading to all sorts of hilarity, including a use-after-free. https://bugzilla.kernel.org/show_bug.cgi?id=200931 Signed-off-by: Theodore Ts'o Reported-by: Wen Xu Cc: stable@vger.kernel.org --- fs/ext4/namei.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 116ff68c5bd4..377d516c475f 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, int credits; u8 old_file_type; + if (new.inode && new.inode->i_nlink == 0) { + EXT4_ERROR_INODE(new.inode, + "target of rename is already freed"); + return -EFSCORRUPTED; + } + if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(new_dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) -- GitLab From 538d6e9d597584e80514698e24321645debde78f Mon Sep 17 00:00:00 2001 From: Leonard Crestez Date: Tue, 24 Jul 2018 19:14:19 +0300 Subject: [PATCH 0294/1692] Revert "ARM: dts: imx7d: Invert legacy PCI irq mapping" This reverts commit 1c86c9dd82f859b474474a7fee0d5195da2c9c1d. That commit followed the reference manual but unfortunately the imx7d manual is incorrect. Tested with ath9k pcie card and confirmed internally. 
Signed-off-by: Leonard Crestez Acked-by: Lucas Stach Fixes: 1c86c9dd82f8 ("ARM: dts: imx7d: Invert legacy PCI irq mapping") Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx7d.dtsi | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index 7cbc2ffa4b3a..7234e8330a57 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi @@ -126,10 +126,14 @@ pcie: pcie@33800000 { interrupt-names = "msi"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; - interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, - <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; + /* + * Reference manual lists pci irqs incorrectly + * Real hardware ordering is same as imx6: D+MSI, C, B, A + */ + interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, <&clks IMX7D_PCIE_PHY_ROOT_CLK>; -- GitLab From 90a96087b5fa835790a54c588184adfc867dbc12 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 20 Jul 2018 17:39:16 -0300 Subject: [PATCH 0295/1692] ARM: dts: imx28-evk: Move regulators outside simple-bus It is recommended to place regulators outside simple-bus, so move them accordingly. Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx28-evk.dts | 135 +++++++++++++++----------------- 1 file changed, 61 insertions(+), 74 deletions(-) diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 6b0ae667640f..210aee097b36 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts @@ -13,6 +13,67 @@ memory@40000000 { reg = <0x40000000 0x08000000>; }; + + reg_3p3v: regulator-3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + reg_vddio_sd0: regulator-vddio-sd0 { + compatible = "regulator-fixed"; + regulator-name = "vddio-sd0"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio3 28 0>; + }; + + reg_fec_3v3: regulator-fec-3v3 { + compatible = "regulator-fixed"; + regulator-name = "fec-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 15 0>; + }; + + reg_usb0_vbus: regulator-usb0-vbus { + compatible = "regulator-fixed"; + regulator-name = "usb0_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio3 9 0>; + enable-active-high; + }; + + reg_usb1_vbus: regulator-usb1-vbus { + compatible = "regulator-fixed"; + regulator-name = "usb1_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio3 8 0>; + enable-active-high; + }; + + reg_lcd_3v3: regulator-lcd-3v3 { + compatible = "regulator-fixed"; + regulator-name = "lcd-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio3 30 0>; + enable-active-high; + }; + + reg_can_3v3: regulator-can-3v3 { + compatible = "regulator-fixed"; + regulator-name = "can-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 13 0>; + enable-active-high; + }; + apb@80000000 { 
apbh@80000000 { gpmi-nand@8000c000 { @@ -269,80 +330,6 @@ mac1: ethernet@800f4000 { }; }; - regulators { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <0>; - - reg_3p3v: regulator@0 { - compatible = "regulator-fixed"; - reg = <0>; - regulator-name = "3P3V"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - regulator-always-on; - }; - - reg_vddio_sd0: regulator@1 { - compatible = "regulator-fixed"; - reg = <1>; - regulator-name = "vddio-sd0"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio3 28 0>; - }; - - reg_fec_3v3: regulator@2 { - compatible = "regulator-fixed"; - reg = <2>; - regulator-name = "fec-3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio2 15 0>; - }; - - reg_usb0_vbus: regulator@3 { - compatible = "regulator-fixed"; - reg = <3>; - regulator-name = "usb0_vbus"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - gpio = <&gpio3 9 0>; - enable-active-high; - }; - - reg_usb1_vbus: regulator@4 { - compatible = "regulator-fixed"; - reg = <4>; - regulator-name = "usb1_vbus"; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - gpio = <&gpio3 8 0>; - enable-active-high; - }; - - reg_lcd_3v3: regulator@5 { - compatible = "regulator-fixed"; - reg = <5>; - regulator-name = "lcd-3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio3 30 0>; - enable-active-high; - }; - - reg_can_3v3: regulator@6 { - compatible = "regulator-fixed"; - reg = <6>; - regulator-name = "can-3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio2 13 0>; - enable-active-high; - }; - - }; - sound { compatible = "fsl,imx28-evk-sgtl5000", "fsl,mxs-audio-sgtl5000"; -- GitLab From c1539840fc25d89f3fe51038144dc0233b333453 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 20 Jul 2018 17:39:17 -0300 Subject: [PATCH 0296/1692] ARM: dts: imx28-evk: Convert to the new display bindings imx28-evk board has a Seiko 43WVF1G parallel display. Instead of hardcoding the display timings in the device tree, use the "sii,43wvf1g" compatible instead. 
This aligns with the new mxsfb bindings scheme documented at: Documentation/devicetree/bindings/display/mxsfb.txt Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx28-evk.dts | 48 ++++++++++++++++----------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 210aee097b36..93ab5bdfe068 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts @@ -74,6 +74,26 @@ reg_can_3v3: regulator-can-3v3 { enable-active-high; }; + reg_lcd_5v: regulator-lcd-5v { + compatible = "regulator-fixed"; + regulator-name = "lcd-5v"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; + + panel { + compatible = "sii,43wvf1g"; + backlight = <&backlight_display>; + dvdd-supply = <®_lcd_3v3>; + avdd-supply = <®_lcd_5v>; + + port { + panel_in: endpoint { + remote-endpoint = <&display_out>; + }; + }; + }; + apb@80000000 { apbh@80000000 { gpmi-nand@8000c000 { @@ -177,31 +197,11 @@ lcdif@80030000 { pinctrl-names = "default"; pinctrl-0 = <&lcdif_24bit_pins_a &lcdif_pins_evk>; - lcd-supply = <®_lcd_3v3>; - display = <&display0>; status = "okay"; - display0: display0 { - bits-per-pixel = <32>; - bus-width = <24>; - - display-timings { - native-mode = <&timing0>; - timing0: timing0 { - clock-frequency = <33500000>; - hactive = <800>; - vactive = <480>; - hback-porch = <89>; - hfront-porch = <164>; - vback-porch = <23>; - vfront-porch = <10>; - hsync-len = <10>; - vsync-len = <10>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <0>; - }; + port { + display_out: endpoint { + remote-endpoint = <&panel_in>; }; }; }; @@ -350,7 +350,7 @@ user { }; }; - backlight { + backlight_display: backlight { compatible = "pwm-backlight"; pwms = <&pwm 2 5000000>; brightness-levels = <0 4 8 16 32 64 128 255>; -- GitLab From cde305e9ce28d96d7f63e4fb5298291e90a91f8f Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 20 Jul 2018 17:39:18 -0300 Subject: [PATCH 0297/1692] ARM: dts: imx23-evk: Move regulators outside simple-bus It is recommended to place regulators outside simple-bus, so move them accordingly. 
Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx23-evk.dts | 42 +++++++++++++-------------------- 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index 9fb47724b9c1..494095d40327 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts @@ -13,6 +13,23 @@ memory@40000000 { reg = <0x40000000 0x08000000>; }; + reg_vddio_sd0: regulator-vddio-sd0 { + compatible = "regulator-fixed"; + regulator-name = "vddio-sd0"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio1 29 0>; + }; + + reg_lcd_3v3: regulator-lcd-3v3 { + compatible = "regulator-fixed"; + regulator-name = "lcd-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio1 18 0>; + enable-active-high; + }; + apb@80000000 { apbh@80000000 { gpmi-nand@8000c000 { @@ -118,31 +135,6 @@ usb0: usb@80080000 { }; }; - regulators { - compatible = "simple-bus"; - #address-cells = <1>; - #size-cells = <0>; - - reg_vddio_sd0: regulator@0 { - compatible = "regulator-fixed"; - reg = <0>; - regulator-name = "vddio-sd0"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio1 29 0>; - }; - - reg_lcd_3v3: regulator@1 { - compatible = "regulator-fixed"; - reg = <1>; - regulator-name = "lcd-3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - gpio = <&gpio1 18 0>; - enable-active-high; - }; - }; - backlight { compatible = "pwm-backlight"; pwms = <&pwm 2 5000000>; -- GitLab From 549644b8c3c125355a361def1e42f8319ac6ad6c Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 20 Jul 2018 17:39:19 -0300 Subject: [PATCH 0298/1692] ARM: dts: imx23-evk: Convert to the new display bindings imx23-evk board has a Seiko 43WVF1G parallel display. Instead of hardcoding the display timings in the device tree, use the "sii,43wvf1g" compatible instead. 
This aligns with the new mxsfb bindings scheme documented at: Documentation/devicetree/bindings/display/mxsfb.txt Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx23-evk.dts | 48 ++++++++++++++++----------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index 494095d40327..ad2ae25b7b4d 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts @@ -30,6 +30,26 @@ reg_lcd_3v3: regulator-lcd-3v3 { enable-active-high; }; + reg_lcd_5v: regulator-lcd-5v { + compatible = "regulator-fixed"; + regulator-name = "lcd-5v"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; + + panel { + compatible = "sii,43wvf1g"; + backlight = <&backlight_display>; + dvdd-supply = <®_lcd_3v3>; + avdd-supply = <®_lcd_5v>; + + port { + panel_in: endpoint { + remote-endpoint = <&display_out>; + }; + }; + }; + apb@80000000 { apbh@80000000 { gpmi-nand@8000c000 { @@ -69,31 +89,11 @@ MX23_PAD_SSP1_DETECT__SSP1_DETECT lcdif@80030000 { pinctrl-names = "default"; pinctrl-0 = <&lcdif_24bit_pins_a>; - lcd-supply = <®_lcd_3v3>; - display = <&display0>; status = "okay"; - display0: display0 { - bits-per-pixel = <32>; - bus-width = <24>; - - display-timings { - native-mode = <&timing0>; - timing0: timing0 { - clock-frequency = <9200000>; - hactive = <480>; - vactive = <272>; - hback-porch = <15>; - hfront-porch = <8>; - vback-porch = <12>; - vfront-porch = <4>; - hsync-len = <1>; - vsync-len = <1>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <0>; - }; + port { + display_out: endpoint { + remote-endpoint = <&panel_in>; }; }; }; @@ -135,7 +135,7 @@ usb0: usb@80080000 { }; }; - backlight { + backlight_display: backlight { compatible = "pwm-backlight"; pwms = <&pwm 2 5000000>; brightness-levels = <0 4 8 16 32 64 128 255>; -- GitLab From 0ffbc2824282793a3ebf04d4804dea15d5b211c6 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 20 Jul 2018 17:39:20 -0300 Subject: [PATCH 0299/1692] ARM: mxs_defconfig: Select CONFIG_DRM_PANEL_SEIKO_43WVF1G imx23-evk and imx28-evk boards use a Seiko 43WVF1G panel. Now that the DRM mxsfb driver is the one selected by default, let's also select CONFIG_DRM_PANEL_SEIKO_43WVF1G so that these boards continue to have a working display by default. Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/configs/mxs_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index 148226e36152..7b8212857535 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig @@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_DRM=y +CONFIG_DRM_PANEL_SEIKO_43WVF1G=y CONFIG_DRM_MXSFB=y CONFIG_FB_MODE_HELPERS=y CONFIG_BACKLIGHT_LCD_SUPPORT=y -- GitLab From b9543a2e39dc909e6b7ec901b6c7208d01d5c0dd Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 20 Jul 2018 17:39:21 -0300 Subject: [PATCH 0300/1692] ARM: imx_v6_v7_defconfig: Select CONFIG_DRM_PANEL_SEIKO_43WVF1G imx6sl-evk, imx6sll-evk and imx6sx-sdb boards use a Seiko 43WVF1G panel. Now that the DRM mxsfb driver is the one selected by default, let's also select CONFIG_DRM_PANEL_SEIKO_43WVF1G so that these boards continue to have a working display by default. 
Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/configs/imx_v6_v7_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index e2c127608bcc..7eca43ff69bb 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y CONFIG_DRM=y CONFIG_DRM_PANEL_LVDS=y CONFIG_DRM_PANEL_SIMPLE=y +CONFIG_DRM_PANEL_SEIKO_43WVF1G=y CONFIG_DRM_DW_HDMI_AHB_AUDIO=m CONFIG_DRM_DW_HDMI_CEC=y CONFIG_DRM_IMX=y -- GitLab From d177c8b61d6b4ef360b1c2682e4d8e3bae01738b Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Thu, 26 Jul 2018 12:48:05 +0800 Subject: [PATCH 0301/1692] arm64: allwinner: dts: h6: fix Pine H64 MMC bus width Currently the enabled MMC controllers on Pine H64 do not have bus-width set, which make them fall back to 1-bit mode and become quite slow. Fix this by add the corresponding bus-width properties. Fixes: ecbd611882a1 ("arm64: allwinner: h6: enable MMC0/2 on Pine H64") Signed-off-by: Icenowy Zheng Signed-off-by: Maxime Ripard --- arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts index ceffc40810ee..48daec7f78ba 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts @@ -46,6 +46,7 @@ &mmc0 { pinctrl-0 = <&mmc0_pins>; vmmc-supply = <®_cldo1>; cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; + bus-width = <4>; status = "okay"; }; @@ -56,6 +57,7 @@ &mmc2 { vqmmc-supply = <®_bldo2>; non-removable; cap-mmc-hw-reset; + bus-width = <8>; status = "okay"; }; -- GitLab From 166cd4421b0dabd2e438f5384f4ecf930dc8ab08 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 23 Aug 2018 23:43:45 +0200 Subject: [PATCH 0302/1692] mtd: rawnand: docg4: Remove wrong __init annotations If gcc (e.g. 4.1.2) decides not to inline init_mtd_structs() and read_id_reg(), this will cause section mismatches, and crashes: WARNING: drivers/mtd/nand/raw/docg4.o(.text+0xc10): Section mismatch in reference from the function docg4_attach_chip() to the function .init.text:init_mtd_structs() The function docg4_attach_chip() references the function __init init_mtd_structs(). This is often because docg4_attach_chip lacks a __init annotation or the annotation of init_mtd_structs is wrong. WARNING: drivers/mtd/nand/raw/docg4.o(.text+0xc3e): Section mismatch in reference from the function docg4_attach_chip() to the function .init.text:read_id_reg() The function docg4_attach_chip() references the function __init read_id_reg(). This is often because docg4_attach_chip lacks a __init annotation or the annotation of read_id_reg is wrong. Fix this by dropping the now incorrect __init annotations from init_mtd_structs() and read_id_reg(). 
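The pattern behind the warning, reduced to a minimal hypothetical sketch (not code taken from docg4 itself):

	/* placed in .init.text and discarded once boot-time init is over */
	static int __init setup_defaults(void)
	{
		return 0;
	}

	/* stays in .text for the whole lifetime of the driver */
	static int attach_chip(void)
	{
		/*
		 * If the compiler chooses not to inline setup_defaults(),
		 * this call is a reference from .text into .init.text;
		 * after init memory has been freed it would jump into
		 * released memory, which is what modpost warns about.
		 */
		return setup_defaults();
	}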
Fixes: 66a38478dcc5b5a3 ("mtd: rawnand: docg4: convert driver to nand_scan()") Signed-off-by: Geert Uytterhoeven Signed-off-by: Boris Brezillon --- drivers/mtd/nand/raw/docg4.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c index a3f04315c05c..427fcbc1b71c 100644 --- a/drivers/mtd/nand/raw/docg4.c +++ b/drivers/mtd/nand/raw/docg4.c @@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev) return 0; } -static void __init init_mtd_structs(struct mtd_info *mtd) +static void init_mtd_structs(struct mtd_info *mtd) { /* initialize mtd and nand data structures */ @@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd) } -static int __init read_id_reg(struct mtd_info *mtd) +static int read_id_reg(struct mtd_info *mtd) { struct nand_chip *nand = mtd_to_nand(mtd); struct docg4_priv *doc = nand_get_controller_data(nand); -- GitLab From 1ab534e85c93945f7862378d8c8adcf408205b19 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 24 Aug 2018 10:03:51 -0700 Subject: [PATCH 0303/1692] x86/spectre: Add missing family 6 check to microcode check The check for Spectre microcodes does not check for family 6, only the model numbers. Add a family 6 check to avoid ambiguity with other families. Fixes: a5b296636453 ("x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes") Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180824170351.34874-2-andi@firstfloor.org --- arch/x86/kernel/cpu/intel.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 401e8c133108..fc3c07fe7df5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_HYPERVISOR)) return false; + if (c->x86 != 6) + return false; + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { if (c->x86_model == spectre_bad_microcodes[i].model && c->x86_stepping == spectre_bad_microcodes[i].stepping) -- GitLab From cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 24 Aug 2018 10:03:50 -0700 Subject: [PATCH 0304/1692] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+ On Nehalem and newer core CPUs the CPU cache internally uses 44 bits physical address space. The L1TF workaround is limited by this internal cache address width, and needs to have one bit free there for the mitigation to work. Older client systems report only 36bit physical address space so the range check decides that L1TF is not mitigated for a 36bit phys/32GB system with some memory holes. But since these actually have the larger internal cache width this warning is bogus because it would only really be needed if the system had more than 43bits of memory. Add a new internal x86_cache_bits field. Normally it is the same as the physical bits field reported by CPUID, but for Nehalem and newerforce it to be at least 44bits. Change the L1TF memory size warning to use the new cache_bits field to avoid bogus warnings and remove the bogus comment about memory size. 
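Concretely, the range check compares the top of RAM against half_pa = l1tf_pfn_limit() << PAGE_SHIFT, which works out to 2^(x86_cache_bits - 1). With the 36 bits such a client CPU reports via CPUID that limit is 2^35 = 32 GB, so a 32 GB machine whose memory holes push RAM above that boundary trips the warning; with the 44 bits the cache actually uses, the limit becomes 2^43 = 8 TB and the false positive disappears.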
Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf") Reported-by: George Anchev Reported-by: Christopher Snowhill Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Cc: Michael Hocko Cc: vbabka@suse.cz Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org --- arch/x86/include/asm/processor.h | 4 ++- arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++++++---- arch/x86/kernel/cpu/common.c | 1 + 3 files changed, 45 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c24297268ebc..d53c54b842da 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -132,6 +132,8 @@ struct cpuinfo_x86 { /* Index into per_cpu list: */ u16 cpu_index; u32 microcode; + /* Address space bits used by the cache internally */ + u8 x86_cache_bits; unsigned initialized : 1; } __randomize_layout; @@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c); static inline unsigned long long l1tf_pfn_limit(void) { - return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT); + return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); } extern void early_cpu_init(void); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4c2313d0b9ca..40bdaea97fe7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); +/* + * These CPUs all support 44bits physical address space internally in the + * cache but CPUID can report a smaller number of physical address bits. + * + * The L1TF mitigation uses the top most address bit for the inversion of + * non present PTEs. When the installed memory reaches into the top most + * address bit due to memory holes, which has been observed on machines + * which report 36bits physical address bits and have 32G RAM installed, + * then the mitigation range check in l1tf_select_mitigation() triggers. + * This is a false positive because the mitigation is still possible due to + * the fact that the cache uses 44bit internally. Use the cache bits + * instead of the reported physical bits and adjust them on the affected + * machines to 44bit if the reported bits are less than 44. 
+ */ +static void override_cache_bits(struct cpuinfo_x86 *c) +{ + if (c->x86 != 6) + return; + + switch (c->x86_model) { + case INTEL_FAM6_NEHALEM: + case INTEL_FAM6_WESTMERE: + case INTEL_FAM6_SANDYBRIDGE: + case INTEL_FAM6_IVYBRIDGE: + case INTEL_FAM6_HASWELL_CORE: + case INTEL_FAM6_HASWELL_ULT: + case INTEL_FAM6_HASWELL_GT3E: + case INTEL_FAM6_BROADWELL_CORE: + case INTEL_FAM6_BROADWELL_GT3E: + case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_DESKTOP: + if (c->x86_cache_bits < 44) + c->x86_cache_bits = 44; + break; + } +} + static void __init l1tf_select_mitigation(void) { u64 half_pa; @@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void) if (!boot_cpu_has_bug(X86_BUG_L1TF)) return; + override_cache_bits(&boot_cpu_data); + switch (l1tf_mitigation) { case L1TF_MITIGATION_OFF: case L1TF_MITIGATION_FLUSH_NOWARN: @@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void) return; #endif - /* - * This is extremely unlikely to happen because almost all - * systems have far more MAX_PA/2 than RAM can be fit into - * DIMM slots. - */ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 84dee5ab745a..44c4ef3d989b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c) else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) c->x86_phys_bits = 36; #endif + c->x86_cache_bits = c->x86_phys_bits; } static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) -- GitLab From e3a5dc08715abba646324fd8456282bd77798e9c Mon Sep 17 00:00:00 2001 From: Nikolas Nyby Date: Sat, 25 Aug 2018 19:10:54 -0400 Subject: [PATCH 0305/1692] x86/Kconfig: Fix trivial typo Fix a typo in the Kconfig help text: adverticed -> advertised. Signed-off-by: Nikolas Nyby Signed-off-by: Thomas Gleixner Cc: trivial@kernel.org Cc: tglx@linutronix.de Cc: x86@kernel.org Link: https://lkml.kernel.org/r/20180825231054.23813-1-nikolas@gnu.org --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c5ff296bc5d1..1a0be022f91d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2843,7 +2843,7 @@ config X86_SYSFB This option, if enabled, marks VGA/VBE/EFI framebuffers as generic framebuffers so the new generic system-framebuffer drivers can be used on x86. If the framebuffer is not compatible with the generic - modes, it is adverticed as fallback platform framebuffer so legacy + modes, it is advertised as fallback platform framebuffer so legacy drivers like efifb, vesafb and uvesafb can pick it up. If this option is not selected, all system framebuffers are always marked as fallback platform framebuffers as usual. -- GitLab From 691a03cfe8ca483f9c48153b869d354e4ae3abef Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 21 Aug 2018 11:59:52 +0200 Subject: [PATCH 0306/1692] USB: serial: io_ti: fix array underflow in completion handler As reported by Dan Carpenter, a malicious USB device could set port_number to a negative value and we would underflow the port array in the interrupt completion handler. 
As these devices only have one or two ports, fix this by making sure we only consider the seventh bit when determining the port number (and ignore bits 0xb0 which are typically set to 0x30). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable Reported-by: Dan Carpenter Signed-off-by: Johan Hovold --- drivers/usb/serial/io_ti.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h index e53c68261017..9bbcee37524e 100644 --- a/drivers/usb/serial/io_ti.h +++ b/drivers/usb/serial/io_ti.h @@ -173,7 +173,7 @@ struct ump_interrupt { } __attribute__((packed)); -#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3) +#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01) #define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) #define TIUMP_INTERRUPT_CODE_LSR 0x03 #define TIUMP_INTERRUPT_CODE_MSR 0x04 -- GitLab From 5dfdd24eb3d39d815bc952ae98128e967c9bba49 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 21 Aug 2018 11:59:53 +0200 Subject: [PATCH 0307/1692] USB: serial: ti_usb_3410_5052: fix array underflow in completion handler Similarly to a recently reported bug in io_ti, a malicious USB device could set port_number to a negative value and we would underflow the port array in the interrupt completion handler. As these devices only have one or two ports, fix this by making sure we only consider the seventh bit when determining the port number (and ignore bits 0xb0 which are typically set to 0x30). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable Signed-off-by: Johan Hovold --- drivers/usb/serial/ti_usb_3410_5052.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 3010878f7f8e..e3c5832337e0 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state) static int ti_get_port_from_code(unsigned char code) { - return (code >> 4) - 3; + return (code >> 6) & 0x01; } static int ti_get_func_from_code(unsigned char code) -- GitLab From 4d982e25d0bdc83d8c64e66fdeca0b89240b3b85 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 27 Aug 2018 09:22:45 -0400 Subject: [PATCH 0308/1692] ext4: avoid divide by zero fault when deleting corrupted inline directories A specially crafted file system can trick empty_inline_dir() into reading past the last valid entry in a inline directory, and then run into the end of xattr marker. This will trigger a divide by zero fault. Fix this by using the size of the inline directory instead of dir->i_size. Also clean up error reporting in __ext4_check_dir_entry so that the message is clearer and more understandable --- and avoids the division by zero trap if the size passed in is zero. (I'm not sure why we coded it that way in the first place; printing offset % size is actually more confusing and less useful.) 
https://bugzilla.kernel.org/show_bug.cgi?id=200933 Signed-off-by: Theodore Ts'o Reported-by: Wen Xu Cc: stable@vger.kernel.org --- fs/ext4/dir.c | 20 +++++++++----------- fs/ext4/inline.c | 4 +++- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index e2902d394f1b..f93f9881ec18 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; else if (unlikely(((char *) de - buf) + rlen > size)) - error_msg = "directory entry across range"; + error_msg = "directory entry overrun"; else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; @@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, if (filp) ext4_error_file(filp, function, line, bh->b_blocknr, - "bad entry in directory: %s - offset=%u(%u), " - "inode=%u, rec_len=%d, name_len=%d", - error_msg, (unsigned) (offset % size), - offset, le32_to_cpu(de->inode), - rlen, de->name_len); + "bad entry in directory: %s - offset=%u, " + "inode=%u, rec_len=%d, name_len=%d, size=%d", + error_msg, offset, le32_to_cpu(de->inode), + rlen, de->name_len, size); else ext4_error_inode(dir, function, line, bh->b_blocknr, - "bad entry in directory: %s - offset=%u(%u), " - "inode=%u, rec_len=%d, name_len=%d", - error_msg, (unsigned) (offset % size), - offset, le32_to_cpu(de->inode), - rlen, de->name_len); + "bad entry in directory: %s - offset=%u, " + "inode=%u, rec_len=%d, name_len=%d, size=%d", + error_msg, offset, le32_to_cpu(de->inode), + rlen, de->name_len, size); return 1; } diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 3543fe80a3c4..7b4736022761 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) { int err, inline_size; struct ext4_iloc iloc; + size_t inline_len; void *inline_pos; unsigned int offset; struct ext4_dir_entry_2 *de; @@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) goto out; } + inline_len = ext4_get_inline_size(dir); offset = EXT4_INLINE_DOTDOT_SIZE; - while (offset < dir->i_size) { + while (offset < inline_len) { de = ext4_get_inline_entry(dir, &iloc, offset, &inline_pos, &inline_size); if (ext4_check_dir_entry(dir, NULL, de, -- GitLab From f7c90c2aa4004808dff777ba6ae2c7294dd06851 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 21 Aug 2018 17:37:54 +0200 Subject: [PATCH 0309/1692] x86/xen: don't write ptes directly in 32-bit PV guests In some cases 32-bit PAE PV guests still write PTEs directly instead of using hypercalls. This is especially bad when clearing a PTE as this is done via 32-bit writes which will produce intermediate L1TF attackable PTEs. Change the code to use hypercalls instead. 
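For reference, the hazard with direct writes comes from the PTE being two machine words on 32-bit PAE. A split clear, sketched here along the lines of the generic pgtable-3level helpers rather than the Xen code itself, briefly leaves a non-present entry that still carries physical-address bits:

	static void pte_clear_split(pte_t *ptep)
	{
		ptep->pte_low = 0;	/* present bit gone ...                */
		smp_wmb();
		ptep->pte_high = 0;	/* ... but until this store lands the
					 * entry still holds the upper half of
					 * the old physical address: a stale,
					 * non-present PTE of the kind L1TF
					 * can exploit speculatively.          */
	}

Funnelling the update through a single hypercall avoids ever exposing that intermediate state to the hardware.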
Signed-off-by: Juergen Gross Reviewed-by: Jan Beulich Signed-off-by: Boris Ostrovsky --- arch/x86/xen/mmu_pv.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 9e7012858420..9396b4d17064 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -434,14 +434,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val) static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) { trace_xen_mmu_set_pte_atomic(ptep, pte); - set_64bit((u64 *)ptep, native_pte_val(pte)); + __xen_set_pte(ptep, pte); } static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { trace_xen_mmu_pte_clear(mm, addr, ptep); - if (!xen_batched_set_pte(ptep, native_make_pte(0))) - native_pte_clear(mm, addr, ptep); + __xen_set_pte(ptep, native_make_pte(0)); } static void xen_pmd_clear(pmd_t *pmdp) @@ -1569,7 +1568,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & pte_val_ma(pte)); #endif - native_set_pte(ptep, pte); + __xen_set_pte(ptep, pte); } /* Early in boot, while setting up the initial pagetable, assume -- GitLab From 908946c4bee705542f38bc06c0203a6d83e3700c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 27 Aug 2018 08:37:46 -0600 Subject: [PATCH 0310/1692] Fix up libata MAINTAINERS entry The email was botched in one entry, and I also forgot to update the location of the git tree. It'll be under the linux-block umbrella, just with different branches. Reported-by: Baruch Siach Fixes: 7634ccd2da97 ("libata: maintainership update") Signed-off-by: Jens Axboe Signed-off-by: Linus Torvalds --- MAINTAINERS | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index a5b256b25905..9ad052aeac39 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8255,9 +8255,9 @@ F: drivers/ata/pata_arasan_cf.c LIBATA PATA DRIVERS M: Bartlomiej Zolnierkiewicz -M: Jens Axboe +M: Jens Axboe L: linux-ide@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: drivers/ata/pata_*.c F: drivers/ata/ata_generic.c @@ -8275,7 +8275,7 @@ LIBATA SATA AHCI PLATFORM devices support M: Hans de Goede M: Jens Axboe L: linux-ide@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: drivers/ata/ahci_platform.c F: drivers/ata/libahci_platform.c @@ -8291,7 +8291,7 @@ F: drivers/ata/sata_promise.* LIBATA SUBSYSTEM (Serial and Parallel ATA drivers) M: Jens Axboe L: linux-ide@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: drivers/ata/ F: include/linux/ata.h -- GitLab From 4051c323c59b535cfb7dc10b349544b4ad49c21e Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Thu, 2 Aug 2018 11:41:07 +0300 Subject: [PATCH 0311/1692] ARC: configs: cleanup - Remove CONFIG_DEFAULT_HOSTNAME from defconfigs There's no reason to set the same hostname to all ARC boards by default. It usually gets overwritten by init scripts anyways. 
- Remove disabled CONFIG_DEVKMEM from defconfigs It is disabled by default Signed-off-by: Alexey Brodkin Signed-off-by: Vineet Gupta --- arch/arc/configs/axs101_defconfig | 2 -- arch/arc/configs/axs103_defconfig | 2 -- arch/arc/configs/axs103_smp_defconfig | 2 -- arch/arc/configs/haps_hs_defconfig | 2 -- arch/arc/configs/haps_hs_smp_defconfig | 2 -- arch/arc/configs/hsdk_defconfig | 1 - arch/arc/configs/nps_defconfig | 1 - arch/arc/configs/nsim_700_defconfig | 2 -- arch/arc/configs/nsim_hs_defconfig | 2 -- arch/arc/configs/nsim_hs_smp_defconfig | 2 -- arch/arc/configs/nsimosci_defconfig | 2 -- arch/arc/configs/nsimosci_hs_defconfig | 2 -- arch/arc/configs/nsimosci_hs_smp_defconfig | 2 -- arch/arc/configs/tb10x_defconfig | 1 - arch/arc/configs/vdk_hs38_defconfig | 2 -- arch/arc/configs/vdk_hs38_smp_defconfig | 1 - 16 files changed, 28 deletions(-) diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index a635ea972304..41a97eb7598d 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -1,4 +1,3 @@ -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -63,7 +62,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y CONFIG_MOUSE_SERIAL=y CONFIG_MOUSE_SYNAPTICS_USB=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index aa507e423075..d8e2ca2385cc 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -1,4 +1,3 @@ -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -64,7 +63,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y CONFIG_MOUSE_SERIAL=y CONFIG_MOUSE_SYNAPTICS_USB=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index eba07f468654..1e729b9726cd 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -1,4 +1,3 @@ -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -65,7 +64,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y CONFIG_MOUSE_SERIAL=y CONFIG_MOUSE_SYNAPTICS_USB=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index 098b19fbaa51..240dd2cd5148 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -1,4 +1,3 @@ -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -57,7 +56,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_ARC_PS2=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 0104c404d897..14ae7e5acc7c 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -1,4 +1,3 @@ -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -60,7 +59,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_ARC_PS2=y # 
CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 6491be0ddbc9..1dec2b4bc5e6 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -1,4 +1,3 @@ -CONFIG_DEFAULT_HOSTNAME="ARCLinux" CONFIG_SYSVIPC=y # CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NO_HZ_IDLE=y diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 7c9c706ae7f6..31ba224bbfb4 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -59,7 +59,6 @@ CONFIG_NETCONSOLE=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 99e05cf63fca..8e0b8b134cd9 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -1,5 +1,4 @@ # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -44,7 +43,6 @@ CONFIG_LXT_PHY=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_ARC=y CONFIG_SERIAL_ARC_CONSOLE=y # CONFIG_HW_RANDOM is not set diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index 0dc4f9b737e7..739b90e5e893 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig @@ -1,5 +1,4 @@ # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -45,7 +44,6 @@ CONFIG_DEVTMPFS=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_ARC=y CONFIG_SERIAL_ARC_CONSOLE=y # CONFIG_HW_RANDOM is not set diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index be3c30a15e54..b5895bdf3a93 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -1,5 +1,4 @@ # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set # CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_HIGH_RES_TIMERS=y @@ -44,7 +43,6 @@ CONFIG_DEVTMPFS=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_ARC=y CONFIG_SERIAL_ARC_CONSOLE=y # CONFIG_HW_RANDOM is not set diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 3a74b9b21772..f14eeff7d308 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -1,5 +1,4 @@ # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y # CONFIG_CROSS_MEMORY_ATTACH is not set @@ -48,7 +47,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_ARC_PS2=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index ea2834b4dc1d..025298a48305 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ 
b/arch/arc/configs/nsimosci_hs_defconfig @@ -1,5 +1,4 @@ # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y # CONFIG_CROSS_MEMORY_ATTACH is not set @@ -47,7 +46,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_ARC_PS2=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index 80a5a1b4924b..df7b77b13b82 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -1,4 +1,3 @@ -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_SWAP is not set CONFIG_SYSVIPC=y # CONFIG_CROSS_MEMORY_ATTACH is not set @@ -58,7 +57,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_ARC_PS2=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index 2cc87f909747..a7f65313f84a 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -57,7 +57,6 @@ CONFIG_STMMAC_ETH=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index f629493929ea..db47c3541f15 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -1,5 +1,4 @@ # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y @@ -53,7 +52,6 @@ CONFIG_NATIONAL_PHY=y CONFIG_MOUSE_PS2_TOUCHKIT=y CONFIG_SERIO_ARC_PS2=y # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index 21f0ca26a05d..a8ac5e917d9a 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -1,5 +1,4 @@ # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="ARCLinux" # CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y -- GitLab From 5c0920897af59779546e9ea0e89c5db45c8aff33 Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Thu, 2 Aug 2018 13:19:37 +0300 Subject: [PATCH 0312/1692] ARC: [plat-axs*/plat-hsdk]: Allow U-Boot to pass MAC-address to the kernel Otherwise kernel uses random MAC which is not very conveniet. With that change in place use might set desired MAC in U-Boot with "setenv ethaddr 11:22:33:44:55:66", save environment and then from boot to boot the same MAC will be used by the kernel. One other note for this to happen it's required to pass board's .dtb in U-Boot's "bootm" command like that: ------------------->8----------------- bootm 0x82000000 - 0x84000000 ------------------->8----------------- Here 0x82000000 is location of uImage while 0x80000000 is location of either axs10x.dtb or hsdk.dtb previously loaded from SD-card, USB storage or TFTP server. 
Signed-off-by: Alexey Brodkin Cc: Rob Herring Cc: stable@vger.kernel.org # 4.14 Cc: devicetree@vger.kernel.org Signed-off-by: Vineet Gupta --- arch/arc/boot/dts/axs10x_mb.dtsi | 7 ++++++- arch/arc/boot/dts/hsdk.dts | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi index 47b74fbc403c..37bafd44e36d 100644 --- a/arch/arc/boot/dts/axs10x_mb.dtsi +++ b/arch/arc/boot/dts/axs10x_mb.dtsi @@ -9,6 +9,10 @@ */ / { + aliases { + ethernet = &gmac; + }; + axs10x_mb { compatible = "simple-bus"; #address-cells = <1>; @@ -68,7 +72,7 @@ pguclk: pguclk { }; }; - ethernet@0x18000 { + gmac: ethernet@0x18000 { #interrupt-cells = <1>; compatible = "snps,dwmac"; reg = < 0x18000 0x2000 >; @@ -81,6 +85,7 @@ ethernet@0x18000 { max-speed = <100>; resets = <&creg_rst 5>; reset-names = "stmmaceth"; + mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ }; ehci@0x40000 { diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index 006aa3de5348..d00f283094d3 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -25,6 +25,10 @@ chosen { bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1"; }; + aliases { + ethernet = &gmac; + }; + cpus { #address-cells = <1>; #size-cells = <0>; @@ -163,7 +167,7 @@ mmcclk_biu: mmcclk-biu { #clock-cells = <0>; }; - ethernet@8000 { + gmac: ethernet@8000 { #interrupt-cells = <1>; compatible = "snps,dwmac"; reg = <0x8000 0x2000>; @@ -176,6 +180,7 @@ ethernet@8000 { phy-handle = <&phy0>; resets = <&cgu_rst HSDK_ETH_RESET>; reset-names = "stmmaceth"; + mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ mdio { #address-cells = <1>; -- GitLab From c83532fb0fe053d2e43e9387354cb1b52ba26427 Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Thu, 2 Aug 2018 11:50:16 +0300 Subject: [PATCH 0313/1692] ARC: [plat-axs*]: Enable SWAP SWAP support on ARC was fixed earlier by commit 6e3761145a9b ("ARC: Fix CONFIG_SWAP") so now we may safely enable it on platforms that have external media like USB and SD-card. 
Note: it was already allowed for HSDK Signed-off-by: Alexey Brodkin Cc: stable@vger.kernel.org # 6e3761145a9b: ARC: Fix CONFIG_SWAP Signed-off-by: Vineet Gupta --- arch/arc/configs/axs101_defconfig | 1 - arch/arc/configs/axs103_defconfig | 1 - arch/arc/configs/axs103_smp_defconfig | 1 - 3 files changed, 3 deletions(-) diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 41a97eb7598d..41bc08be6a3b 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -1,4 +1,3 @@ -# CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_CROSS_MEMORY_ATTACH is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index d8e2ca2385cc..1e1c4a8011b5 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -1,4 +1,3 @@ -# CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_CROSS_MEMORY_ATTACH is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 1e729b9726cd..6b0c0cfd5c30 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -1,4 +1,3 @@ -# CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_CROSS_MEMORY_ATTACH is not set -- GitLab From 1e3bece2ded71eb85ac297a43002a942964e381d Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Mon, 6 Aug 2018 19:44:23 +0300 Subject: [PATCH 0314/1692] ARC: cleanup show_faulting_vma() - Remove unused variables - check return value of file_path Signed-off-by: Eugeniy Paltsev Signed-off-by: Vineet Gupta --- arch/arc/kernel/troubleshoot.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 783b20354f8b..e8d9fb452346 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -83,9 +83,6 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) static void show_faulting_vma(unsigned long address, char *buf) { struct vm_area_struct *vma; - struct inode *inode; - unsigned long ino = 0; - dev_t dev = 0; char *nm = buf; struct mm_struct *active_mm = current->active_mm; @@ -99,12 +96,10 @@ static void show_faulting_vma(unsigned long address, char *buf) * if the container VMA is not found */ if (vma && (vma->vm_start <= address)) { - struct file *file = vma->vm_file; - if (file) { - nm = file_path(file, buf, PAGE_SIZE - 1); - inode = file_inode(vma->vm_file); - dev = inode->i_sb->s_dev; - ino = inode->i_ino; + if (vma->vm_file) { + nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); + if (IS_ERR(nm)) + nm = "?"; } pr_info(" @off 0x%lx in [%s]\n" " VMA: 0x%08lx to 0x%08lx\n", -- GitLab From c27d0e9045bbbabffdde2bdba74e9971c4194ac4 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 16 Aug 2018 10:20:33 -0700 Subject: [PATCH 0315/1692] ARC: sort Kconfig Signed-off-by: Vineet Gupta --- arch/arc/Kconfig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 6d5eb8267e42..b4441b0764d7 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -9,6 +9,7 @@ config ARC def_bool y select ARC_TIMERS + select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SG_CHAIN @@ -28,8 +29,12 @@ config ARC select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK + select HAVE_DEBUG_STACKOVERFLOW select HAVE_FUTEX_CMPXCHG if FUTEX + select 
HAVE_GENERIC_DMA_COHERENT select HAVE_IOREMAP_PROT + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZMA select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_MEMBLOCK @@ -44,11 +49,6 @@ config ARC select OF_EARLY_FLATTREE select OF_RESERVED_MEM select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING - select HAVE_DEBUG_STACKOVERFLOW - select HAVE_GENERIC_DMA_COHERENT - select HAVE_KERNEL_GZIP - select HAVE_KERNEL_LZMA - select ARCH_HAS_PTE_SPECIAL config ARCH_HAS_CACHE_LINE_SIZE def_bool y -- GitLab From 1cadf2b36809dca78ea9bbee7789a30727c1b5b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 16 Aug 2018 12:01:03 +0200 Subject: [PATCH 0316/1692] drm/amdgpu: fix VM clearing for the root PD MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to figure out the address after validating the BO, not before. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Junwei Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..e40ca8676418 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -369,7 +369,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, uint64_t addr; int r; - addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; if (pte_support_ats) { @@ -401,6 +400,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; + addr = amdgpu_bo_gpu_offset(bo); if (ats_entries) { uint64_t ats_value; -- GitLab From 9296435729dc8a2fd28b42391ff9f1ff310ebb7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 21 Aug 2018 15:09:39 +0200 Subject: [PATCH 0317/1692] drm/amdgpu: fix preamble handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit At this point the command submission can still be interrupted. 
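In other words, amdgpu_cs_ib_fill() runs while later stages of the ioctl can still fail or be interrupted (for instance with -ERESTARTSYS) and be retried, so setting the long-lived ctx->preamble_presented flag there lets it leak out of a submission that never completes, and the first job that actually reaches the ring could then miss AMDGPU_PREAMBLE_IB_PRESENT_FIRST. A simplified sketch of the intended ordering (reserve_and_validate() stands in for the real parser steps, this is not the full CS path):

	r = reserve_and_validate(p);	/* interruptible, may restart */
	if (r)
		return r;		/* no per-context state touched yet */

	/* past the last failure point: only now publish per-context state */
	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}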
Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 502b94fb116a..09703c87d676 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (r) return r; - if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { - parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; - if (!parser->ctx->preamble_presented) { - parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; - parser->ctx->preamble_presented = true; - } - } + if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) + parser->job->preamble_status |= + AMDGPU_PREAMBLE_IB_PRESENT; if (parser->ring && parser->ring != ring) return -EINVAL; @@ -1241,6 +1237,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_cs_post_dependencies(p); + if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && + !p->ctx->preamble_presented) { + job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; + p->ctx->preamble_presented = true; + } + cs->out.handle = seq; job->uf_sequence = seq; -- GitLab From 4f0ecd36f276941453f6ea7f76308a2f14540987 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Wed, 22 Aug 2018 20:18:25 +0800 Subject: [PATCH 0318/1692] amdgpu: fix multi-process hang issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SWDEV-146499: hang during multi vulkan process testing cause: the second frame's PREAMBLE_IB have clear-state and LOAD actions, those actions ruin the pipeline that is still doing process in the previous frame's work-load IB. fix: need insert pipeline sync if have context switch for SRIOV (because only SRIOV will report PREEMPTION flag to UMD) Signed-off-by: Monk Liu Signed-off-by: Emily Deng Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 5518e623fed2..51b5e977ca88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return r; } + need_ctx_switch = ring->current_ctx != fence_ctx; if (ring->funcs->emit_pipeline_sync && job && ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || + (amdgpu_sriov_vf(adev) && need_ctx_switch) || amdgpu_vm_need_pipeline_sync(ring, job))) { need_pipe_sync = true; dma_fence_put(tmp); @@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } skip_preamble = ring->current_ctx == fence_ctx; - need_ctx_switch = ring->current_ctx != fence_ctx; if (job && ring->funcs->emit_cntxcntl) { if (need_ctx_switch) status |= AMDGPU_HAVE_CTX_SWITCH; -- GitLab From a3d9103ebfa03824d255060fc2c11ac94e3ef441 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Wed, 22 Aug 2018 10:07:35 -0400 Subject: [PATCH 0319/1692] drm/amdgpu: Fix page fault and kasan warning on pci device remove. Problem: When executing echo 1 > /sys/class/drm/card0/device/remove kasan warning as bellow and page fault happen because adev->gart.pages already freed by the time amdgpu_gart_unbind is called. 
BUG: KASAN: user-memory-access in amdgpu_gart_unbind+0x98/0x180 [amdgpu] Write of size 8 at addr 0000000000003648 by task bash/1828 CPU: 2 PID: 1828 Comm: bash Tainted: G W O 4.18.0-rc1-dev+ #29 Hardware name: Gigabyte Technology Co., Ltd. AX370-Gaming/AX370-Gaming-CF, BIOS F3 06/19/2017 Call Trace: dump_stack+0x71/0xab kasan_report+0x109/0x390 amdgpu_gart_unbind+0x98/0x180 [amdgpu] ttm_tt_unbind+0x43/0x60 [ttm] ttm_bo_move_ttm+0x83/0x1c0 [ttm] ttm_bo_handle_move_mem+0xb97/0xd00 [ttm] ttm_bo_evict+0x273/0x530 [ttm] ttm_mem_evict_first+0x29c/0x360 [ttm] ttm_bo_force_list_clean+0xfc/0x210 [ttm] ttm_bo_clean_mm+0xe7/0x160 [ttm] amdgpu_ttm_fini+0xda/0x1d0 [amdgpu] amdgpu_bo_fini+0xf/0x60 [amdgpu] gmc_v8_0_sw_fini+0x36/0x70 [amdgpu] amdgpu_device_fini+0x2d0/0x7d0 [amdgpu] amdgpu_driver_unload_kms+0x6a/0xd0 [amdgpu] drm_dev_unregister+0x79/0x180 [drm] amdgpu_pci_remove+0x2a/0x60 [amdgpu] pci_device_remove+0x5b/0x100 device_release_driver_internal+0x236/0x360 pci_stop_bus_device+0xbf/0xf0 pci_stop_and_remove_bus_device_locked+0x16/0x30 remove_store+0xda/0xf0 kernfs_fop_write+0x186/0x220 __vfs_write+0xcc/0x330 vfs_write+0xe6/0x250 ksys_write+0xb1/0x140 do_syscall_64+0x77/0x1e0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7f66ebbb32c0 Fix: Split gmc_v{6,7,8,9}_0_gart_fini to postpone amdgpu_gart_fini to after memory managers are shut down since gart unbind happens as part of this procedure Signed-off-by: Andrey Grodzovsky Reviewed-by: Junwei Zhang Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 9 ++------- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 16 ++-------------- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 16 ++-------------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16 ++-------------- 4 files changed, 8 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) amdgpu_gart_table_vram_unpin(adev); } -static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, u32 addr, u32 mc_client) { @@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - gmc_v6_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -746,19 +746,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) amdgpu_gart_table_vram_unpin(adev); } -/** - * gmc_v7_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). 
- */ -static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - /** * gmc_v7_0_vm_decode_fault - print human readable fault info * @@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); kfree(adev->gmc.vm_fault_info); - gmc_v7_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -968,19 +968,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) amdgpu_gart_table_vram_unpin(adev); } -/** - * gmc_v8_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). - */ -static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - /** * gmc_v8_0_vm_decode_fault - print human readable fault info * @@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); kfree(adev->gmc.vm_fault_info); - gmc_v8_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) return 0; } -/** - * gmc_v9_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). - */ -static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - static int gmc_v9_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - gmc_v9_0_gart_fini(adev); /* * TODO: @@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) */ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); return 0; } -- GitLab From 9650205a32e7f69c9846a205351e307ea525c1e7 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 16 Aug 2018 11:36:38 +0800 Subject: [PATCH 0320/1692] drm/amd/display: Fix bug use wrong pp interface Used wrong pp interface, the original interface is exposed by dpm on SI and paritial CI. Pointed out by Francis David v2: dal only need to set min_dcefclk and min_fclk to smu. so use display_clock_voltage_request interface, instand of update all display configuration. 
Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, { struct dc_context *ctx = pp->ctx; struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + struct pp_display_clock_request clock = {0}; - if (!pp_funcs || !pp_funcs->display_configuration_changed) + if (!pp_funcs || !pp_funcs->display_clock_voltage_request) return; - amdgpu_dpm_display_configuration_changed(adev); + clock.clock_type = amd_pp_dcf_clock; + clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; + pp_funcs->display_clock_voltage_request(pp_handle, &clock); + + clock.clock_type = amd_pp_f_clock; + clock.clock_freq_in_khz = req->hard_min_fclk_khz; + pp_funcs->display_clock_voltage_request(pp_handle, &clock); } void pp_rv_set_wm_ranges(struct pp_smu *pp, -- GitLab From 982976d92f17aa8ff5d7a4d09fab4608795d12f3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 19 Jul 2018 13:49:07 +0800 Subject: [PATCH 0321/1692] drm/amd/pp: Add ACP PG support in SMU when ACP block not enabled, we power off acp block to save power. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 18 ++++++++++++++++ .../gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 21 ++++++++++++++++++- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 7a646f94b478..da4ebff5b74d 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -1181,6 +1181,21 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate) return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate); } +static void pp_dpm_powergate_acp(void *handle, bool gate) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->pm_en) + return; + + if (hwmgr->hwmgr_func->powergate_acp == NULL) { + pr_info("%s was not implemented.\n", __func__); + return; + } + + hwmgr->hwmgr_func->powergate_acp(hwmgr, gate); +} + static int pp_set_powergating_by_smu(void *handle, uint32_t block_type, bool gate) { @@ -1200,6 +1215,9 @@ static int pp_set_powergating_by_smu(void *handle, case AMD_IP_BLOCK_TYPE_GFX: ret = pp_dpm_powergate_gfx(handle, gate); break; + case AMD_IP_BLOCK_TYPE_ACP: + pp_dpm_powergate_acp(handle, gate); + break; default: break; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index 0adfc5392cd3..b8637049198d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -664,8 +664,13 @@ static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr) data->uvd_power_gated = false; data->vce_power_gated = false; data->samu_power_gated = false; +#ifdef CONFIG_DRM_AMD_ACP data->acp_power_gated = false; - data->pgacpinit = true; +#else + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF); + 
data->acp_power_gated = true; +#endif + } static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr) @@ -1886,6 +1891,19 @@ static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) } +static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct smu8_hwmgr *data = hwmgr->backend; + + if (data->acp_power_gated == bgate) + return; + + if (bgate) + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF); + else + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON); +} + static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct smu8_hwmgr *data = hwmgr->backend; @@ -1951,6 +1969,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = { .powerdown_uvd = smu8_dpm_powerdown_uvd, .powergate_uvd = smu8_dpm_powergate_uvd, .powergate_vce = smu8_dpm_powergate_vce, + .powergate_acp = smu8_dpm_powergate_acp, .get_mclk = smu8_dpm_get_mclk, .get_sclk = smu8_dpm_get_sclk, .patch_boot_state = smu8_dpm_patch_boot_state, diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index d3d96260f440..7e58a0da5ccf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -247,6 +247,7 @@ struct pp_hwmgr_func { int (*powerdown_uvd)(struct pp_hwmgr *hwmgr); void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); + void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate); uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low); uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); int (*power_state_set)(struct pp_hwmgr *hwmgr, -- GitLab From be2d6aa51e66625f93d3ba1eb817cd33d8136c60 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 19 Jul 2018 11:48:36 -0500 Subject: [PATCH 0322/1692] drm/amdgpu: Power down acp if board uses AZ (v2) if board uses AZ rather than ACP, we power down acp through smu to save power. 
v2: handle S3/S4 and hw_fini (Alex) Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 71efcf38f11b..d4d1738da3b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -289,10 +289,12 @@ static int acp_hw_init(void *handle) r = amd_acp_hw_init(adev->acp.cgs_device, ip_block->version->major, ip_block->version->minor); /* -ENODEV means board uses AZ rather than ACP */ - if (r == -ENODEV) + if (r == -ENODEV) { + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); return 0; - else if (r) + } else if (r) { return r; + } if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) return -EINVAL; @@ -497,8 +499,10 @@ static int acp_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* return early if no ACP */ - if (!adev->acp.acp_cell) + if (!adev->acp.acp_cell) { + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); return 0; + } /* Assert Soft reset of ACP */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); @@ -556,11 +560,21 @@ static int acp_hw_fini(void *handle) static int acp_suspend(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* power up on suspend */ + if (!adev->acp.acp_cell) + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); return 0; } static int acp_resume(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* power down again on resume */ + if (!adev->acp.acp_cell) + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); return 0; } -- GitLab From 1062ddb6d5749f58fb3f086b9cdf596487ca2d6a Mon Sep 17 00:00:00 2001 From: Vijendar Mukunda Date: Sun, 29 Jul 2018 19:08:32 +0800 Subject: [PATCH 0323/1692] drm/amd/amdgpu: Enabling Power Gating for Stoney platform Removed condition checks to skip the power gating feature for stoney platform. 
Reviewed-by: Alex Deucher Signed-off-by: Vijendar Mukunda Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 53 ++++++++++++------------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index d4d1738da3b6..bab8fab118d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -301,20 +301,19 @@ static int acp_hw_init(void *handle) acp_base = adev->rmmio_base; - if (adev->asic_type != CHIP_STONEY) { - adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); - if (adev->acp.acp_genpd == NULL) - return -ENOMEM; - adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; - adev->acp.acp_genpd->gpd.power_off = acp_poweroff; - adev->acp.acp_genpd->gpd.power_on = acp_poweron; + adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); + if (adev->acp.acp_genpd == NULL) + return -ENOMEM; + adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; + adev->acp.acp_genpd->gpd.power_off = acp_poweroff; + adev->acp.acp_genpd->gpd.power_on = acp_poweron; - adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; - pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); - } + adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; + + pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), GFP_KERNEL); @@ -431,17 +430,17 @@ static int acp_hw_init(void *handle) if (r) return r; - if (adev->asic_type != CHIP_STONEY) { - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); - if (r) { - dev_err(dev, "Failed to add dev to genpd\n"); - return r; - } + + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); + if (r) { + dev_err(dev, "Failed to add dev to genpd\n"); + return r; } } + /* Assert Soft reset of ACP */ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); @@ -499,7 +498,7 @@ static int acp_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* return early if no ACP */ - if (!adev->acp.acp_cell) { + if (!adev->acp.acp_genpd) { amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); return 0; } @@ -540,19 +539,17 @@ static int acp_hw_fini(void *handle) udelay(100); } - if (adev->acp.acp_genpd) { - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - ret = pm_genpd_remove_device(dev); - /* If removal fails, dont giveup and try rest */ - if (ret) - dev_err(dev, "remove dev from genpd failed\n"); - } - kfree(adev->acp.acp_genpd); + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + ret = pm_genpd_remove_device(dev); + /* If removal fails, dont giveup and try rest */ + if (ret) + dev_err(dev, "remove dev from genpd failed\n"); } mfd_remove_devices(adev->acp.parent); kfree(adev->acp.acp_res); + kfree(adev->acp.acp_genpd); kfree(adev->acp.acp_cell); return 0; -- GitLab From 3a54d2c8951113171655425bd173f43bbcb531c5 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sun, 29 Jul 2018 18:44:06 +0800 Subject: [PATCH 0324/1692] drm/amdgpu/acp: Powrgate acp via smu Call smu to power gate/ungate acp instand of only powr down acp tiles in acp block. when smu power gate acp: smu will turn off clock, power down acp tiles,check and enter in ULV state. 
when smu ungate acp: smu will exit ulv, turn on clocks, power on acp tiles. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 134 ++++-------------------- 1 file changed, 22 insertions(+), 112 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index bab8fab118d6..b5b66c3d1b43 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -116,136 +116,47 @@ static int acp_sw_fini(void *handle) return 0; } -/* power off a tile/block within ACP */ -static int acp_suspend_tile(void *cgs_dev, int tile) -{ - u32 val = 0; - u32 count = 0; - - if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) { - pr_err("Invalid ACP tile : %d to suspend\n", tile); - return -1; - } - - val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile); - val &= ACP_TILE_ON_MASK; - - if (val == 0x0) { - val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); - val = val | (1 << tile); - cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); - cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG, - 0x500 + tile); - - count = ACP_TIMEOUT_LOOP; - while (true) { - val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 - + tile); - val = val & ACP_TILE_ON_MASK; - if (val == ACP_TILE_OFF_MASK) - break; - if (--count == 0) { - pr_err("Timeout reading ACP PGFSM status\n"); - return -ETIMEDOUT; - } - udelay(100); - } - - val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); - - val |= ACP_TILE_OFF_RETAIN_REG_MASK; - cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); - } - return 0; -} - -/* power on a tile/block within ACP */ -static int acp_resume_tile(void *cgs_dev, int tile) -{ - u32 val = 0; - u32 count = 0; - - if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) { - pr_err("Invalid ACP tile to resume\n"); - return -1; - } - - val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile); - val = val & ACP_TILE_ON_MASK; - - if (val != 0x0) { - cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG, - 0x600 + tile); - count = ACP_TIMEOUT_LOOP; - while (true) { - val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 - + tile); - val = val & ACP_TILE_ON_MASK; - if (val == 0x0) - break; - if (--count == 0) { - pr_err("Timeout reading ACP PGFSM status\n"); - return -ETIMEDOUT; - } - udelay(100); - } - val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG); - if (tile == ACP_TILE_P1) - val = val & (ACP_TILE_P1_MASK); - else if (tile == ACP_TILE_P2) - val = val & (ACP_TILE_P2_MASK); - - cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val); - } - return 0; -} - struct acp_pm_domain { - void *cgs_dev; + void *adev; struct generic_pm_domain gpd; }; static int acp_poweroff(struct generic_pm_domain *genpd) { - int i, ret; struct acp_pm_domain *apd; + struct amdgpu_device *adev; apd = container_of(genpd, struct acp_pm_domain, gpd); if (apd != NULL) { - /* Donot return abruptly if any of power tile fails to suspend. - * Log it and continue powering off other tile - */ - for (i = 4; i >= 0 ; i--) { - ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); - if (ret) - pr_err("ACP tile %d tile suspend failed\n", i); - } + adev = apd->adev; + /* call smu to POWER GATE ACP block + * smu will + * 1. turn off the acp clock + * 2. power off the acp tiles + * 3. 
check and enter ulv state + */ + if (adev->powerplay.pp_funcs->set_powergating_by_smu) + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); } return 0; } static int acp_poweron(struct generic_pm_domain *genpd) { - int i, ret; struct acp_pm_domain *apd; + struct amdgpu_device *adev; apd = container_of(genpd, struct acp_pm_domain, gpd); if (apd != NULL) { - for (i = 0; i < 2; i++) { - ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i); - if (ret) { - pr_err("ACP tile %d resume failed\n", i); - break; - } - } - - /* Disable DSPs which are not going to be used */ - for (i = 0; i < 3; i++) { - ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i); - /* Continue suspending other DSP, even if one fails */ - if (ret) - pr_err("ACP DSP %d suspend failed\n", i); - } + adev = apd->adev; + /* call smu to UNGATE ACP block + * smu will + * 1. exit ulv + * 2. turn on acp clock + * 3. power on acp tiles + */ + if (adev->powerplay.pp_funcs->set_powergating_by_smu) + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false); } return 0; } @@ -311,7 +222,7 @@ static int acp_hw_init(void *handle) adev->acp.acp_genpd->gpd.power_on = acp_poweron; - adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; + adev->acp.acp_genpd->adev = adev; pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); @@ -430,7 +341,6 @@ static int acp_hw_init(void *handle) if (r) return r; - for (i = 0; i < ACP_DEVS ; i++) { dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); -- GitLab From c36628d8989a7a0d1cc4fe2ae93c6cd99a865f68 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sun, 29 Jul 2018 18:53:02 +0800 Subject: [PATCH 0325/1692] drm/amgpu/acp: Implement set_powergating_state for acp so driver can powergate acp block after asic initialized to save power. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index b5b66c3d1b43..297a5490ad8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -514,6 +514,12 @@ static int acp_set_clockgating_state(void *handle, static int acp_set_powergating_state(void *handle, enum amd_powergating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = state == AMD_PG_STATE_GATE ? true : false; + + if (adev->powerplay.pp_funcs->set_powergating_by_smu) + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); + return 0; } -- GitLab From ac0a6cf1c6ef91e4af2a9d56eeaee8fca61d6ad7 Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Wed, 1 Aug 2018 13:49:59 +0530 Subject: [PATCH 0326/1692] drm/scheduler: add a list of run queues to the entity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are the potential run queues on which the jobs from this entity can be scheduled. We will use this to do load balancing. 
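For illustration only (not part of this patch): a driver that owns several hardware rings could hand all of their run queues to one entity, so that the load balancer introduced later in this series can pick among them. A minimal sketch, assuming the drm_sched_entity_init(entity, rq_list, num_rq_list, guilty) form used in this tree, a hypothetical array of already-initialized schedulers, and that each scheduler exposes its per-priority sched_rq[] array:

	/* Sketch: offer the same priority level on every candidate scheduler.
	 * The entity initially runs on rq_list[0]; idle entities may later be
	 * moved to a less loaded run queue by the load-balancing patches below.
	 */
	static int example_init_balanced_entity(struct drm_sched_entity *entity,
						struct drm_gpu_scheduler **scheds,
						unsigned int num_scheds)
	{
		struct drm_sched_rq *rq_list[4];
		unsigned int i;

		if (num_scheds > ARRAY_SIZE(rq_list))
			num_scheds = ARRAY_SIZE(rq_list);

		for (i = 0; i < num_scheds; ++i)
			rq_list[i] = &scheds[i]->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

		return drm_sched_entity_init(entity, rq_list, num_scheds, NULL);
	}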
Signed-off-by: Nayan Deshmukh Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 8 ++++++++ include/drm/gpu_scheduler.h | 7 ++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 4fc211e19d6e..afffb9a60cdb 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -179,6 +179,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, unsigned int num_rq_list, atomic_t *guilty) { + int i; + if (!(entity && rq_list && num_rq_list > 0 && rq_list[0])) return -EINVAL; @@ -186,6 +188,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, INIT_LIST_HEAD(&entity->list); entity->rq = rq_list[0]; entity->guilty = guilty; + entity->num_rq_list = num_rq_list; + entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *), + GFP_KERNEL); + for (i = 0; i < num_rq_list; ++i) + entity->rq_list[i] = rq_list[i]; entity->last_scheduled = NULL; spin_lock_init(&entity->rq_lock); @@ -348,6 +355,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) dma_fence_put(entity->last_scheduled); entity->last_scheduled = NULL; + kfree(entity->rq_list); } EXPORT_SYMBOL(drm_sched_entity_fini); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 21c648b0b2a1..2419887e25eb 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -50,7 +50,10 @@ enum drm_sched_priority { * * @list: used to append this struct to the list of entities in the * runqueue. - * @rq: runqueue to which this entity belongs. + * @rq: runqueue on which this entity is currently scheduled. + * @rq_list: a list of run queues on which jobs from this entity can + * be scheduled + * @num_rq_list: number of run queues in the rq_list * @rq_lock: lock to modify the runqueue to which this entity belongs. * @job_queue: the list of jobs of this entity. * @fence_seq: a linearly increasing seqno incremented with each @@ -75,6 +78,8 @@ enum drm_sched_priority { struct drm_sched_entity { struct list_head list; struct drm_sched_rq *rq; + struct drm_sched_rq **rq_list; + unsigned int num_rq_list; spinlock_t rq_lock; struct spsc_queue job_queue; -- GitLab From 249a07c05a8da9637c2eb3205f1fc739c216f707 Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Wed, 1 Aug 2018 13:50:00 +0530 Subject: [PATCH 0327/1692] drm/scheduler: add counter for total jobs in scheduler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To keep track of the scheduler load. 
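For illustration only (not part of this patch): the counter is read with atomic_read() and compared across schedulers. A two-scheduler reduction of the helper that the next patch in this series adds:

	/* Sketch: pick whichever scheduler currently has fewer jobs queued
	 * or in flight, according to the num_jobs counter added here.
	 */
	static struct drm_gpu_scheduler *
	example_less_loaded(struct drm_gpu_scheduler *a,
			    struct drm_gpu_scheduler *b)
	{
		return atomic_read(&a->num_jobs) <= atomic_read(&b->num_jobs) ?
			a : b;
	}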
Signed-off-by: Nayan Deshmukh Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++ include/drm/gpu_scheduler.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index afffb9a60cdb..3b9969190927 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -530,6 +530,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, trace_drm_sched_job(sched_job, entity); + atomic_inc(&entity->rq->sched->num_jobs); WRITE_ONCE(entity->last_user, current->group_leader); first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); @@ -821,6 +822,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) dma_fence_get(&s_fence->finished); atomic_dec(&sched->hw_rq_count); + atomic_dec(&sched->num_jobs); drm_sched_fence_finished(s_fence); trace_drm_sched_process_job(s_fence); @@ -938,6 +940,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, INIT_LIST_HEAD(&sched->ring_mirror_list); spin_lock_init(&sched->job_list_lock); atomic_set(&sched->hw_rq_count, 0); + atomic_set(&sched->num_jobs, 0); atomic64_set(&sched->job_id_count, 0); /* Each scheduler will run on a seperate kernel thread */ diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 2419887e25eb..0c4cfe689d4c 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -262,6 +262,7 @@ struct drm_sched_backend_ops { * @job_list_lock: lock to protect the ring_mirror_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked * guilty and it will be considered for scheduling further. + * @num_jobs: the number of jobs in queue in the scheduler * * One scheduler is implemented for each hardware ring. */ @@ -279,6 +280,7 @@ struct drm_gpu_scheduler { struct list_head ring_mirror_list; spinlock_t job_list_lock; int hang_limit; + atomic_t num_jobs; }; int drm_sched_init(struct drm_gpu_scheduler *sched, -- GitLab From 97ffa35b5dec4e68baa85e626b69ae4949a4ca2a Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Wed, 1 Aug 2018 13:50:01 +0530 Subject: [PATCH 0328/1692] drm/scheduler: add new function to get least loaded sched v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function selects the run queue from the rq_list with the least load. The load is decided by the number of jobs in a scheduler. v2: avoid using atomic read twice consecutively, instead store it locally Signed-off-by: Nayan Deshmukh Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 25 +++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 3b9969190927..3e13bdfa8710 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -241,6 +241,31 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) return true; } +/** + * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load + * + * @entity: scheduler entity + * + * Return the pointer to the rq with least load. 
+ */ +static struct drm_sched_rq * +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity) +{ + struct drm_sched_rq *rq = NULL; + unsigned int min_jobs = UINT_MAX, num_jobs; + int i; + + for (i = 0; i < entity->num_rq_list; ++i) { + num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs); + if (num_jobs < min_jobs) { + min_jobs = num_jobs; + rq = entity->rq_list[i]; + } + } + + return rq; +} + static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct dma_fence_cb *cb) { -- GitLab From df0ca30838eeddbd34d7573cdbfaf88c56ad3e65 Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Wed, 1 Aug 2018 13:50:02 +0530 Subject: [PATCH 0329/1692] drm/scheduler: move idle entities to scheduler with less load v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is the first attempt to move entities between schedulers to have dynamic load balancing. We just move entities with no jobs for now as moving the ones with jobs will lead to other compilcations like ensuring that the other scheduler does not remove a job from the current entity while we are moving. v2: remove unused variable and an unecessary check Signed-off-by: Nayan Deshmukh Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 3e13bdfa8710..61ea802ce492 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -520,6 +520,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity) if (!sched_job) return NULL; + sched_job->sched = sched; + sched_job->s_fence->sched = sched; while ((entity->dependency = sched->ops->dependency(sched_job, entity))) if (drm_sched_entity_add_dependency_cb(entity)) return NULL; @@ -550,11 +552,23 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity) void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity) { - struct drm_gpu_scheduler *sched = sched_job->sched; - bool first = false; + struct drm_sched_rq *rq = entity->rq; + bool first, reschedule, idle; - trace_drm_sched_job(sched_job, entity); + idle = entity->last_scheduled == NULL || + dma_fence_is_signaled(entity->last_scheduled); + first = spsc_queue_count(&entity->job_queue) == 0; + reschedule = idle && first && (entity->num_rq_list > 1); + if (reschedule) { + rq = drm_sched_entity_get_free_sched(entity); + spin_lock(&entity->rq_lock); + drm_sched_rq_remove_entity(entity->rq, entity); + entity->rq = rq; + spin_unlock(&entity->rq_lock); + } + + trace_drm_sched_job(sched_job, entity); atomic_inc(&entity->rq->sched->num_jobs); WRITE_ONCE(entity->last_user, current->group_leader); first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); @@ -570,7 +584,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, } drm_sched_rq_add_entity(entity->rq, entity); spin_unlock(&entity->rq_lock); - drm_sched_wakeup(sched); + drm_sched_wakeup(entity->rq->sched); } } EXPORT_SYMBOL(drm_sched_entity_push_job); -- GitLab From 07507c01aa70558828216ea81393c1511fb6189d Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Tue, 31 Jul 2018 10:48:52 -0400 Subject: [PATCH 0330/1692] drm/scheduler: Add job dependency trace. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During debug sessions I encountered a need to trace back a job dependecy a few steps back to the first failing job. This trace helpped me a lot. Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 8 +++++-- .../gpu/drm/scheduler/gpu_scheduler_trace.h | 24 +++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 61ea802ce492..08fa5b65acaf 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -522,9 +522,13 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity) sched_job->sched = sched; sched_job->s_fence->sched = sched; - while ((entity->dependency = sched->ops->dependency(sched_job, entity))) - if (drm_sched_entity_add_dependency_cb(entity)) + while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { + if (drm_sched_entity_add_dependency_cb(entity)) { + + trace_drm_sched_job_wait_dep(sched_job, entity->dependency); return NULL; + } + } /* skip jobs from entity that marked guilty */ if (entity->guilty && atomic_read(entity->guilty)) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h index 4998ad950a48..1626f3967130 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h @@ -74,6 +74,30 @@ TRACE_EVENT(drm_sched_process_job, TP_printk("fence=%p signaled", __entry->fence) ); +TRACE_EVENT(drm_sched_job_wait_dep, + TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence), + TP_ARGS(sched_job, fence), + TP_STRUCT__entry( + __field(const char *,name) + __field(uint64_t, id) + __field(struct dma_fence *, fence) + __field(uint64_t, ctx) + __field(unsigned, seqno) + ), + + TP_fast_assign( + __entry->name = sched_job->sched->name; + __entry->id = sched_job->id; + __entry->fence = fence; + __entry->ctx = fence->context; + __entry->seqno = fence->seqno; + ), + TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u", + __entry->name, __entry->id, + __entry->fence, __entry->ctx, + __entry->seqno) +); + #endif /* This part must be outside protection */ -- GitLab From 65f7260b135669bb1da72969ca8aad5c2fc4a300 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Tue, 31 Jul 2018 10:52:25 -0400 Subject: [PATCH 0331/1692] drm/amdgpu: Add job pipe sync dependecy trace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's useful to trace any dependency a job has on prevoius jobs. 
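For illustration only, with entirely made-up values: given the TP_printk formats of the two events (drm_sched_job_wait_dep above, amdgpu_ib_pipe_sync below), the resulting trace lines come out roughly as

	drm_sched_job_wait_dep: job ring=gfx, id=1024, depends fence=00000000deadbeef, context=33, seq=7
	amdgpu_ib_pipe_sync: job ring=sdma0, id=1025, need pipe sync to fence=00000000deadbeef, context=33, seq=8

which lets a failing job be walked back through its dependencies by matching the fence context and seqno across events.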
Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 24 +++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 51b5e977ca88..47817e00f54f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -32,6 +32,7 @@ #include #include "amdgpu.h" #include "atom.h" +#include "amdgpu_trace.h" #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) @@ -170,6 +171,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, (amdgpu_sriov_vf(adev) && need_ctx_switch) || amdgpu_vm_need_pipeline_sync(ring, job))) { need_pipe_sync = true; + + if (tmp) + trace_amdgpu_ib_pipe_sync(job, tmp); + dma_fence_put(tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 7206a0025b17..8c2dab20eb36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -462,6 +462,30 @@ TRACE_EVENT(amdgpu_bo_move, __entry->new_placement, __entry->bo_size) ); +TRACE_EVENT(amdgpu_ib_pipe_sync, + TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence), + TP_ARGS(sched_job, fence), + TP_STRUCT__entry( + __field(const char *,name) + __field(uint64_t, id) + __field(struct dma_fence *, fence) + __field(uint64_t, ctx) + __field(unsigned, seqno) + ), + + TP_fast_assign( + __entry->name = sched_job->base.sched->name; + __entry->id = sched_job->base.id; + __entry->fence = fence; + __entry->ctx = fence->context; + __entry->seqno = fence->seqno; + ), + TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", + __entry->name, __entry->id, + __entry->fence, __entry->ctx, + __entry->seqno) +); + #undef AMDGPU_JOB_GET_TIMELINE_NAME #endif -- GitLab From f688b614b64368ce1681f28cb6f2ff53e1eb8462 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 5 Jul 2018 19:22:50 +0800 Subject: [PATCH 0332/1692] drm/amd/pp: Implement get_performance_level for legacy dgpu display can get clock info through this function. implement this function for vega10 and old asics. 
from vega12, there is no power state management, so need to add new interface to notify display the clock info Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/hardwaremanager.c | 2 +- .../gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 24 +++++++++++++++++++ .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 24 +++++++++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 6ef3c875fedd..85119c2bdcc8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -359,7 +359,7 @@ int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *s PHM_PerformanceLevelDesignation designation) { int result; - PHM_PerformanceLevel performance_level; + PHM_PerformanceLevel performance_level = {0}; PHM_FUNC_CHECK(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 052e60dfaf9f..380f282a64ba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -5008,6 +5008,29 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint return 0; } +static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + const struct smu7_power_state *ps; + struct smu7_hwmgr *data; + uint32_t i; + + if (level == NULL || hwmgr == NULL || state == NULL) + return -EINVAL; + + data = hwmgr->backend; + ps = cast_const_phw_smu7_power_state(state); + + i = index > ps->performance_level_count - 1 ? + ps->performance_level_count - 1 : index; + + level->coreClock = ps->performance_levels[i].engine_clock; + level->memory_clock = ps->performance_levels[i].memory_clock; + + return 0; +} + static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .backend_init = &smu7_hwmgr_backend_init, .backend_fini = &smu7_hwmgr_backend_fini, @@ -5064,6 +5087,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .set_power_limit = smu7_set_power_limit, .get_power_profile_mode = smu7_get_power_profile_mode, .set_power_profile_mode = smu7_set_power_profile_mode, + .get_performance_level = smu7_get_performance_level, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index fb86c24394ff..704b237ecf70 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -4854,6 +4854,29 @@ static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return 0; } +static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + const struct vega10_power_state *ps; + struct vega10_hwmgr *data; + uint32_t i; + + if (level == NULL || hwmgr == NULL || state == NULL) + return -EINVAL; + + data = hwmgr->backend; + ps = cast_const_phw_vega10_power_state(state); + + i = index > ps->performance_level_count - 1 ? 
+ ps->performance_level_count - 1 : index; + + level->coreClock = ps->performance_levels[i].gfx_clock; + level->memory_clock = ps->performance_levels[i].mem_clock; + + return 0; +} + static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .backend_init = vega10_hwmgr_backend_init, .backend_fini = vega10_hwmgr_backend_fini, @@ -4913,6 +4936,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .set_power_profile_mode = vega10_set_power_profile_mode, .set_power_limit = vega10_set_power_limit, .odn_edit_dpm_table = vega10_odn_edit_dpm_table, + .get_performance_level = vega10_get_performance_level, }; int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, -- GitLab From a465feae60dc615d0e363ea5809b95a433b259d4 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Mon, 16 Jul 2018 14:05:11 -0400 Subject: [PATCH 0333/1692] drm/amd/display: pass compat_level to hubp Signed-off-by: Charlene Liu Reviewed-by: Dmytro Laktyushkin Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 ++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 3 ++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4 +++- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 3 ++- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 2138cd3c5d1d..fa1bacd7ba3a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -485,7 +485,8 @@ void hubp1_program_surface_config( union plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, - bool horizontal_mirror) + bool horizontal_mirror, + unsigned int compat_level) { hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks); hubp1_program_tiling(hubp, tiling_info, format); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index f689feace82d..48c1907c78c6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -664,7 +664,8 @@ void hubp1_program_surface_config( union plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, - bool horizontal_mirror); + bool horizontal_mirror, + unsigned int compat_level); void hubp1_program_deadline( struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index cfcc54f2ce65..41f6595891f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2004,6 +2004,7 @@ static void update_dchubp_dpp( struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; union plane_size size = plane_state->plane_size; + unsigned int compat_level = 0; /* depends on DML calculation, DPP clock value may change dynamically */ /* If request max dpp clk is lower than current dispclk, no need to @@ -2095,7 +2096,8 @@ static void update_dchubp_dpp( &size, plane_state->rotation, &plane_state->dcc, - plane_state->horizontal_mirror); + plane_state->horizontal_mirror, + compat_level); } hubp->power_gated = false; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 4f3f9e68ccfa..334c48cdafdc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -96,7 +96,8 @@ struct hubp_funcs { union plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, - bool horizontal_mirror); + bool horizontal_mirror, + unsigned int compa_level); bool (*hubp_is_flip_pending)(struct hubp *hubp); -- GitLab From 265f5ba6c209875081da7c5f7affe8c2c1913a75 Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Mon, 16 Jul 2018 10:40:31 -0400 Subject: [PATCH 0334/1692] drm/amd/display: Move PME to function pointer call semantics [why] Legacy IRI style is not linux friendly. [how] New function pointer call semantics will be used for all future PPLIB/DAL interfaces, and also some existing will be refactored. This change defines how the new function pointer structures will look, as well as implements Signed-off-by: Jun Lei Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 8 +- .../gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 7 ++ drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 92 +++++++++++-------- 3 files changed, 65 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 4ba0003a9d32..cfa907b119c7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -478,7 +478,7 @@ bool dm_pp_get_static_clocks( void pp_rv_set_display_requirement(struct pp_smu *pp, struct pp_smu_display_requirement_rv *req) { - struct dc_context *ctx = pp->ctx; + const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -499,7 +499,7 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, void pp_rv_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { - struct dc_context *ctx = pp->ctx; + const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -548,7 +548,7 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, void pp_rv_set_pme_wa_enable(struct pp_smu *pp) { - struct dc_context *ctx = pp->ctx; + const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -563,7 +563,7 @@ void dm_pp_get_funcs_rv( struct dc_context *ctx, struct pp_smu_funcs_rv *funcs) { - funcs->pp_smu.ctx = ctx; + funcs->pp_smu.dm = ctx; funcs->set_display_requirement = pp_rv_set_display_requirement; funcs->set_wm_ranges = pp_rv_set_wm_ranges; funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable; diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index bd039322f697..32b34134c501 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -37,6 +37,13 @@ #define DC_LOGGER \ dc->ctx->logger + +#define WM_SET_COUNT 4 +#define WM_A 0 +#define WM_B 1 +#define WM_C 2 +#define WM_D 3 + /* * NOTE: * This file is gcc-parseable HW gospel, coming straight from HW engineers. 
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index 58ed2055ef9f..f2ea8452d48f 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -30,33 +30,45 @@ * interface to PPLIB/SMU to setup clocks and pstate requirements on SoC */ +enum pp_smu_ver { + /* + * PP_SMU_INTERFACE_X should be interpreted as the interface defined + * starting from X, where X is some family of ASICs. This is as + * opposed to interfaces used only for X. There will be some degree + * of interface sharing between families of ASIcs. + */ + PP_SMU_UNSUPPORTED, + PP_SMU_VER_RV +}; struct pp_smu { - struct dc_context *ctx; -}; + enum pp_smu_ver ver; + const void *pp; -enum wm_set_id { - WM_A, - WM_B, - WM_C, - WM_D, - WM_SET_COUNT, + /* + * interim extra handle for backwards compatibility + * as some existing functionality not yet implemented + * by ppsmu + */ + const void *dm; }; struct pp_smu_wm_set_range { - enum wm_set_id wm_inst; + unsigned int wm_inst; uint32_t min_fill_clk_khz; uint32_t max_fill_clk_khz; uint32_t min_drain_clk_khz; uint32_t max_drain_clk_khz; }; +#define MAX_WATERMARK_SETS 4 + struct pp_smu_wm_range_sets { - uint32_t num_reader_wm_sets; - struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT]; + unsigned int num_reader_wm_sets; + struct pp_smu_wm_set_range reader_wm_sets[MAX_WATERMARK_SETS]; - uint32_t num_writer_wm_sets; - struct pp_smu_wm_set_range writer_wm_sets[WM_SET_COUNT]; + unsigned int num_writer_wm_sets; + struct pp_smu_wm_set_range writer_wm_sets[MAX_WATERMARK_SETS]; }; struct pp_smu_display_requirement_rv { @@ -85,48 +97,52 @@ struct pp_smu_display_requirement_rv { struct pp_smu_funcs_rv { struct pp_smu pp_smu; - void (*set_display_requirement)(struct pp_smu *pp, - struct pp_smu_display_requirement_rv *req); + /* PPSMC_MSG_SetDisplayCount + * 0 triggers S0i2 optimization + */ + void (*set_display_count)(struct pp_smu *pp, int count); /* which SMU message? are reader and writer WM separate SMU msg? 
*/ void (*set_wm_ranges)(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges); - /* PME w/a */ - void (*set_pme_wa_enable)(struct pp_smu *pp); -}; -#if 0 -struct pp_smu_funcs_rv { + /* PPSMC_MSG_SetHardMinDcfclkByFreq + * fixed clock at requested freq, either from FCH bypass or DFS + */ + void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz); - /* PPSMC_MSG_SetDisplayCount - * 0 triggers S0i2 optimization + /* PPSMC_MSG_SetMinDeepSleepDcfclk + * when DF is in cstate, dcf clock is further divided down + * to just above given frequency */ - void (*set_display_count)(struct pp_smu *pp, int count); + void (*set_min_deep_sleep_dcfclk)(struct pp_smu *pp, int mhz); /* PPSMC_MSG_SetHardMinFclkByFreq - * FCLK will vary with DPM, but never below requested hard min + * FCLK will vary with DPM, but never below requested hard min */ void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz); - /* PPSMC_MSG_SetHardMinDcefclkByFreq - * fixed clock at requested freq, either from FCH bypass or DFS + /* PPSMC_MSG_SetHardMinSocclkByFreq + * Needed for DWB support */ - void (*set_hard_min_dcefclk_by_freq)(struct pp_smu *pp, int khz); + void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz); - /* PPSMC_MSG_SetMinDeepSleepDcefclk - * when DF is in cstate, dcf clock is further divided down - * to just above given frequency - */ - void (*set_min_deep_sleep_dcefclk)(struct pp_smu *pp, int mhz); + /* PME w/a */ + void (*set_pme_wa_enable)(struct pp_smu *pp); - /* todo: aesthetic - * watermark range table + /* + * Legacy functions. Used for backwards comp. with existing + * PPlib code. */ + void (*set_display_requirement)(struct pp_smu *pp, + struct pp_smu_display_requirement_rv *req); +}; - /* todo: functional/feature - * PPSMC_MSG_SetHardMinSocclkByFreq: required to support DWB - */ +struct pp_smu_funcs { + struct pp_smu ctx; + union { + struct pp_smu_funcs_rv rv_funcs; + }; }; -#endif #endif /* DM_PP_SMU_IF__H */ -- GitLab From 2cb3bcdb33c0a18b25a23e7081de8c9a1698a80e Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Wed, 18 Jul 2018 20:28:12 -0400 Subject: [PATCH 0335/1692] drm/amd/display: dal 3.1.60 Signed-off-by: Tony Cheng Reviewed-by: Aric Cyr Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 6c9990bef267..01af5356f2fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.59" +#define DC_VER "3.1.60" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- GitLab From 4e60536d093f486229bb8d86c739e8ef6446df85 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 24 Jul 2018 13:19:49 -0400 Subject: [PATCH 0336/1692] drm/amd/display: Set DFS bypass flags for dce110 [Why] While there is support for using and quering DFS bypass clocks the hardware is never notified to enter DFS bypass mode for dce110. [How] Add a flag that can be set when programming the display engine PLL to enable DFS bypass mode. If this flag is set then the hardware is notified to enter DFS bypass mode and the correct display engine clock frequency can be acquired. 
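For illustration only, condensed from the hunks in this patch (dce_clocks.c sets the flag, command_table.c consumes it): the intended flow is to request DFS bypass when programming the display engine PLL, then read back the true display clock that VBIOS reports while bypass is active.

	/* Condensed sketch of the flow, not literal driver code. */
	pxl_clk_params.target_pixel_clock = requested_clk_khz;
	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;

	if (clk_dce->dfs_bypass_enabled)
		pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;

	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);

	if (clk_dce->dfs_bypass_enabled)
		/* VBIOS reports the true display clock when bypass is on. */
		clk_dce->dfs_bypass_disp_clk =
			pxl_clk_params.dfs_bypass_display_clock;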
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Harry Wentland Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/command_table.c | 3 +++ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 3 +++ drivers/gpu/drm/amd/display/include/bios_parser_types.h | 2 ++ 3 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index a558bfaa0c46..2bd7cd97e00d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -2201,6 +2201,9 @@ static enum bp_result program_clock_v6( if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; + if (bp_params->flags.SET_DISPCLK_DFS_BYPASS) + params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_DPREFCLK_BYPASS; + if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) { /* True display clock is returned by VBIOS if DFS bypass * is enabled. */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index fb1f373d08a1..0782b74624d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -255,6 +255,9 @@ static int dce_set_clock( pxl_clk_params.target_pixel_clock = requested_clk_khz; pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; + if (clk_dce->dfs_bypass_enabled) + pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; + bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); if (clk_dce->dfs_bypass_enabled) { diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index 0840f69cde99..f8dbfa5b89f2 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -234,6 +234,8 @@ struct bp_pixel_clock_parameters { uint32_t USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK:1; /* Use external reference clock (refDivSrc for PLL) */ uint32_t SET_EXTERNAL_REF_DIV_SRC:1; + /* Use DFS bypass for Display clock. */ + uint32_t SET_DISPCLK_DFS_BYPASS:1; /* Force program PHY PLL only */ uint32_t PROGRAM_PHY_PLL_ONLY:1; /* Support for YUV420 */ -- GitLab From 1c8faa9aa0cc1ccf02bed608d23966b7347d71a6 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 24 Jul 2018 09:42:23 -0400 Subject: [PATCH 0337/1692] drm/amd/display: Enable DFS bypass support in DC config [Why] We explicitly disable DFS bypass support when creating DC. Support for this feature should now be in place so it can be left implicitly enabled. [How] Remove the line that disables DFS bypass support. Note: This option was actually reset to false anyway for most of the hardware I've tested on making this particular line misleading in the first place. This patch also fixes this issue. 
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Harry Wentland Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6ae050dc3220..ebdf82044f73 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -706,8 +706,6 @@ struct dc *dc_create(const struct dc_init_data *init_params) DC_LOG_DC("Display Core initialized\n"); - /* TODO: missing feature to be enabled */ - dc->debug.disable_dfs_bypass = true; return dc; -- GitLab From 5a83c93249098df2ee3b0039ec8f4495b959fcd0 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 21 Aug 2018 14:36:49 -0500 Subject: [PATCH 0338/1692] drm/amd/display: Add support for toggling DFS bypass [Why] If the hardware supports DFS bypass it will always be enabled after creation of the DCCG. DFS bypass should only be enabled when the current stream consists of a single embedded panel and the minimum display clock is below the DFS bypass threshold. [How] Add a function to the DCCG table that updates the DFS bypass state when setting the bandwidth. If the DFS bypass state is changed, the clock needs to be reprogrammed to reflect this before the DPREFCLK is updated for audio endpoints. The existing display clock value is used as the target display clock value when reprogramming since the resulting change will be equal or larger to the current value. These changes only specifically target dce110 but do offer a framework for support on other applicable targets. Signed-off-by: Nicholas Kazlauskas Reviewed-by: David Francis Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dce/dce_clocks.c | 63 +++++++++++++++++-- .../gpu/drm/amd/display/dc/dce/dce_clocks.h | 2 + .../display/dc/dce110/dce110_hw_sequencer.c | 12 +++- .../drm/amd/display/dc/inc/hw/display_clock.h | 5 ++ 4 files changed, 76 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index 0782b74624d7..103dc3cf1c43 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -255,13 +255,12 @@ static int dce_set_clock( pxl_clk_params.target_pixel_clock = requested_clk_khz; pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; - if (clk_dce->dfs_bypass_enabled) + if (clk_dce->dfs_bypass_active) pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); - if (clk_dce->dfs_bypass_enabled) { - + if (clk_dce->dfs_bypass_active) { /* Cache the fixed display clock*/ clk_dce->dfs_bypass_disp_clk = pxl_clk_params.dfs_bypass_display_clock; @@ -677,6 +676,61 @@ static void dce_update_clocks(struct dccg *dccg, } } +static bool dce_update_dfs_bypass( + struct dccg *dccg, + struct dc *dc, + struct dc_state *context, + int requested_clock_khz) +{ + struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg); + struct resource_context *res_ctx = &context->res_ctx; + enum signal_type signal_type = SIGNAL_TYPE_NONE; + bool was_active = clk_dce->dfs_bypass_active; + int i; + + /* Disable DFS bypass by default. */ + clk_dce->dfs_bypass_active = false; + + /* Check that DFS bypass is available. */ + if (!clk_dce->dfs_bypass_enabled) + goto update; + + /* Check if the requested display clock is below the threshold. 
*/ + if (requested_clock_khz >= 400000) + goto update; + + /* DFS-bypass should only be enabled on single stream setups */ + if (context->stream_count != 1) + goto update; + + /* Check that the stream's signal type is an embedded panel */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (res_ctx->pipe_ctx[i].stream) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; + + signal_type = pipe_ctx->stream->sink->link->connector_signal; + break; + } + } + + if (signal_type == SIGNAL_TYPE_EDP || + signal_type == SIGNAL_TYPE_LVDS) + clk_dce->dfs_bypass_active = true; + +update: + /* Update the clock state. We don't need to respect safe_to_lower + * because DFS bypass should always be greater than the current + * display clock frequency. + */ + if (was_active != clk_dce->dfs_bypass_active) { + dccg->clks.dispclk_khz = + dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz); + return true; + } + + return false; +} + #ifdef CONFIG_DRM_AMD_DC_DCN1_0 static const struct display_clock_funcs dcn1_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, @@ -700,7 +754,8 @@ static const struct display_clock_funcs dce112_funcs = { static const struct display_clock_funcs dce110_funcs = { .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, .set_dispclk = dce_psr_set_clock, - .update_clocks = dce_update_clocks + .update_clocks = dce_update_clocks, + .update_dfs_bypass = dce_update_dfs_bypass }; static const struct display_clock_funcs dce_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h index 8a6b2d328467..8b5a53e98ad9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h @@ -78,6 +78,8 @@ struct dce_dccg { /* Cache the status of DFS-bypass feature*/ bool dfs_bypass_enabled; + /* True if the DFS-bypass feature is enabled and active. */ + bool dfs_bypass_active; /* Cache the display clock returned by VBIOS if DFS-bypass is enabled. 
* This is basically "Crystal Frequency In KHz" (XTALIN) frequency */ int dfs_bypass_disp_clk; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 14384d9675a8..2f2c5155c5aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2566,6 +2566,7 @@ void dce110_set_bandwidth( bool decrease_allowed) { struct dc_clocks req_clks; + struct dccg *dccg = dc->res_pool->dccg; req_clks.dispclk_khz = context->bw.dce.dispclk_khz; req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context); @@ -2575,8 +2576,15 @@ void dce110_set_bandwidth( else dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); - dc->res_pool->dccg->funcs->update_clocks( - dc->res_pool->dccg, + if (dccg->funcs->update_dfs_bypass) + dccg->funcs->update_dfs_bypass( + dccg, + dc, + context, + req_clks.dispclk_khz); + + dccg->funcs->update_clocks( + dccg, &req_clks, decrease_allowed); pplib_apply_display_requirements(dc, context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h index 3c7ccb68ecdb..689faa16c0ae 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h @@ -53,6 +53,11 @@ struct display_clock_funcs { int requested_clock_khz); int (*get_dp_ref_clk_frequency)(struct dccg *dccg); + + bool (*update_dfs_bypass)(struct dccg *dccg, + struct dc *dc, + struct dc_state *context, + int requested_clock_khz); }; #endif /* __DISPLAY_CLOCK_H__ */ -- GitLab From d23ee13fba23a3039971a976b2c4857cb5ba9c73 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 30 Jul 2018 16:59:09 +0800 Subject: [PATCH 0339/1692] drm/amdgpu: Add amdgpu_gfx_off_ctrl function v2: 1. drop the special handling for the hw IP suggested by hawking and Christian. 2. refine the variable name suggested by Flora. This funciton as the entry of gfx off feature. we arbitrat gfx off feature enable/disable in this function. 
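For illustration only (not part of this patch): a client that needs the GFX block to stay powered, for example around a register access, brackets the access with a disable/enable pair. The requests are reference counted, so nested or concurrent users compose naturally. A minimal sketch using the entry point added below:

	/* Sketch: veto GFXOFF for the duration of the access, then drop
	 * the veto again so the feature can be re-enabled.
	 */
	static u32 example_read_gfx_reg(struct amdgpu_device *adev, u32 reg)
	{
		u32 val;

		amdgpu_gfx_off_ctrl(adev, false);	/* disable GFXOFF: req_count++ */
		val = RREG32(reg);
		amdgpu_gfx_off_ctrl(adev, true);	/* cancel our request: req_count-- */

		return val;
	}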
Reviewed-by: Hawking Zhang Reviewed-by: Felix Kuehling Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 36 ++++++++++++++++++++++ 3 files changed, 43 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 447c4c7a36d6..47fbe8f54036 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -950,6 +950,10 @@ struct amdgpu_gfx { /* NGG */ struct amdgpu_ngg ngg; + /* gfx off */ + bool gfx_off_state; /* true: enabled, false: disabled */ + struct mutex gfx_off_mutex; + uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */ /* pipe reservation */ struct mutex pipe_reserve_mutex; DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); @@ -1774,6 +1778,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 array_size); bool amdgpu_device_is_px(struct drm_device *dev); +void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8ab5ccbc14ac..2068b7fe7523 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2367,6 +2367,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->gfx.gpu_clock_mutex); mutex_init(&adev->srbm_mutex); mutex_init(&adev->gfx.pipe_reserve_mutex); + mutex_init(&adev->gfx.gfx_off_mutex); mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); @@ -2394,6 +2395,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_device_ip_late_init_func_handler); + adev->gfx.gfx_off_req_count = 1; adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false; /* Registers mapping */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 239bf2a4b3c6..1cdb26471a03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -340,3 +340,39 @@ void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev) &ring->mqd_gpu_addr, &ring->mqd_ptr); } + +/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable + * + * @adev: amdgpu_device pointer + * @bool enable true: enable gfx off feature, false: disable gfx off feature + * + * 1. gfx off feature will be enabled by gfx ip after gfx cg gp enabled. + * 2. other client can send request to disable gfx off feature, the request should be honored. + * 3. other client can cancel their request of disable gfx off feature + * 4. other client should not send request to enable gfx off feature before disable gfx off feature. 
+ */ + +void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) +{ + if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK)) + return; + + if (!adev->powerplay.pp_funcs->set_powergating_by_smu) + return; + + mutex_lock(&adev->gfx.gfx_off_mutex); + + if (!enable) + adev->gfx.gfx_off_req_count++; + else if (adev->gfx.gfx_off_req_count > 0) + adev->gfx.gfx_off_req_count--; + + if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { + if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; + } else if (!enable && adev->gfx.gfx_off_state) { + if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) + adev->gfx.gfx_off_state = false; + } + mutex_unlock(&adev->gfx.gfx_off_mutex); +} -- GitLab From 1e317b99f0c244bd8830918fdae9715210baf4fe Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 27 Jul 2018 21:06:30 +0800 Subject: [PATCH 0340/1692] drm/amdgpu: Put enable gfx off feature to a delay thread delay to enable gfx off feature to avoid gfx on/off frequently suggested by Alex and Evan. Reviewed-by: Hawking Zhang Reviewed-by: Felix Kuehling Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 15 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 8 ++++++-- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 47fbe8f54036..6a8ed9b5d4fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -954,6 +954,8 @@ struct amdgpu_gfx { bool gfx_off_state; /* true: enabled, false: disabled */ struct mutex gfx_off_mutex; uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */ + struct delayed_work gfx_off_delay_work; + /* pipe reservation */ struct mutex pipe_reserve_mutex; DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2068b7fe7523..82bc329919fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1925,6 +1925,19 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) DRM_ERROR("ib ring test failed (%d).\n", r); } +static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); + + mutex_lock(&adev->gfx.gfx_off_mutex); + if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { + if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; + } + mutex_unlock(&adev->gfx.gfx_off_mutex); +} + /** * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) * @@ -2394,6 +2407,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_device_ip_late_init_func_handler); + INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, + amdgpu_device_delay_enable_gfx_off); adev->gfx.gfx_off_req_count = 1; adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? 
true : false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1cdb26471a03..11d4d9f93b95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -26,6 +26,9 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" +/* 0.5 second timeout */ +#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(500) + /* * GPU scratch registers helpers function. */ @@ -360,6 +363,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (!adev->powerplay.pp_funcs->set_powergating_by_smu) return; + mutex_lock(&adev->gfx.gfx_off_mutex); if (!enable) @@ -368,11 +372,11 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) adev->gfx.gfx_off_req_count--; if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) - adev->gfx.gfx_off_state = true; + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE); } else if (!enable && adev->gfx.gfx_off_state) { if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) adev->gfx.gfx_off_state = false; } + mutex_unlock(&adev->gfx.gfx_off_mutex); } -- GitLab From 408acede8732bec629959f8628c46ab4517e3995 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 27 Jul 2018 14:55:09 +0800 Subject: [PATCH 0341/1692] drm/amdgpu: Ctrl gfx off via amdgpu_gfx_off_ctrl use amdgpu_gfx_off_ctrl function so driver can arbitrate whether the gfx ip can be power off or power on. Reviewed-by: Hawking Zhang Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++---- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 82bc329919fe..6d0ffbf5b337 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1828,8 +1828,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) adev->ip_blocks[i].version->funcs->name, r); return r; } - if (adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false); + amdgpu_gfx_off_ctrl(adev, false); r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { @@ -2012,8 +2011,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) } /* call smu to disable gfx off feature first when suspend */ - if (adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false); + amdgpu_gfx_off_ctrl(adev, false); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ef00d14f8645..fd31d3b27819 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3783,13 +3783,11 @@ static int gfx_v9_0_set_powergating_state(void *handle, gfx_v9_0_update_gfx_mg_power_gating(adev, enable); /* set gfx off through smu */ - if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true); + amdgpu_gfx_off_ctrl(adev, true); break; case CHIP_VEGA12: /* set gfx off through smu */ - if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu) - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, 
true); + amdgpu_gfx_off_ctrl(adev, true); break; default: break; -- GitLab From 3fded222f4bf7f4c56ef4854872a39a4de08f7a8 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 27 Jul 2018 17:00:02 +0800 Subject: [PATCH 0342/1692] drm/amdgpu: Disable gfx off if VCN is busy this patch is a workaround for the gpu hang at video begin/end time if gfx off is enabled. Reviewed-by: Hawking Zhang Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index fd654a4406db..76e59a6e8311 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -217,6 +217,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg); if (fences == 0) { + amdgpu_gfx_off_ctrl(adev, true); if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, false); else @@ -233,6 +234,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); if (set_clocks) { + amdgpu_gfx_off_ctrl(adev, false); if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, true); else -- GitLab From fd28705388ef5244a963aa5ec70751d522c214c3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 27 Jul 2018 14:10:45 +0800 Subject: [PATCH 0343/1692] drm/amd/pp: Delete duplicated interface in hwmgr_func gfx off support in smu can be via powergate_gfx interface. so remove the gfx_off_control interface. Reviewed-by: Hawking Zhang Reviewed-by: Evan Quan Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 1 - drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index a63e00653324..26d130a91725 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1185,7 +1185,6 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .dynamic_state_management_disable = smu10_disable_dpm_tasks, .powergate_mmhub = smu10_powergate_mmhub, .smus_notify_pwe = smu10_smus_notify_pwe, - .gfx_off_control = smu10_gfx_off_control, .display_clock_voltage_request = smu10_display_clock_voltage_request, .powergate_gfx = smu10_gfx_off_control, }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 7e58a0da5ccf..88f451764da9 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -298,7 +298,6 @@ struct pp_hwmgr_func { int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock); int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); - int (*gfx_off_control)(struct pp_hwmgr *hwmgr, bool enable); int (*power_off_asic)(struct pp_hwmgr *hwmgr); int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); -- GitLab From 448fe1928ce415b8cae0425e5c7f066d6bd8b2de Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 2 Aug 2018 16:12:39 +0800 Subject: [PATCH 0344/1692] drm/amdgpu: move gfx definitions into amdgpu_gfx header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
Demangle amdgpu.h Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 282 +------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 34 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 349 +++++++++++++++++++++--- 3 files changed, 342 insertions(+), 323 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6a8ed9b5d4fd..ddbc5142bdf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -69,6 +69,7 @@ #include "amdgpu_vcn.h" #include "amdgpu_mn.h" #include "amdgpu_gmc.h" +#include "amdgpu_gfx.h" #include "amdgpu_dm.h" #include "amdgpu_virt.h" #include "amdgpu_gart.h" @@ -171,13 +172,6 @@ extern int amdgpu_cik_support; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) -/* GFX current status */ -#define AMDGPU_GFX_NORMAL_MODE 0x00000000L -#define AMDGPU_GFX_SAFE_MODE 0x00000001L -#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L -#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L -#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L - /* max cursor sizes (in pixels) */ #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 @@ -690,277 +684,6 @@ struct amdgpu_fpriv { struct amdgpu_ctx_mgr ctx_mgr; }; -/* - * GFX stuff - */ -#include "clearstate_defs.h" - -struct amdgpu_rlc_funcs { - void (*enter_safe_mode)(struct amdgpu_device *adev); - void (*exit_safe_mode)(struct amdgpu_device *adev); -}; - -struct amdgpu_rlc { - /* for power gating */ - struct amdgpu_bo *save_restore_obj; - uint64_t save_restore_gpu_addr; - volatile uint32_t *sr_ptr; - const u32 *reg_list; - u32 reg_list_size; - /* for clear state */ - struct amdgpu_bo *clear_state_obj; - uint64_t clear_state_gpu_addr; - volatile uint32_t *cs_ptr; - const struct cs_section_def *cs_data; - u32 clear_state_size; - /* for cp tables */ - struct amdgpu_bo *cp_table_obj; - uint64_t cp_table_gpu_addr; - volatile uint32_t *cp_table_ptr; - u32 cp_table_size; - - /* safe mode for updating CG/PG state */ - bool in_safe_mode; - const struct amdgpu_rlc_funcs *funcs; - - /* for firmware data */ - u32 save_and_restore_offset; - u32 clear_state_descriptor_offset; - u32 avail_scratch_ram_locations; - u32 reg_restore_list_size; - u32 reg_list_format_start; - u32 reg_list_format_separate_start; - u32 starting_offsets_start; - u32 reg_list_format_size_bytes; - u32 reg_list_size_bytes; - u32 reg_list_format_direct_reg_list_length; - u32 save_restore_list_cntl_size_bytes; - u32 save_restore_list_gpm_size_bytes; - u32 save_restore_list_srm_size_bytes; - - u32 *register_list_format; - u32 *register_restore; - u8 *save_restore_list_cntl; - u8 *save_restore_list_gpm; - u8 *save_restore_list_srm; - - bool is_rlc_v2_1; -}; - -#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES - -struct amdgpu_mec { - struct amdgpu_bo *hpd_eop_obj; - u64 hpd_eop_gpu_addr; - struct amdgpu_bo *mec_fw_obj; - u64 mec_fw_gpu_addr; - u32 num_mec; - u32 num_pipe_per_mec; - u32 num_queue_per_pipe; - void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; - - /* These are the resources for which amdgpu takes ownership */ - DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); -}; - -struct amdgpu_kiq { - u64 eop_gpu_addr; - struct amdgpu_bo *eop_obj; - spinlock_t ring_lock; - struct amdgpu_ring ring; - struct amdgpu_irq_src irq; -}; - -/* - * GPU scratch registers structures, functions & helpers - */ -struct amdgpu_scratch { - unsigned num_reg; - uint32_t reg_base; - uint32_t free_mask; -}; - -/* - * GFX configurations - */ 
-#define AMDGPU_GFX_MAX_SE 4 -#define AMDGPU_GFX_MAX_SH_PER_SE 2 - -struct amdgpu_rb_config { - uint32_t rb_backend_disable; - uint32_t user_rb_backend_disable; - uint32_t raster_config; - uint32_t raster_config_1; -}; - -struct gb_addr_config { - uint16_t pipe_interleave_size; - uint8_t num_pipes; - uint8_t max_compress_frags; - uint8_t num_banks; - uint8_t num_se; - uint8_t num_rb_per_se; -}; - -struct amdgpu_gfx_config { - unsigned max_shader_engines; - unsigned max_tile_pipes; - unsigned max_cu_per_sh; - unsigned max_sh_per_se; - unsigned max_backends_per_se; - unsigned max_texture_channel_caches; - unsigned max_gprs; - unsigned max_gs_threads; - unsigned max_hw_contexts; - unsigned sc_prim_fifo_size_frontend; - unsigned sc_prim_fifo_size_backend; - unsigned sc_hiz_tile_fifo_size; - unsigned sc_earlyz_tile_fifo_size; - - unsigned num_tile_pipes; - unsigned backend_enable_mask; - unsigned mem_max_burst_length_bytes; - unsigned mem_row_size_in_kb; - unsigned shader_engine_tile_size; - unsigned num_gpus; - unsigned multi_gpu_tile_size; - unsigned mc_arb_ramcfg; - unsigned gb_addr_config; - unsigned num_rbs; - unsigned gs_vgt_table_depth; - unsigned gs_prim_buffer_depth; - - uint32_t tile_mode_array[32]; - uint32_t macrotile_mode_array[16]; - - struct gb_addr_config gb_addr_config_fields; - struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE]; - - /* gfx configure feature */ - uint32_t double_offchip_lds_buf; - /* cached value of DB_DEBUG2 */ - uint32_t db_debug2; -}; - -struct amdgpu_cu_info { - uint32_t simd_per_cu; - uint32_t max_waves_per_simd; - uint32_t wave_front_size; - uint32_t max_scratch_slots_per_cu; - uint32_t lds_size; - - /* total active CU number */ - uint32_t number; - uint32_t ao_cu_mask; - uint32_t ao_cu_bitmap[4][4]; - uint32_t bitmap[4][4]; -}; - -struct amdgpu_gfx_funcs { - /* get the gpu clock counter */ - uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); - void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); - void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); - void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst); - void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst); - void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue); -}; - -struct amdgpu_ngg_buf { - struct amdgpu_bo *bo; - uint64_t gpu_addr; - uint32_t size; - uint32_t bo_size; -}; - -enum { - NGG_PRIM = 0, - NGG_POS, - NGG_CNTL, - NGG_PARAM, - NGG_BUF_MAX -}; - -struct amdgpu_ngg { - struct amdgpu_ngg_buf buf[NGG_BUF_MAX]; - uint32_t gds_reserve_addr; - uint32_t gds_reserve_size; - bool init; -}; - -struct sq_work { - struct work_struct work; - unsigned ih_data; -}; - -struct amdgpu_gfx { - struct mutex gpu_clock_mutex; - struct amdgpu_gfx_config config; - struct amdgpu_rlc rlc; - struct amdgpu_mec mec; - struct amdgpu_kiq kiq; - struct amdgpu_scratch scratch; - const struct firmware *me_fw; /* ME firmware */ - uint32_t me_fw_version; - const struct firmware *pfp_fw; /* PFP firmware */ - uint32_t pfp_fw_version; - const struct firmware *ce_fw; /* CE firmware */ - uint32_t ce_fw_version; - const struct firmware *rlc_fw; /* RLC firmware */ - uint32_t rlc_fw_version; - const struct firmware *mec_fw; /* MEC firmware */ - uint32_t mec_fw_version; - const struct firmware *mec2_fw; /* 
MEC2 firmware */ - uint32_t mec2_fw_version; - uint32_t me_feature_version; - uint32_t ce_feature_version; - uint32_t pfp_feature_version; - uint32_t rlc_feature_version; - uint32_t rlc_srlc_fw_version; - uint32_t rlc_srlc_feature_version; - uint32_t rlc_srlg_fw_version; - uint32_t rlc_srlg_feature_version; - uint32_t rlc_srls_fw_version; - uint32_t rlc_srls_feature_version; - uint32_t mec_feature_version; - uint32_t mec2_feature_version; - struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; - unsigned num_gfx_rings; - struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; - unsigned num_compute_rings; - struct amdgpu_irq_src eop_irq; - struct amdgpu_irq_src priv_reg_irq; - struct amdgpu_irq_src priv_inst_irq; - struct amdgpu_irq_src cp_ecc_error_irq; - struct amdgpu_irq_src sq_irq; - struct sq_work sq_work; - - /* gfx status */ - uint32_t gfx_current_status; - /* ce ram size*/ - unsigned ce_ram_size; - struct amdgpu_cu_info cu_info; - const struct amdgpu_gfx_funcs *funcs; - - /* reset mask */ - uint32_t grbm_soft_reset; - uint32_t srbm_soft_reset; - /* s3/s4 mask */ - bool in_suspend; - /* NGG */ - struct amdgpu_ngg ngg; - - /* gfx off */ - bool gfx_off_state; /* true: enabled, false: disabled */ - struct mutex gfx_off_mutex; - uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */ - struct delayed_work gfx_off_delay_work; - - /* pipe reservation */ - struct mutex pipe_reserve_mutex; - DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); -}; - int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, @@ -1755,11 +1478,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) -#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) -#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) -#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q)) /* Common functions */ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 11d4d9f93b95..bbb81e23020e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -30,8 +30,40 @@ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(500) /* - * GPU scratch registers helpers function. + * GPU GFX IP block helpers function. 
*/ + +int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev, int mec, + int pipe, int queue) +{ + int bit = 0; + + bit += mec * adev->gfx.mec.num_pipe_per_mec + * adev->gfx.mec.num_queue_per_pipe; + bit += pipe * adev->gfx.mec.num_queue_per_pipe; + bit += queue; + + return bit; +} + +void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit, + int *mec, int *pipe, int *queue) +{ + *queue = bit % adev->gfx.mec.num_queue_per_pipe; + *pipe = (bit / adev->gfx.mec.num_queue_per_pipe) + % adev->gfx.mec.num_pipe_per_mec; + *mec = (bit / adev->gfx.mec.num_queue_per_pipe) + / adev->gfx.mec.num_pipe_per_mec; + +} + +bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, + int mec, int pipe, int queue) +{ + return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue), + adev->gfx.mec.queue_bitmap); +} + /** * amdgpu_gfx_scratch_get - Allocate a scratch register * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 1f279050d334..4e3d147c2f1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -24,28 +24,296 @@ #ifndef __AMDGPU_GFX_H__ #define __AMDGPU_GFX_H__ -int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); -void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); +/* + * GFX stuff + */ +#include "clearstate_defs.h" +#include "amdgpu_ring.h" -void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, - unsigned max_sh); +/* GFX current status */ +#define AMDGPU_GFX_NORMAL_MODE 0x00000000L +#define AMDGPU_GFX_SAFE_MODE 0x00000001L +#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L +#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L +#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L -void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev); -int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq); +struct amdgpu_rlc_funcs { + void (*enter_safe_mode)(struct amdgpu_device *adev); + void (*exit_safe_mode)(struct amdgpu_device *adev); +}; -void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq); +struct amdgpu_rlc { + /* for power gating */ + struct amdgpu_bo *save_restore_obj; + uint64_t save_restore_gpu_addr; + volatile uint32_t *sr_ptr; + const u32 *reg_list; + u32 reg_list_size; + /* for clear state */ + struct amdgpu_bo *clear_state_obj; + uint64_t clear_state_gpu_addr; + volatile uint32_t *cs_ptr; + const struct cs_section_def *cs_data; + u32 clear_state_size; + /* for cp tables */ + struct amdgpu_bo *cp_table_obj; + uint64_t cp_table_gpu_addr; + volatile uint32_t *cp_table_ptr; + u32 cp_table_size; -void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev); -int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, - unsigned hpd_size); + /* safe mode for updating CG/PG state */ + bool in_safe_mode; + const struct amdgpu_rlc_funcs *funcs; -int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev, - unsigned mqd_size); -void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev); + /* for firmware data */ + u32 save_and_restore_offset; + u32 clear_state_descriptor_offset; + u32 avail_scratch_ram_locations; + u32 reg_restore_list_size; + u32 reg_list_format_start; + u32 reg_list_format_separate_start; + u32 starting_offsets_start; + u32 reg_list_format_size_bytes; + u32 reg_list_size_bytes; + u32 reg_list_format_direct_reg_list_length; + u32 save_restore_list_cntl_size_bytes; + u32 save_restore_list_gpm_size_bytes; + u32 
save_restore_list_srm_size_bytes; + + u32 *register_list_format; + u32 *register_restore; + u8 *save_restore_list_cntl; + u8 *save_restore_list_gpm; + u8 *save_restore_list_srm; + + bool is_rlc_v2_1; +}; + +#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES + +struct amdgpu_mec { + struct amdgpu_bo *hpd_eop_obj; + u64 hpd_eop_gpu_addr; + struct amdgpu_bo *mec_fw_obj; + u64 mec_fw_gpu_addr; + u32 num_mec; + u32 num_pipe_per_mec; + u32 num_queue_per_pipe; + void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1]; + + /* These are the resources for which amdgpu takes ownership */ + DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); +}; + +struct amdgpu_kiq { + u64 eop_gpu_addr; + struct amdgpu_bo *eop_obj; + spinlock_t ring_lock; + struct amdgpu_ring ring; + struct amdgpu_irq_src irq; +}; + +/* + * GPU scratch registers structures, functions & helpers + */ +struct amdgpu_scratch { + unsigned num_reg; + uint32_t reg_base; + uint32_t free_mask; +}; + +/* + * GFX configurations + */ +#define AMDGPU_GFX_MAX_SE 4 +#define AMDGPU_GFX_MAX_SH_PER_SE 2 + +struct amdgpu_rb_config { + uint32_t rb_backend_disable; + uint32_t user_rb_backend_disable; + uint32_t raster_config; + uint32_t raster_config_1; +}; + +struct gb_addr_config { + uint16_t pipe_interleave_size; + uint8_t num_pipes; + uint8_t max_compress_frags; + uint8_t num_banks; + uint8_t num_se; + uint8_t num_rb_per_se; +}; + +struct amdgpu_gfx_config { + unsigned max_shader_engines; + unsigned max_tile_pipes; + unsigned max_cu_per_sh; + unsigned max_sh_per_se; + unsigned max_backends_per_se; + unsigned max_texture_channel_caches; + unsigned max_gprs; + unsigned max_gs_threads; + unsigned max_hw_contexts; + unsigned sc_prim_fifo_size_frontend; + unsigned sc_prim_fifo_size_backend; + unsigned sc_hiz_tile_fifo_size; + unsigned sc_earlyz_tile_fifo_size; + + unsigned num_tile_pipes; + unsigned backend_enable_mask; + unsigned mem_max_burst_length_bytes; + unsigned mem_row_size_in_kb; + unsigned shader_engine_tile_size; + unsigned num_gpus; + unsigned multi_gpu_tile_size; + unsigned mc_arb_ramcfg; + unsigned gb_addr_config; + unsigned num_rbs; + unsigned gs_vgt_table_depth; + unsigned gs_prim_buffer_depth; + + uint32_t tile_mode_array[32]; + uint32_t macrotile_mode_array[16]; + + struct gb_addr_config gb_addr_config_fields; + struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE]; + + /* gfx configure feature */ + uint32_t double_offchip_lds_buf; + /* cached value of DB_DEBUG2 */ + uint32_t db_debug2; +}; + +struct amdgpu_cu_info { + uint32_t simd_per_cu; + uint32_t max_waves_per_simd; + uint32_t wave_front_size; + uint32_t max_scratch_slots_per_cu; + uint32_t lds_size; + + /* total active CU number */ + uint32_t number; + uint32_t ao_cu_mask; + uint32_t ao_cu_bitmap[4][4]; + uint32_t bitmap[4][4]; +}; + +struct amdgpu_gfx_funcs { + /* get the gpu clock counter */ + uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); + void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance); + void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t *dst, int *no_fields); + void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t thread, uint32_t start, + uint32_t size, uint32_t *dst); + void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t start, uint32_t size, + uint32_t *dst); + void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, + u32 queue); +}; + +struct amdgpu_ngg_buf { + 
struct amdgpu_bo *bo; + uint64_t gpu_addr; + uint32_t size; + uint32_t bo_size; +}; + +enum { + NGG_PRIM = 0, + NGG_POS, + NGG_CNTL, + NGG_PARAM, + NGG_BUF_MAX +}; + +struct amdgpu_ngg { + struct amdgpu_ngg_buf buf[NGG_BUF_MAX]; + uint32_t gds_reserve_addr; + uint32_t gds_reserve_size; + bool init; +}; + +struct sq_work { + struct work_struct work; + unsigned ih_data; +}; + +struct amdgpu_gfx { + struct mutex gpu_clock_mutex; + struct amdgpu_gfx_config config; + struct amdgpu_rlc rlc; + struct amdgpu_mec mec; + struct amdgpu_kiq kiq; + struct amdgpu_scratch scratch; + const struct firmware *me_fw; /* ME firmware */ + uint32_t me_fw_version; + const struct firmware *pfp_fw; /* PFP firmware */ + uint32_t pfp_fw_version; + const struct firmware *ce_fw; /* CE firmware */ + uint32_t ce_fw_version; + const struct firmware *rlc_fw; /* RLC firmware */ + uint32_t rlc_fw_version; + const struct firmware *mec_fw; /* MEC firmware */ + uint32_t mec_fw_version; + const struct firmware *mec2_fw; /* MEC2 firmware */ + uint32_t mec2_fw_version; + uint32_t me_feature_version; + uint32_t ce_feature_version; + uint32_t pfp_feature_version; + uint32_t rlc_feature_version; + uint32_t rlc_srlc_fw_version; + uint32_t rlc_srlc_feature_version; + uint32_t rlc_srlg_fw_version; + uint32_t rlc_srlg_feature_version; + uint32_t rlc_srls_fw_version; + uint32_t rlc_srls_feature_version; + uint32_t mec_feature_version; + uint32_t mec2_feature_version; + struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; + unsigned num_gfx_rings; + struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; + unsigned num_compute_rings; + struct amdgpu_irq_src eop_irq; + struct amdgpu_irq_src priv_reg_irq; + struct amdgpu_irq_src priv_inst_irq; + struct amdgpu_irq_src cp_ecc_error_irq; + struct amdgpu_irq_src sq_irq; + struct sq_work sq_work; + + /* gfx status */ + uint32_t gfx_current_status; + /* ce ram size*/ + unsigned ce_ram_size; + struct amdgpu_cu_info cu_info; + const struct amdgpu_gfx_funcs *funcs; + + /* reset mask */ + uint32_t grbm_soft_reset; + uint32_t srbm_soft_reset; + /* s3/s4 mask */ + bool in_suspend; + /* NGG */ + struct amdgpu_ngg ngg; + + /* gfx off */ + bool gfx_off_state; /* true: enabled, false: disabled */ + struct mutex gfx_off_mutex; + uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */ + struct delayed_work gfx_off_delay_work; + + /* pipe reservation */ + struct mutex pipe_reserve_mutex; + DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); +}; + +#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) +#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) +#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q)) /** * amdgpu_gfx_create_bitmask - create a bitmask @@ -60,34 +328,33 @@ static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width) return (u32)((1ULL << bit_width) - 1); } -static inline int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev, - int mec, int pipe, int queue) -{ - int bit = 0; +int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); +void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); - bit += mec * adev->gfx.mec.num_pipe_per_mec - * adev->gfx.mec.num_queue_per_pipe; - bit += pipe * adev->gfx.mec.num_queue_per_pipe; - bit += queue; +void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, + unsigned max_sh); - return bit; -} +int 
amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_irq_src *irq); -static inline void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit, - int *mec, int *pipe, int *queue) -{ - *queue = bit % adev->gfx.mec.num_queue_per_pipe; - *pipe = (bit / adev->gfx.mec.num_queue_per_pipe) - % adev->gfx.mec.num_pipe_per_mec; - *mec = (bit / adev->gfx.mec.num_queue_per_pipe) - / adev->gfx.mec.num_pipe_per_mec; +void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, + struct amdgpu_irq_src *irq); -} -static inline bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, - int mec, int pipe, int queue) -{ - return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue), - adev->gfx.mec.queue_bitmap); -} +void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev); +int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, + unsigned hpd_size); + +int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev, + unsigned mqd_size); +void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev); + +void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev); +int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev, int mec, + int pipe, int queue); +void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit, + int *mec, int *pipe, int *queue); +bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, + int pipe, int queue); #endif -- GitLab From aa47d117282cc32874e2749ce8ae94262b9edddf Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 2 Aug 2018 16:24:52 +0800 Subject: [PATCH 0345/1692] drm/amdgpu: move ih definitions into amdgpu_ih header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 14 -------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ddbc5142bdf8..3517abb73e55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -319,16 +319,6 @@ struct amdgpu_vm_pte_funcs { uint32_t incr, uint64_t flags); }; -/* provided by the ih block */ -struct amdgpu_ih_funcs { - /* ring read/write ptr handling, called from interrupt context */ - u32 (*get_wptr)(struct amdgpu_device *adev); - bool (*prescreen_iv)(struct amdgpu_device *adev); - void (*decode_iv)(struct amdgpu_device *adev, - struct amdgpu_iv_entry *entry); - void (*set_rptr)(struct amdgpu_device *adev); -}; - /* * BIOS. 
*/ @@ -1461,10 +1451,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) -#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) -#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev)) -#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) -#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 0e01f115bbe5..a23e1c0bed93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -76,6 +76,21 @@ struct amdgpu_iv_entry { const uint32_t *iv_entry; }; +/* provided by the ih block */ +struct amdgpu_ih_funcs { + /* ring read/write ptr handling, called from interrupt context */ + u32 (*get_wptr)(struct amdgpu_device *adev); + bool (*prescreen_iv)(struct amdgpu_device *adev); + void (*decode_iv)(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry); + void (*set_rptr)(struct amdgpu_device *adev); +}; + +#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) +#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev)) +#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) +#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) + int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, bool use_bus_addr); void amdgpu_ih_ring_fini(struct amdgpu_device *adev); -- GitLab From bb7743bc205177440ba98eca2359779ba943e03b Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 2 Aug 2018 17:23:33 +0800 Subject: [PATCH 0346/1692] drm/amdgpu: move sdma definitions into amdgpu_sdma header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. Furthermore, SDMA is used for moving and clearing the data buffer, so the header also need be included in ttm. 
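[Editor's note] A hedged illustration of why amdgpu_ttm.c now pulls in this header: the SDMA types and the lookup helper relocated here are what TTM-side code touches when it drives SDMA for buffer moves and clears. The wrapper function below is hypothetical; the helper and fields it uses are the ones moved by this patch.

    #include "amdgpu.h"
    #include "amdgpu_sdma.h" /* struct amdgpu_sdma_instance, lookup helper */

    /* Hypothetical example: report the SDMA firmware version backing a ring. */
    static uint32_t example_sdma_fw_version(struct amdgpu_ring *ring)
    {
            struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);

            /* amdgpu_get_sdma_instance() returns NULL for non-SDMA rings. */
            return sdma ? sdma->fw_version : 0;
    }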
Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu.h | 87 +------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 44 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 101 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 + 5 files changed, 148 insertions(+), 86 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index bfd332c95b61..e610656015b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -108,6 +108,7 @@ amdgpu-y += \ # add async DMA block amdgpu-y += \ + amdgpu_sdma.o \ sdma_v2_4.o \ sdma_v3_0.o \ sdma_v4_0.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3517abb73e55..159854eaf553 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -70,6 +70,7 @@ #include "amdgpu_mn.h" #include "amdgpu_gmc.h" #include "amdgpu_gfx.h" +#include "amdgpu_sdma.h" #include "amdgpu_dm.h" #include "amdgpu_virt.h" #include "amdgpu_gart.h" @@ -149,9 +150,6 @@ extern int amdgpu_cik_support; #define AMDGPUFB_CONN_LIMIT 4 #define AMDGPU_BIOS_NUM_SCRATCH 16 -/* max number of IP instances */ -#define AMDGPU_MAX_SDMA_INSTANCES 2 - /* hard reset data */ #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b @@ -199,13 +197,6 @@ enum amdgpu_cp_irq { AMDGPU_CP_IRQ_LAST }; -enum amdgpu_sdma_irq { - AMDGPU_SDMA_IRQ_TRAP0 = 0, - AMDGPU_SDMA_IRQ_TRAP1, - - AMDGPU_SDMA_IRQ_LAST -}; - enum amdgpu_thermal_irq { AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0, AMDGPU_THERMAL_IRQ_HIGH_TO_LOW, @@ -265,39 +256,6 @@ amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev, int amdgpu_device_ip_block_add(struct amdgpu_device *adev, const struct amdgpu_ip_block_version *ip_block_version); -/* provided by hw blocks that can move/clear data. 
e.g., gfx or sdma */ -struct amdgpu_buffer_funcs { - /* maximum bytes in a single operation */ - uint32_t copy_max_bytes; - - /* number of dw to reserve per operation */ - unsigned copy_num_dw; - - /* used for buffer migration */ - void (*emit_copy_buffer)(struct amdgpu_ib *ib, - /* src addr in bytes */ - uint64_t src_offset, - /* dst addr in bytes */ - uint64_t dst_offset, - /* number of byte to transfer */ - uint32_t byte_count); - - /* maximum bytes in a single operation */ - uint32_t fill_max_bytes; - - /* number of dw to reserve per operation */ - unsigned fill_num_dw; - - /* used for buffer clearing */ - void (*emit_fill_buffer)(struct amdgpu_ib *ib, - /* value to write to memory */ - uint32_t src_data, - /* dst addr in bytes */ - uint64_t dst_offset, - /* number of byte to fill */ - uint32_t byte_count); -}; - /* provided by hw blocks that can write ptes, e.g., sdma */ struct amdgpu_vm_pte_funcs { /* number of dw to reserve per operation */ @@ -756,31 +714,6 @@ struct amdgpu_wb { int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb); void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb); -/* - * SDMA - */ -struct amdgpu_sdma_instance { - /* SDMA firmware */ - const struct firmware *fw; - uint32_t fw_version; - uint32_t feature_version; - - struct amdgpu_ring ring; - bool burst_nop; -}; - -struct amdgpu_sdma { - struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; -#ifdef CONFIG_DRM_AMDGPU_SI - //SI DMA has a difference trap irq number for the second engine - struct amdgpu_irq_src trap_irq_1; -#endif - struct amdgpu_irq_src trap_irq; - struct amdgpu_irq_src illegal_inst_irq; - int num_instances; - uint32_t srbm_soft_reset; -}; - /* * Firmware */ @@ -1385,22 +1318,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) -static inline struct amdgpu_sdma_instance * -amdgpu_get_sdma_instance(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - int i; - - for (i = 0; i < adev->sdma.num_instances; i++) - if (&adev->sdma.instance[i].ring == ring) - break; - - if (i < AMDGPU_MAX_SDMA_INSTANCES) - return &adev->sdma.instance[i]; - else - return NULL; -} - /* * ASICs macro. */ @@ -1462,8 +1379,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) -#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) -#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c new file mode 100644 index 000000000000..bc9244b429ef --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -0,0 +1,44 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include +#include "amdgpu.h" +#include "amdgpu_sdma.h" + +/* + * GPU SDMA IP block helpers function. + */ + +struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) + if (&adev->sdma.instance[i].ring == ring) + break; + + if (i < AMDGPU_MAX_SDMA_INSTANCES) + return &adev->sdma.instance[i]; + else + return NULL; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h new file mode 100644 index 000000000000..d17503f0df8e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -0,0 +1,101 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_SDMA_H__ +#define __AMDGPU_SDMA_H__ + +/* max number of IP instances */ +#define AMDGPU_MAX_SDMA_INSTANCES 2 + +enum amdgpu_sdma_irq { + AMDGPU_SDMA_IRQ_TRAP0 = 0, + AMDGPU_SDMA_IRQ_TRAP1, + + AMDGPU_SDMA_IRQ_LAST +}; + +struct amdgpu_sdma_instance { + /* SDMA firmware */ + const struct firmware *fw; + uint32_t fw_version; + uint32_t feature_version; + + struct amdgpu_ring ring; + bool burst_nop; +}; + +struct amdgpu_sdma { + struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; +#ifdef CONFIG_DRM_AMDGPU_SI + //SI DMA has a difference trap irq number for the second engine + struct amdgpu_irq_src trap_irq_1; +#endif + struct amdgpu_irq_src trap_irq; + struct amdgpu_irq_src illegal_inst_irq; + int num_instances; + uint32_t srbm_soft_reset; +}; + +/* + * Provided by hw blocks that can move/clear data. e.g., gfx or sdma + * But currently, we use sdma to move data. + */ +struct amdgpu_buffer_funcs { + /* maximum bytes in a single operation */ + uint32_t copy_max_bytes; + + /* number of dw to reserve per operation */ + unsigned copy_num_dw; + + /* used for buffer migration */ + void (*emit_copy_buffer)(struct amdgpu_ib *ib, + /* src addr in bytes */ + uint64_t src_offset, + /* dst addr in bytes */ + uint64_t dst_offset, + /* number of byte to transfer */ + uint32_t byte_count); + + /* maximum bytes in a single operation */ + uint32_t fill_max_bytes; + + /* number of dw to reserve per operation */ + unsigned fill_num_dw; + + /* used for buffer clearing */ + void (*emit_fill_buffer)(struct amdgpu_ib *ib, + /* value to write to memory */ + uint32_t src_data, + /* dst addr in bytes */ + uint64_t dst_offset, + /* number of byte to fill */ + uint32_t byte_count); +}; + +#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) +#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) + +struct amdgpu_sdma_instance * +amdgpu_get_sdma_instance(struct amdgpu_ring *ring); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fcf421263fd9..c6611cff64c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -47,6 +47,7 @@ #include "amdgpu_object.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_sdma.h" #include "bif/bif_4_1_d.h" #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) -- GitLab From 55560046d5b0495833cc2cb3de43bbf4425da234 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 2 Aug 2018 17:47:15 +0800 Subject: [PATCH 0347/1692] drm/amdgpu: move firmware definitions into amdgpu_ucode header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. 
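[Editor's note] A small, hedged sketch of what a consumer of the relocated definitions looks like after this move; the function is hypothetical, while the enum, struct and field names are the ones moved into amdgpu_ucode.h by this patch.

    #include "amdgpu.h"
    #include "amdgpu_ucode.h" /* enum amdgpu_firmware_load_type, struct amdgpu_firmware */

    /* Hypothetical check: is firmware loaded through the PSP on this device? */
    static bool example_fw_loaded_by_psp(struct amdgpu_device *adev)
    {
            return adev->firmware.load_type == AMDGPU_FW_LOAD_PSP;
    }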
Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 27 ----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 24 ++++++++++++++++++++ 2 files changed, 24 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 159854eaf553..d39053d06b27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -714,33 +714,6 @@ struct amdgpu_wb { int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb); void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb); -/* - * Firmware - */ -enum amdgpu_firmware_load_type { - AMDGPU_FW_LOAD_DIRECT = 0, - AMDGPU_FW_LOAD_SMU, - AMDGPU_FW_LOAD_PSP, -}; - -struct amdgpu_firmware { - struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM]; - enum amdgpu_firmware_load_type load_type; - struct amdgpu_bo *fw_buf; - unsigned int fw_size; - unsigned int max_ucodes; - /* firmwares are loaded by psp instead of smu from vega10 */ - const struct amdgpu_psp_funcs *funcs; - struct amdgpu_bo *rbuf; - struct mutex mutex; - - /* gpu info firmware data pointer */ - const struct firmware *gpu_info_fw; - - void *fw_buf_ptr; - uint64_t fw_buf_mc; -}; - /* * Benchmarking */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index bdc472b6e641..a1edc70da979 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -205,6 +205,12 @@ enum AMDGPU_UCODE_STATUS { AMDGPU_UCODE_STATUS_LOADED, }; +enum amdgpu_firmware_load_type { + AMDGPU_FW_LOAD_DIRECT = 0, + AMDGPU_FW_LOAD_SMU, + AMDGPU_FW_LOAD_PSP, +}; + /* conform to smu_ucode_xfer_cz.h */ #define AMDGPU_SDMA0_UCODE_LOADED 0x00000001 #define AMDGPU_SDMA1_UCODE_LOADED 0x00000002 @@ -232,6 +238,24 @@ struct amdgpu_firmware_info { uint32_t tmr_mc_addr_hi; }; +struct amdgpu_firmware { + struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM]; + enum amdgpu_firmware_load_type load_type; + struct amdgpu_bo *fw_buf; + unsigned int fw_size; + unsigned int max_ucodes; + /* firmwares are loaded by psp instead of smu from vega10 */ + const struct amdgpu_psp_funcs *funcs; + struct amdgpu_bo *rbuf; + struct mutex mutex; + + /* gpu info firmware data pointer */ + const struct firmware *gpu_info_fw; + + void *fw_buf_ptr; + uint64_t fw_buf_mc; +}; + void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr); -- GitLab From 6462c0071b8df3d03f5020a3f9b785d94cad80c6 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 2 Aug 2018 17:54:21 +0800 Subject: [PATCH 0348/1692] drm/amdgpu: move psp macro into amdgpu_psp header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. 
Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 29 +++++++++++++++---------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d39053d06b27..0568140e38b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1353,7 +1353,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) -#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) /* Common functions */ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 129209686848..967712fd6abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -63,13 +63,16 @@ struct psp_funcs int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd); int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type); - int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type); + int (*ring_create)(struct psp_context *psp, + enum psp_ring_type ring_type); int (*ring_stop)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_destroy)(struct psp_context *psp, enum psp_ring_type ring_type); - int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode, - uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index); + int (*cmd_submit)(struct psp_context *psp, + struct amdgpu_firmware_info *ucode, + uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, + int index); bool (*compare_sram_data)(struct psp_context *psp, struct amdgpu_firmware_info *ucode, enum AMDGPU_UCODE_ID ucode_type); @@ -83,11 +86,11 @@ struct psp_context struct psp_ring km_ring; struct psp_gfx_cmd_resp *cmd; - const struct psp_funcs *funcs; + const struct psp_funcs *funcs; /* fence buffer */ - struct amdgpu_bo *fw_pri_bo; - uint64_t fw_pri_mc_addr; + struct amdgpu_bo *fw_pri_bo; + uint64_t fw_pri_mc_addr; void *fw_pri_buf; /* sos firmware */ @@ -100,8 +103,8 @@ struct psp_context uint8_t *sos_start_addr; /* tmr buffer */ - struct amdgpu_bo *tmr_bo; - uint64_t tmr_mc_addr; + struct amdgpu_bo *tmr_bo; + uint64_t tmr_mc_addr; void *tmr_buf; /* asd firmware and buffer */ @@ -110,13 +113,13 @@ struct psp_context uint32_t asd_feature_version; uint32_t asd_ucode_size; uint8_t *asd_start_addr; - struct amdgpu_bo *asd_shared_bo; - uint64_t asd_shared_mc_addr; + struct amdgpu_bo *asd_shared_bo; + uint64_t asd_shared_mc_addr; void *asd_shared_buf; /* fence buffer */ - struct amdgpu_bo *fence_buf_bo; - uint64_t fence_buf_mc_addr; + struct amdgpu_bo *fence_buf_bo; + uint64_t fence_buf_mc_addr; void *fence_buf; /* cmd buffer */ @@ -150,6 +153,8 @@ struct amdgpu_psp_funcs { #define psp_mode1_reset(psp) \ ((psp)->funcs->mode1_reset ? 
(psp)->funcs->mode1_reset((psp)) : false) +#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; -- GitLab From 2cddc50e98193f2c4aab10d05550b5ffe7587e73 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 13 Aug 2018 11:41:35 -0500 Subject: [PATCH 0349/1692] drm/amdgpu: move gem definitions into amdgpu_gem header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 61 +-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 92 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 + 11 files changed, 102 insertions(+), 60 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 0568140e38b3..7261068f9cca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -77,6 +77,7 @@ #include "amdgpu_debugfs.h" #include "amdgpu_job.h" #include "amdgpu_bo_list.h" +#include "amdgpu_gem.h" /* * Modules parameters. @@ -302,34 +303,6 @@ struct amdgpu_clock { uint32_t max_pixel_clock; }; -/* - * GEM. - */ - -#define AMDGPU_GEM_DOMAIN_MAX 0x3 -#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) - -void amdgpu_gem_object_free(struct drm_gem_object *obj); -int amdgpu_gem_object_open(struct drm_gem_object *obj, - struct drm_file *file_priv); -void amdgpu_gem_object_close(struct drm_gem_object *obj, - struct drm_file *file_priv); -unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); -struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); -struct drm_gem_object * -amdgpu_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg); -struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *gobj, - int flags); -struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf); -struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); -void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); -void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); -int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); - /* sub-allocation manager, it has to be protected by another lock. * By conception this is an helper for other part of the driver * like the indirect buffer or semaphore, which both have their @@ -379,22 +352,6 @@ struct amdgpu_sa_bo { struct dma_fence *fence; }; -/* - * GEM objects. 
- */ -void amdgpu_gem_force_release(struct amdgpu_device *adev); -int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, - int alignment, u32 initial_domain, - u64 flags, enum ttm_bo_type type, - struct reservation_object *resv, - struct drm_gem_object **obj); - -int amdgpu_mode_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args); -int amdgpu_mode_dumb_mmap(struct drm_file *filp, - struct drm_device *dev, - uint32_t handle, uint64_t *offset_p); int amdgpu_fence_slab_init(void); void amdgpu_fence_slab_fini(void); @@ -791,23 +748,9 @@ struct amdgpu_asic_funcs { /* * IOCTL. */ -int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); -int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); -int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); -int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); -int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); -int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); @@ -815,8 +758,6 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); /* VRAM scratch page for HDP bug, default vram page */ struct amdgpu_vram_scratch { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 09703c87d676..dc3b2f980d87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -32,6 +32,7 @@ #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_gmc.h" +#include "amdgpu_gem.h" static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, struct drm_amdgpu_cs_chunk_fence *data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8843a06360fa..75c9433ef300 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -36,6 +36,7 @@ #include "amdgpu.h" #include "amdgpu_irq.h" +#include "amdgpu_gem.h" #include "amdgpu_amdkfd.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 69c5d22f29bd..5cbde74b97dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -33,6 +33,7 @@ #include #include "amdgpu.h" #include "cikd.h" +#include "amdgpu_gem.h" #include diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h new file mode 100644 index 000000000000..d63daba9b17c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -0,0 +1,92 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __AMDGPU_GEM_H__ +#define __AMDGPU_GEM_H__ + +#include +#include + +/* + * GEM. + */ + +#define AMDGPU_GEM_DOMAIN_MAX 0x3 +#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) + +void amdgpu_gem_object_free(struct drm_gem_object *obj); +int amdgpu_gem_object_open(struct drm_gem_object *obj, + struct drm_file *file_priv); +void amdgpu_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv); +unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); +struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object * +amdgpu_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg); +struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, + int flags); +struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); +struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); +void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); +void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); + +/* + * GEM objects. 
+ */ +void amdgpu_gem_force_release(struct amdgpu_device *adev); +int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, + int alignment, u32 initial_domain, + u64 flags, enum ttm_bo_type type, + struct reservation_object *resv, + struct drm_gem_object **obj); + +int amdgpu_mode_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int amdgpu_mode_dumb_mmap(struct drm_file *filp, + struct drm_device *dev, + uint32_t handle, uint64_t *offset_p); + +int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + +int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bd98cc5fb97b..20645ea719b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -37,6 +37,7 @@ #include #include #include "amdgpu_amdkfd.h" +#include "amdgpu_gem.h" /** * amdgpu_driver_unload_kms - Main unload function for KMS. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 1c5d97f4b4dd..2686297e34e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -35,6 +35,7 @@ #include "amdgpu.h" #include "amdgpu_display.h" +#include "amdgpu_gem.h" #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index ad151fefa41f..0a0a4dcbea2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "gmc_v6_0.h" #include "amdgpu_ucode.h" +#include "amdgpu_gem.h" #include "bif/bif_3_0_d.h" #include "bif/bif_3_0_sh_mask.h" diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index f8d8a3a73e42..93ea19456e91 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -29,6 +29,7 @@ #include "gmc_v7_0.h" #include "amdgpu_ucode.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_gem.h" #include "bif/bif_4_1_d.h" #include "bif/bif_4_1_sh_mask.h" diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9333109b210d..24dd86725b6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -27,6 +27,7 @@ #include "gmc_v8_0.h" #include "amdgpu_ucode.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_gem.h" #include "gmc/gmc_8_1_d.h" #include "gmc/gmc_8_1_sh_mask.h" diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 72f8018fa2a8..7300be4816a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "gmc_v9_0.h" #include "amdgpu_atomfirmware.h" +#include "amdgpu_gem.h" #include "hdp/hdp_4_0_offset.h" #include 
"hdp/hdp_4_0_sh_mask.h" -- GitLab From 1b369d3c0d9364df702fc9e50dd9f471b640ddff Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Thu, 26 Jul 2018 16:27:48 -0400 Subject: [PATCH 0350/1692] drm/amd/display: pass the right num of modes added [why] In case if edid is null or corrupted we need to manually add a single failsafe mode (640x480). If zero modes returned DRM adds a different failsafe mode that is not accepted by DP 1.2 compliance test [how] Return the number of modes manually added Signed-off-by: Mikita Lipski Reviewed-by: Sun peng Li Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 800f481a6995..d8261fe6a04f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3468,7 +3468,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) encoder = helper->best_encoder(connector); if (!edid || !drm_edid_is_valid(edid)) { - drm_add_modes_noedid(connector, 640, 480); + amdgpu_dm_connector->num_modes = + drm_add_modes_noedid(connector, 640, 480); } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); amdgpu_dm_connector_add_common_modes(encoder, connector); -- GitLab From 9b5349f74a85a6aa05c06d30f10b3a83d6ec00b1 Mon Sep 17 00:00:00 2001 From: Martin Tsai Date: Fri, 27 Jul 2018 15:39:47 +0800 Subject: [PATCH 0351/1692] drm/amd/display: correct image viewport calculation [why] We didn't transfer the camera/video viewport coordinate when doing rotation and mirror. [how] To correct the viewport coordinate in calculate_viewport(). Signed-off-by: Martin Tsai Reviewed-by: Charlene Liu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/core/dc_resource.c | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ea6beccfd89d..d10314016edb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -487,6 +487,18 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state; bool sec_split = pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; + bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; + + /* + * Need to calculate the scan direction for viewport to properly determine offset + */ + if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { + flip_vert_scan_dir = true; + flip_horz_scan_dir = true; + } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90) + flip_vert_scan_dir = true; + else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) + flip_horz_scan_dir = true; if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { @@ -530,6 +542,34 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) data->viewport.height = clip.height * surf_src.height / plane_state->dst_rect.height; + /* To transfer the x, y to correct coordinate on mirror image (camera). + * deg 0 : transfer x, + * deg 90 : don't need to transfer, + * deg180 : transfer y, + * deg270 : transfer x and y. + * To transfer the x, y to correct coordinate on non-mirror image (video). 
+ * deg 0 : don't need to transfer, + * deg 90 : transfer y, + * deg180 : transfer x and y, + * deg270 : transfer x. + */ + if (pipe_ctx->plane_state->horizontal_mirror) { + if (flip_horz_scan_dir && !flip_vert_scan_dir) { + data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; + data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; + } else if (flip_horz_scan_dir && flip_vert_scan_dir) + data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; + else { + if (!flip_horz_scan_dir && !flip_vert_scan_dir) + data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; + } + } else { + if (flip_horz_scan_dir) + data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; + if (flip_vert_scan_dir) + data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; + } + /* Round down, compensate in init */ data->viewport_c.x = data->viewport.x / vpc_div; data->viewport_c.y = data->viewport.y / vpc_div; @@ -725,6 +765,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r rect_swap_helper(&src); rect_swap_helper(&data->viewport_c); rect_swap_helper(&data->viewport); + + if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 && + pipe_ctx->plane_state->horizontal_mirror) { + flip_vert_scan_dir = true; + } + if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 && + pipe_ctx->plane_state->horizontal_mirror) { + flip_vert_scan_dir = false; + } } else if (pipe_ctx->plane_state->horizontal_mirror) flip_horz_scan_dir = !flip_horz_scan_dir; -- GitLab From d02e07948fcff46a7a48f8747260941d7290774b Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Thu, 19 Jul 2018 14:03:14 -0400 Subject: [PATCH 0352/1692] drm/amd/display: Print DPP DTN log info only for enabled pipes [why] There is currently a dependency on the order in which tests are executed. This is because the non-relevant state info is being printed, which results in the output based on the state from the previous test. [how] Print DPP DTN log only if the pipe is enabled. In addition to the affected per-submission DTN golden logs, included in this change is also DTN golden log update for pre-submission tests. The other DTN golden logs affected by this change will be updated upon nightly test run (which will generate the updated DTN logs). 
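Condensed from the hunk in dcn10_log_hw_state() below (the DTN_INFO columns are abbreviated here), the logging loop now snapshots the DPP state and bails out for gated pipes:

for (i = 0; i < pool->pipe_count; i++) {
	struct dpp *dpp = pool->dpps[i];
	struct dcn_dpp_state s = {0};	/* zero-init so a skipped read-back cannot leave garbage */

	dpp->funcs->dpp_read_state(dpp, &s);

	if (!s.is_enabled)
		continue;	/* disabled pipe: stale state, nothing relevant to log */

	DTN_INFO("[%2d]: ...", dpp->inst);	/* remaining columns abbreviated */
}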
Signed-off-by: Nikola Cornij Reviewed-by: Nikola Cornij Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 2 ++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 7 +++++-- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index bf8b68f8db4f..1d642552c743 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -103,6 +103,8 @@ void dpp_read_state(struct dpp *dpp_base, { struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + REG_GET(DPP_CONTROL, + DPP_CLOCK_ENABLE, &s->is_enabled); REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &s->igam_lut_mode); REG_GET(CM_IGAM_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 41f6595891f1..cfd93557c428 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -88,7 +88,7 @@ static void log_mpc_crc(struct dc *dc) void dcn10_log_hubbub_state(struct dc *dc) { struct dc_context *dc_ctx = dc->ctx; - struct dcn_hubbub_wm wm; + struct dcn_hubbub_wm wm = {0}; int i; hubbub1_wm_read_state(dc->res_pool->hubbub, &wm); @@ -244,10 +244,13 @@ void dcn10_log_hw_state(struct dc *dc) "C31 C32 C33 C34\n"); for (i = 0; i < pool->pipe_count; i++) { struct dpp *dpp = pool->dpps[i]; - struct dcn_dpp_state s; + struct dcn_dpp_state s = {0}; dpp->funcs->dpp_read_state(dpp, &s); + if (!s.is_enabled) + continue; + DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s" "%8x %08xh %08xh %08xh %08xh %08xh %08xh", dpp->inst, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 74ad94b0e4f0..80a480b9f137 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -45,6 +45,7 @@ struct dpp_grph_csc_adjustment { }; struct dcn_dpp_state { + uint32_t is_enabled; uint32_t igam_lut_mode; uint32_t igam_input_format; uint32_t dgam_lut_mode; -- GitLab From dc6c981d202733fd5718d4db627f7d85fee651eb Mon Sep 17 00:00:00 2001 From: Vitaly Prosyak Date: Wed, 18 Jul 2018 15:10:10 -0500 Subject: [PATCH 0353/1692] drm/amd/display: Use DGAM ROM or RAM [Why] Optimize gamma programming [How] Use ROM for optimization when it is possible. Use RAM only when it is necessary. 
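Concretely, the ROM path is only taken when the caller says the ROM curve is usable and the transfer function is the plain predefined sRGB one. The decision is wired up in amdgpu_dm_set_regamma_lut() as in the sketch below (condensed from the diff, comments added):

/* DM side: trust the hardcoded sRGB ROM curve only on ASICs up to Raven */
ret = mod_color_calculate_regamma_params(stream->out_transfer_func, gamma,
					 true, /* mapUserRamp */
					 adev->asic_type <= CHIP_RAVEN);

/* Module side: short-circuit to the ROM only when all of these hold,
 * otherwise fall through and build the distributed points for the RAM LUT.
 */
if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
    output_tf->tf == TRANSFER_FUNCTION_SRGB &&
    (!mapUserRamp && ramp->type == GAMMA_RGB_256))
	return true;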
Signed-off-by: Vitaly Prosyak Reviewed-by: Dmytro Laktyushkin Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 6 ++++-- .../gpu/drm/amd/display/modules/color/color_gamma.c | 10 +++++----- .../gpu/drm/amd/display/modules/color/color_gamma.h | 5 +++-- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index 326f6fb7e0bc..be19e6861189 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -22,7 +22,7 @@ * Authors: AMD * */ - +#include "amdgpu.h" #include "amdgpu_mode.h" #include "amdgpu_dm.h" #include "dc.h" @@ -122,6 +122,8 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) { struct drm_property_blob *blob = crtc->base.gamma_lut; struct dc_stream_state *stream = crtc->stream; + struct amdgpu_device *adev = (struct amdgpu_device *) + crtc->base.state->dev->dev_private; struct drm_color_lut *lut; uint32_t lut_size; struct dc_gamma *gamma; @@ -162,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) */ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; ret = mod_color_calculate_regamma_params(stream->out_transfer_func, - gamma, true); + gamma, true, adev->asic_type <= CHIP_RAVEN); dc_gamma_release(&gamma); if (!ret) { stream->out_transfer_func->type = old_type; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index bf29733958c3..3d1b89a30b3b 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1352,7 +1352,7 @@ static bool map_regamma_hw_to_x_user( #define _EXTRA_POINTS 3 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, - const struct dc_gamma *ramp, bool mapUserRamp) + const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed) { struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; struct dividers dividers; @@ -1368,7 +1368,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, return false; /* we can use hardcoded curve for plain SRGB TF */ - if (output_tf->type == TF_TYPE_PREDEFINED && + if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && output_tf->tf == TRANSFER_FUNCTION_SRGB && (!mapUserRamp && ramp->type == GAMMA_RGB_256)) return true; @@ -1430,7 +1430,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, MAX_HW_POINTS, coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? 
true:false); } - map_regamma_hw_to_x_user(ramp, coeff, rgb_user, coordinates_x, axix_x, rgb_regamma, MAX_HW_POINTS, tf_pts, @@ -1659,7 +1658,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, - struct dc_transfer_func_distributed_points *points) + struct dc_transfer_func_distributed_points *points, + uint32_t sdr_ref_white_level) { uint32_t i; bool ret = false; @@ -1693,7 +1693,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, build_pq(rgb_regamma, MAX_HW_POINTS, coordinates_x, - 80); + sdr_ref_white_level); for (i = 0; i <= MAX_HW_POINTS ; i++) { points->red[i] = rgb_regamma[i].r; points->green[i] = rgb_regamma[i].g; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index b64048991a95..63ccb9c91224 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -78,13 +78,14 @@ void precompute_pq(void); void precompute_de_pq(void); bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, - const struct dc_gamma *ramp, bool mapUserRamp); + const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed); bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, - struct dc_transfer_func_distributed_points *points); + struct dc_transfer_func_distributed_points *points, + uint32_t sdr_ref_white_level); bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points); -- GitLab From 1fdf7a4b1f96520467a578bdd9c90793cf7e8728 Mon Sep 17 00:00:00 2001 From: Vitaly Prosyak Date: Thu, 12 Jul 2018 14:26:47 -0500 Subject: [PATCH 0354/1692] drm/amd/display: Add check for num of entries in gamma This check avoids potential bugs related to gamma. Signed-off-by: Vitaly Prosyak Reviewed-by: Charlene Liu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 3d1b89a30b3b..15427f4fc990 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1580,7 +1580,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, /* we can use hardcoded curve for plain SRGB TF */ if (input_tf->type == TF_TYPE_PREDEFINED && input_tf->tf == TRANSFER_FUNCTION_SRGB && - (!mapUserRamp && ramp->type == GAMMA_RGB_256)) + (!mapUserRamp && + (ramp->type == GAMMA_RGB_256 || ramp->num_entries == 0))) return true; input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; -- GitLab From bf9b1d9dc7e99bb1395deb0ed0df3999ea527de3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 6 Aug 2018 19:45:04 +0800 Subject: [PATCH 0355/1692] drm/amdgpu: Delay 100ms to enable gfx off feature Original 500ms delay seems a bit large. Change to 100 ms suggested by Christian. 
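For context, a hedged sketch of how a delayed-enable timeout like GFX_OFF_DELAY_ENABLE is typically armed; the function and field names (example_gfx_off_ctrl, gfx_off_mutex, gfx_off_req_count, gfx_off_delay_work) are illustrative assumptions rather than code from this patch:

void example_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (!enable)
		adev->gfx.gfx_off_req_count++;
	else if (adev->gfx.gfx_off_req_count > 0)
		adev->gfx.gfx_off_req_count--;

	/* Only arm GFXOFF once every user has re-enabled it; the work item
	 * then powers the GFX block down after the GPU has stayed idle for
	 * GFX_OFF_DELAY_ENABLE jiffies (now 100 ms instead of 500 ms).
	 */
	if (enable && !adev->gfx.gfx_off_req_count)
		schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
				      GFX_OFF_DELAY_ENABLE);

	mutex_unlock(&adev->gfx.gfx_off_mutex);
}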
Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index bbb81e23020e..790fd5408ddf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -26,8 +26,8 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" -/* 0.5 second timeout */ -#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(500) +/* delay 0.1 second to enable gfx off feature */ +#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) /* * GPU GFX IP block helpers function. -- GitLab From 7febe4bfd5d477eba17f70d4879cb81e9787118e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 1 Aug 2018 16:22:39 +0200 Subject: [PATCH 0356/1692] drm/scheduler: fix setting the priorty for entities (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we now deal with multiple rq we need to update all of them, not just the current one. v2: Trivial: Removed unused variable (Alex) Signed-off-by: Christian König Acked-by: Nayan Deshmukh Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 +-- drivers/gpu/drm/scheduler/gpu_scheduler.c | 36 +++++++++++++++-------- include/drm/gpu_scheduler.h | 5 ++-- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index df6965761046..02d563cfb4a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -394,7 +394,6 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, { int i; struct amdgpu_device *adev = ctx->adev; - struct drm_sched_rq *rq; struct drm_sched_entity *entity; struct amdgpu_ring *ring; enum drm_sched_priority ctx_prio; @@ -407,12 +406,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, for (i = 0; i < adev->num_rings; i++) { ring = adev->rings[i]; entity = &ctx->rings[i].entity; - rq = &ring->sched.sched_rq[ctx_prio]; if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) continue; - drm_sched_entity_set_rq(entity, rq); + drm_sched_entity_set_priority(entity, ctx_prio); } } diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 08fa5b65acaf..695a9643f046 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -416,29 +416,39 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb } /** - * drm_sched_entity_set_rq - Sets the run queue for an entity + * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority + */ +static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq, + enum drm_sched_priority priority) +{ + *rq = &(*rq)->sched->sched_rq[priority]; +} + +/** + * drm_sched_entity_set_priority - Sets priority of the entity * * @entity: scheduler entity - * @rq: scheduler run queue + * @priority: scheduler priority * - * Sets the run queue for an entity and removes the entity from the previous - * run queue in which was present. + * Update the priority of runqueus used for the entity. 
*/ -void drm_sched_entity_set_rq(struct drm_sched_entity *entity, - struct drm_sched_rq *rq) +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority) { - if (entity->rq == rq) - return; - - BUG_ON(!rq); + unsigned int i; spin_lock(&entity->rq_lock); + + for (i = 0; i < entity->num_rq_list; ++i) + drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority); + drm_sched_rq_remove_entity(entity->rq, entity); - entity->rq = rq; - drm_sched_rq_add_entity(rq, entity); + drm_sched_entity_set_rq_priority(&entity->rq, priority); + drm_sched_rq_add_entity(entity->rq, entity); + spin_unlock(&entity->rq_lock); } -EXPORT_SYMBOL(drm_sched_entity_set_rq); +EXPORT_SYMBOL(drm_sched_entity_set_priority); /** * drm_sched_dependency_optimized diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 0c4cfe689d4c..22c0f88f7d8f 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -298,9 +298,8 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity); void drm_sched_entity_destroy(struct drm_sched_entity *entity); void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity); -void drm_sched_entity_set_rq(struct drm_sched_entity *entity, - struct drm_sched_rq *rq); - +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority); struct drm_sched_fence *drm_sched_fence_create( struct drm_sched_entity *s_entity, void *owner); void drm_sched_fence_scheduled(struct drm_sched_fence *fence); -- GitLab From e854b61acf775a082d01d828fe3430bab0c4769f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 6 Aug 2018 12:46:41 +0200 Subject: [PATCH 0357/1692] drm/scheduler: bind job earlier to scheduler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update job earlier with the scheduler it is supposed to be scheduled on. Otherwise we could incorrectly optimize dependencies when moving an entity from one scheduler to another. 
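A hedged sketch of the dependency optimization this ordering protects (simplified, not copied from the scheduler): a dependency fence can only be short-circuited when it was scheduled on the same drm_gpu_scheduler the job itself will run on, so job->sched and the job's s_fence->sched must already be final when dependencies are inspected.

static bool example_dep_already_ordered(struct drm_sched_job *job,
					struct dma_fence *dep)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(dep);

	/* Same scheduler/ring: submission order already serializes the two
	 * jobs, so the explicit wait on dep can be dropped. Binding
	 * job->sched to entity->rq->sched at push time ensures this
	 * comparison sees the scheduler the job will really run on.
	 */
	return s_fence && s_fence->sched == job->sched;
}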
Signed-off-by: Christian König Reviewed-by: Nayan Deshmukh Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 ++-- drivers/gpu/drm/scheduler/sched_fence.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 695a9643f046..da2da8d85035 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -530,8 +530,6 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity) if (!sched_job) return NULL; - sched_job->sched = sched; - sched_job->s_fence->sched = sched; while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { if (drm_sched_entity_add_dependency_cb(entity)) { @@ -582,6 +580,8 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, spin_unlock(&entity->rq_lock); } + sched_job->sched = entity->rq->sched; + sched_job->s_fence->sched = entity->rq->sched; trace_drm_sched_job(sched_job, entity); atomic_inc(&entity->rq->sched->num_jobs); WRITE_ONCE(entity->last_user, current->group_leader); diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index d8d2dff9ea2f..20e4da377890 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity, return NULL; fence->owner = owner; - fence->sched = entity->rq->sched; + fence->sched = NULL; spin_lock_init(&fence->lock); seq = atomic_inc_return(&entity->fence_seq); -- GitLab From c51cebb890dc76260c02354882cf333c8207dd19 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 4 Aug 2018 08:25:35 +0800 Subject: [PATCH 0358/1692] gpu: drm: radeon: cik: Replace mdelay() with msleep() in cik_pcie_gen3_enable() cik_pcie_gen3_enable() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ebce4601a305..ab7b4e2ffcd2 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -9600,7 +9600,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) tmp |= LC_REDO_EQ; WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); - mdelay(100); + msleep(100); /* linkctl */ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); -- GitLab From 2f2debb5a162ea9aeb03ad2532827631e3a68b1a Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 4 Aug 2018 08:33:44 +0800 Subject: [PATCH 0359/1692] gpu: drm: radeon: si: Replace mdelay() with msleep() in si_pcie_gen3_enable() si_pcie_gen3_enable() is never called in atomic context. It calls mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). 
This is found by a static analysis tool named DCNS written by myself Signed-off-by: Jia-Ju Bai Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/si.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 85c604d29235..841bc8bc333d 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -7183,7 +7183,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) tmp |= LC_REDO_EQ; WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); - mdelay(100); + msleep(100); /* linkctl */ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); -- GitLab From 4b991c54ce82e85d89babcf32d72746365163ed5 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 4 Aug 2018 08:01:02 +0800 Subject: [PATCH 0360/1692] gpu: drm: radeon: radeon_test: Replace mdelay() with msleep() radeon_test_ring_sync() and radeon_test_ring_sync2() are never called in atomic context. They call mdelay() to busily wait, which is not necessary. mdelay() can be replaced with msleep(). This is found by a static analysis tool named DCNS written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_test.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 0c7f228db6e3..701c4a59e3c3 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -348,7 +348,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, if (r) goto out_cleanup; - mdelay(1000); + msleep(1000); if (radeon_fence_signaled(fence1)) { DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n"); @@ -369,7 +369,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, goto out_cleanup; } - mdelay(1000); + msleep(1000); if (radeon_fence_signaled(fence2)) { DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n"); @@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, if (r) goto out_cleanup; - mdelay(1000); + msleep(1000); if (radeon_fence_signaled(fenceA)) { DRM_ERROR("Fence A signaled without waiting for semaphore.\n"); @@ -462,7 +462,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, radeon_ring_unlock_commit(rdev, ringC, false); for (i = 0; i < 30; ++i) { - mdelay(100); + msleep(100); sigA = radeon_fence_signaled(fenceA); sigB = radeon_fence_signaled(fenceB); if (sigA || sigB) @@ -487,7 +487,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); radeon_ring_unlock_commit(rdev, ringC, false); - mdelay(1000); + msleep(1000); r = radeon_fence_wait(fenceA, false); if (r) { -- GitLab From 0a7845db93b29f773f37d87e9a15ec1c295b1163 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 3 Aug 2018 18:33:06 +0800 Subject: [PATCH 0361/1692] drm/amdgpu: move ring macros into amdgpu_ring header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. 
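These wrappers are pure dispatch through struct amdgpu_ring_funcs, so each IP block (GFX, SDMA, UVD, ...) supplies its own implementation; a minimal sketch of a user after the move, where example_log_pointers() is hypothetical:

#include "amdgpu.h"
#include "amdgpu_ring.h"

static void example_log_pointers(struct amdgpu_ring *ring)
{
	/* expand to ring->funcs->get_rptr(ring) / ring->funcs->get_wptr(ring) */
	u64 rptr = amdgpu_ring_get_rptr(ring);
	u64 wptr = amdgpu_ring_get_wptr(ring);

	DRM_DEBUG("ring %s: rptr %llu wptr %llu\n", ring->name, rptr, wptr);
}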
Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 23 ----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 24 ++++++++++++++++++++++++ 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 7261068f9cca..2cf907cd46a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1259,29 +1259,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) -#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) -#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib))) -#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) -#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) -#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) -#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) -#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) -#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c)) -#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) -#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) -#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) -#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) -#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) -#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) -#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) -#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d)) -#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) -#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) -#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m)) -#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b)) -#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) -#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) -#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index d242b9a51e90..906897a38743 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -221,6 +221,30 @@ struct amdgpu_ring { #endif }; +#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) +#define 
amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib))) +#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) +#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) +#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) +#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) +#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) +#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c)) +#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) +#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) +#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) +#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) +#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) +#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) +#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) +#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d)) +#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) +#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) +#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m)) +#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b)) +#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) +#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) +#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) + int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); -- GitLab From 9e21fc56fe5828642d0058700912ff31facb6fa6 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 3 Aug 2018 18:37:58 +0800 Subject: [PATCH 0362/1692] drm/amdgpu: remove useless gds switch macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. 
Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2cf907cd46a8..5726c56fc5d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1270,7 +1270,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) -#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) /* Common functions */ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, -- GitLab From 5df585258f9ff5d5b653f6a162dc389c597c5714 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 9 Aug 2018 09:50:12 -0500 Subject: [PATCH 0363/1692] drm/amdgpu: move display definitions into amdgpu_display header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 13 ------------- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 15 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 1 + drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 1 + drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 1 + drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 1 + drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1 + drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 1 + 13 files changed, 26 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5726c56fc5d1..2473037e9550 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -758,7 +758,6 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); - /* VRAM scratch page for HDP bug, default vram page */ struct amdgpu_vram_scratch { struct amdgpu_bo *robj; @@ -1259,24 +1258,12 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) -#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) -#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) -#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) -#define amdgpu_display_hpd_sense(adev, h) 
(adev)->mode_info.funcs->hpd_sense((adev), (h)) -#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) -#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) -#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) -#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async)) -#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) -#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) -#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) /* Common functions */ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job, bool force); void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); -void amdgpu_display_update_priority(struct amdgpu_device *adev); void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, u64 num_vis_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 353993218f21..6488e90ec948 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -31,6 +31,7 @@ #include #include "amdgpu.h" #include "amdgpu_pm.h" +#include "amdgpu_display.h" #include "amd_acpi.h" #include "atom.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index bf872f694f50..e02781b37e73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -29,6 +29,7 @@ #include "amdgpu_atombios.h" #include "amdgpu_atomfirmware.h" #include "amdgpu_i2c.h" +#include "amdgpu_display.h" #include "atom.h" #include "atom-bits.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index c770d73352a7..69ad6ec0a4f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -34,6 +34,7 @@ #include "atombios_dp.h" #include "amdgpu_connectors.h" #include "amdgpu_i2c.h" +#include "amdgpu_display.h" #include diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index f66e3e3fef0a..06b922fe0d42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -23,6 +23,21 @@ #ifndef __AMDGPU_DISPLAY_H__ #define __AMDGPU_DISPLAY_H__ +#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) +#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) +#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) +#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) +#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) +#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) +#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) +#define 
amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async)) +#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) +#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) +#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) + +int amdgpu_display_freesync_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +void amdgpu_display_update_priority(struct amdgpu_device *adev); uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev); struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index ae8fac34f7a5..ec78e2b2015c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -28,6 +28,7 @@ #include #include "amdgpu.h" #include "amdgpu_connectors.h" +#include "amdgpu_display.h" #include "atom.h" #include "atombios_encoders.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 20645ea719b3..a1043b421e3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -38,6 +38,7 @@ #include #include "amdgpu_amdkfd.h" #include "amdgpu_gem.h" +#include "amdgpu_display.h" /** * amdgpu_driver_unload_kms - Main unload function for KMS. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8f98629fbe59..daa55fb06171 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -27,6 +27,7 @@ #include "amdgpu_drv.h" #include "amdgpu_pm.h" #include "amdgpu_dpm.h" +#include "amdgpu_display.h" #include "atom.h" #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index d702fb8e3427..60e2447e12c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -28,6 +28,7 @@ #include #include "amdgpu.h" #include "amdgpu_connectors.h" +#include "amdgpu_display.h" #include "atom.h" #include "atombios_encoders.h" #include "atombios_dp.h" diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 308f9f238bc1..4313d6c6407d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -31,6 +31,7 @@ #include "atombios_encoders.h" #include "amdgpu_pll.h" #include "amdgpu_connectors.h" +#include "amdgpu_display.h" #include "dce_v10_0.h" #include "dce/dce_10_0_d.h" diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 76dfb76f7900..e295cc18cae4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -31,6 +31,7 @@ #include "atombios_encoders.h" #include "amdgpu_pll.h" #include "amdgpu_connectors.h" +#include "amdgpu_display.h" #include "dce_v11_0.h" #include "dce/dce_11_0_d.h" diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index c9adc627305d..018dd62dc5b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -30,6 +30,7 @@ #include 
"atombios_encoders.h" #include "amdgpu_pll.h" #include "amdgpu_connectors.h" +#include "amdgpu_display.h" #include "bif/bif_3_0_d.h" #include "bif/bif_3_0_sh_mask.h" diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 50cd03beac7d..2bae3ad2bbf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -31,6 +31,7 @@ #include "atombios_encoders.h" #include "amdgpu_pll.h" #include "amdgpu_connectors.h" +#include "amdgpu_display.h" #include "dce_v8_0.h" #include "dce/dce_8_0_d.h" -- GitLab From c082b99878b73ebf9e9b5296a5c4f3ad5313942a Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 3 Aug 2018 18:59:25 +0800 Subject: [PATCH 0364/1692] drm/amdgpu: move gmc macros into amdgpu_gmc header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ------ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 7 +++++++ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2473037e9550..64f9ad4056da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1249,12 +1249,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) -#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)) -#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) -#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) -#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) -#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) -#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index bb5a47a45790..64391d811a82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -111,6 +111,13 @@ struct amdgpu_gmc { const struct amdgpu_gmc_funcs *gmc_funcs; }; +#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)) +#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) +#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) +#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), 
(pt), (idx), (addr), (flags)) +#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) +#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags)) + /** * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR * -- GitLab From 4473e1db3120cc78e795fd2902275ba18659ae3f Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 3 Aug 2018 19:06:02 +0800 Subject: [PATCH 0365/1692] drm/amdgpu: move vm definitions into amdgpu_vm header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Demangle amdgpu.h. Signed-off-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 24 ------------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 25 +++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 64f9ad4056da..ef4fb6a6f9db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -257,27 +257,6 @@ amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev, int amdgpu_device_ip_block_add(struct amdgpu_device *adev, const struct amdgpu_ip_block_version *ip_block_version); -/* provided by hw blocks that can write ptes, e.g., sdma */ -struct amdgpu_vm_pte_funcs { - /* number of dw to reserve per operation */ - unsigned copy_pte_num_dw; - - /* copy pte entries from GART */ - void (*copy_pte)(struct amdgpu_ib *ib, - uint64_t pe, uint64_t src, - unsigned count); - - /* write pte one entry at a time with addr mapping */ - void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, - uint64_t value, unsigned count, - uint32_t incr); - /* for linear pte/pde updates without addr mapping */ - void (*set_pte_pde)(struct amdgpu_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint64_t flags); -}; - /* * BIOS. 
*/ @@ -1249,9 +1228,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) -#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) -#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) -#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) /* Common functions */ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..ffda53420f8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -160,6 +160,27 @@ struct amdgpu_vm_pt { struct amdgpu_vm_pt *entries; }; +/* provided by hw blocks that can write ptes, e.g., sdma */ +struct amdgpu_vm_pte_funcs { + /* number of dw to reserve per operation */ + unsigned copy_pte_num_dw; + + /* copy pte entries from GART */ + void (*copy_pte)(struct amdgpu_ib *ib, + uint64_t pe, uint64_t src, + unsigned count); + + /* write pte one entry at a time with addr mapping */ + void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr); + /* for linear pte/pde updates without addr mapping */ + void (*set_pte_pde)(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint64_t flags); +}; + #define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr)) #define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48) #define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL) @@ -266,6 +287,10 @@ struct amdgpu_vm_manager { spinlock_t pasid_lock; }; +#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) +#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) +#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) + void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, -- GitLab From c2d358d724ee5ba8bda49a384ac3ae5ab125134c Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 6 Aug 2018 20:14:51 +0800 Subject: [PATCH 0366/1692] drm/amdgpu: move missed gfxoff entry into amdgpu_gfx header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move missed gfxoff entry to amdgpu_gfx.h. 
Signed-off-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ef4fb6a6f9db..07924d41ee89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1247,7 +1247,6 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 array_size); bool amdgpu_device_is_px(struct drm_device *dev); -void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 4e3d147c2f1b..53e9e2a0821e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -356,5 +356,6 @@ void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, int pipe, int queue); +void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); #endif -- GitLab From 93f15e1c0796f102a62fdc4931e4c5f847c98a12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 3 Aug 2018 13:07:36 +0200 Subject: [PATCH 0367/1692] drm/scheduler: Remove entity->rq NULL check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That is superflous now. Signed-off-by: Christian König Acked-by: Nayan Deshmukh Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index da2da8d85035..6be554499be9 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -591,11 +591,6 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, if (first) { /* Add the entity to the run queue */ spin_lock(&entity->rq_lock); - if (!entity->rq) { - DRM_ERROR("Trying to push to a killed entity\n"); - spin_unlock(&entity->rq_lock); - return; - } drm_sched_rq_add_entity(entity->rq, entity); spin_unlock(&entity->rq_lock); drm_sched_wakeup(entity->rq->sched); -- GitLab From 573edb241b44162a1478cc74429f94df86e6e71d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 7 Aug 2018 14:52:13 +0200 Subject: [PATCH 0368/1692] drm/scheduler: fix last_scheduled handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure we access last_scheduled only after checking that there are no more jobs on the entity. 
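Put differently, the load-balancing path must only look at last_scheduled once it knows the entity is idle. A minimal sketch of the intended ordering, simplified from the scheduler structures (the hunk below is the authoritative change):

    first = spsc_queue_count(&entity->job_queue) == 0;
    if (first && entity->num_rq_list > 1) {
            struct dma_fence *fence = READ_ONCE(entity->last_scheduled);

            /* Only migrate to a less loaded run queue while nothing
             * previously submitted by this entity is still in flight.
             */
            if (fence == NULL || dma_fence_is_signaled(fence))
                    rq = drm_sched_entity_get_free_sched(entity);
    }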
Signed-off-by: Christian König Reviewed-by: Nayan Deshmukh Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 6be554499be9..f40a504e3d68 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -565,19 +565,20 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity) { struct drm_sched_rq *rq = entity->rq; - bool first, reschedule, idle; + bool first; - idle = entity->last_scheduled == NULL || - dma_fence_is_signaled(entity->last_scheduled); first = spsc_queue_count(&entity->job_queue) == 0; - reschedule = idle && first && (entity->num_rq_list > 1); + if (first && (entity->num_rq_list > 1)) { + struct dma_fence *fence; - if (reschedule) { - rq = drm_sched_entity_get_free_sched(entity); - spin_lock(&entity->rq_lock); - drm_sched_rq_remove_entity(entity->rq, entity); - entity->rq = rq; - spin_unlock(&entity->rq_lock); + fence = READ_ONCE(entity->last_scheduled); + if (fence == NULL || dma_fence_is_signaled(fence)) { + rq = drm_sched_entity_get_free_sched(entity); + spin_lock(&entity->rq_lock); + drm_sched_rq_remove_entity(entity->rq, entity); + entity->rq = rq; + spin_unlock(&entity->rq_lock); + } } sched_job->sched = entity->rq->sched; -- GitLab From 35e160e781a048a9170a9deb3c1f13f06df4add9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 8 Aug 2018 13:07:11 +0200 Subject: [PATCH 0369/1692] drm/scheduler: change entities rq even earlier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Looks like for correct debugging we need to know the scheduler even earlier. So move picking a rq for an entity into job creation. Signed-off-by: Christian König Reviewed-by: Nayan Deshmukh Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 50 +++++++++++++++-------- drivers/gpu/drm/scheduler/sched_fence.c | 2 +- 2 files changed, 33 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index f40a504e3d68..f566405f49e3 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -549,6 +549,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity) return sched_job; } +/** + * drm_sched_entity_select_rq - select a new rq for the entity + * + * @entity: scheduler entity + * + * Check all prerequisites and select a new rq for the entity for load + * balancing. 
+ */ +static void drm_sched_entity_select_rq(struct drm_sched_entity *entity) +{ + struct dma_fence *fence; + struct drm_sched_rq *rq; + + if (!spsc_queue_count(&entity->job_queue) == 0 || + entity->num_rq_list <= 1) + return; + + fence = READ_ONCE(entity->last_scheduled); + if (fence && !dma_fence_is_signaled(fence)) + return; + + rq = drm_sched_entity_get_free_sched(entity); + spin_lock(&entity->rq_lock); + drm_sched_rq_remove_entity(entity->rq, entity); + entity->rq = rq; + spin_unlock(&entity->rq_lock); +} + /** * drm_sched_entity_push_job - Submit a job to the entity's job queue * @@ -564,25 +592,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity) void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity) { - struct drm_sched_rq *rq = entity->rq; bool first; - first = spsc_queue_count(&entity->job_queue) == 0; - if (first && (entity->num_rq_list > 1)) { - struct dma_fence *fence; - - fence = READ_ONCE(entity->last_scheduled); - if (fence == NULL || dma_fence_is_signaled(fence)) { - rq = drm_sched_entity_get_free_sched(entity); - spin_lock(&entity->rq_lock); - drm_sched_rq_remove_entity(entity->rq, entity); - entity->rq = rq; - spin_unlock(&entity->rq_lock); - } - } - - sched_job->sched = entity->rq->sched; - sched_job->s_fence->sched = entity->rq->sched; trace_drm_sched_job(sched_job, entity); atomic_inc(&entity->rq->sched->num_jobs); WRITE_ONCE(entity->last_user, current->group_leader); @@ -786,7 +797,10 @@ int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, void *owner) { - struct drm_gpu_scheduler *sched = entity->rq->sched; + struct drm_gpu_scheduler *sched; + + drm_sched_entity_select_rq(entity); + sched = entity->rq->sched; job->sched = sched; job->entity = entity; diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 20e4da377890..d8d2dff9ea2f 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity, return NULL; fence->owner = owner; - fence->sched = NULL; + fence->sched = entity->rq->sched; spin_lock_init(&fence->lock); seq = atomic_inc_return(&entity->fence_seq); -- GitLab From 2cf01099518b845ff0df332a16bf3eb48c631c0c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 7 Aug 2018 15:17:09 -0500 Subject: [PATCH 0370/1692] drm/amdgpu/pp: endian fixes for process_pptables_v1_0.c Properly swap when reading from the vbios. 
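Background for the conversions below: the ATOM/vbios tables are stored little-endian, so every multi-byte field must pass through le16_to_cpu()/le32_to_cpu() before use. On little-endian hosts these helpers compile away; on big-endian hosts they byte-swap. A self-contained illustration of the pattern (the record layout is invented for the example and is not a real pptable structure):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_vbios_record {            /* hypothetical packed vbios record */
            __le16 usVoltage;                /* little-endian on disk */
            __le32 ulClock;
    } __packed;

    static void read_example_record(const struct example_vbios_record *rec,
                                    u16 *voltage, u32 *clock)
    {
            *voltage = le16_to_cpu(rec->usVoltage);  /* byte-swap on BE hosts */
            *clock   = le32_to_cpu(rec->ulClock);
    }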
Reviewed-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../powerplay/hwmgr/process_pptables_v1_0.c | 194 +++++++++--------- 1 file changed, 97 insertions(+), 97 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 4e1fd5393845..ae64ff7153d6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -214,23 +214,23 @@ static int get_platform_power_management_table( ptr->ppm_design = atom_ppm_table->ucPpmDesign; ptr->cpu_core_number - = atom_ppm_table->usCpuCoreNumber; + = le16_to_cpu(atom_ppm_table->usCpuCoreNumber); ptr->platform_tdp - = atom_ppm_table->ulPlatformTDP; + = le32_to_cpu(atom_ppm_table->ulPlatformTDP); ptr->small_ac_platform_tdp - = atom_ppm_table->ulSmallACPlatformTDP; + = le32_to_cpu(atom_ppm_table->ulSmallACPlatformTDP); ptr->platform_tdc - = atom_ppm_table->ulPlatformTDC; + = le32_to_cpu(atom_ppm_table->ulPlatformTDC); ptr->small_ac_platform_tdc - = atom_ppm_table->ulSmallACPlatformTDC; + = le32_to_cpu(atom_ppm_table->ulSmallACPlatformTDC); ptr->apu_tdp - = atom_ppm_table->ulApuTDP; + = le32_to_cpu(atom_ppm_table->ulApuTDP); ptr->dgpu_tdp - = atom_ppm_table->ulDGpuTDP; + = le32_to_cpu(atom_ppm_table->ulDGpuTDP); ptr->dgpu_ulv_power - = atom_ppm_table->ulDGpuUlvPower; + = le32_to_cpu(atom_ppm_table->ulDGpuUlvPower); ptr->tj_max - = atom_ppm_table->ulTjmax; + = le32_to_cpu(atom_ppm_table->ulTjmax); pp_table_information->ppm_parameter_table = ptr; @@ -355,11 +355,11 @@ static int get_hard_limits( PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1); /* currently we always take entries[0] parameters */ - limits->sclk = (uint32_t)limitable->entries[0].ulSCLKLimit; - limits->mclk = (uint32_t)limitable->entries[0].ulMCLKLimit; - limits->vddc = (uint16_t)limitable->entries[0].usVddcLimit; - limits->vddci = (uint16_t)limitable->entries[0].usVddciLimit; - limits->vddgfx = (uint16_t)limitable->entries[0].usVddgfxLimit; + limits->sclk = le32_to_cpu(limitable->entries[0].ulSCLKLimit); + limits->mclk = le32_to_cpu(limitable->entries[0].ulMCLKLimit); + limits->vddc = le16_to_cpu(limitable->entries[0].usVddcLimit); + limits->vddci = le16_to_cpu(limitable->entries[0].usVddciLimit); + limits->vddgfx = le16_to_cpu(limitable->entries[0].usVddgfxLimit); return 0; } @@ -396,10 +396,10 @@ static int get_mclk_voltage_dependency_table( ATOM_Tonga_MCLK_Dependency_Record, entries, mclk_dep_table, i); mclk_table_record->vddInd = mclk_dep_record->ucVddcInd; - mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset; - mclk_table_record->vddci = mclk_dep_record->usVddci; - mclk_table_record->mvdd = mclk_dep_record->usMvdd; - mclk_table_record->clk = mclk_dep_record->ulMclk; + mclk_table_record->vdd_offset = le16_to_cpu(mclk_dep_record->usVddgfxOffset); + mclk_table_record->vddci = le16_to_cpu(mclk_dep_record->usVddci); + mclk_table_record->mvdd = le16_to_cpu(mclk_dep_record->usMvdd); + mclk_table_record->clk = le32_to_cpu(mclk_dep_record->ulMclk); } *pp_tonga_mclk_dep_table = mclk_table; @@ -443,8 +443,8 @@ static int get_sclk_voltage_dependency_table( phm_ppt_v1_clock_voltage_dependency_record, entries, sclk_table, i); sclk_table_record->vddInd = sclk_dep_record->ucVddInd; - sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset; - sclk_table_record->clk = sclk_dep_record->ulSclk; + sclk_table_record->vdd_offset = 
le16_to_cpu(sclk_dep_record->usVddcOffset); + sclk_table_record->clk = le32_to_cpu(sclk_dep_record->ulSclk); sclk_table_record->cks_enable = (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F); @@ -475,12 +475,12 @@ static int get_sclk_voltage_dependency_table( phm_ppt_v1_clock_voltage_dependency_record, entries, sclk_table, i); sclk_table_record->vddInd = sclk_dep_record->ucVddInd; - sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset; - sclk_table_record->clk = sclk_dep_record->ulSclk; + sclk_table_record->vdd_offset = le16_to_cpu(sclk_dep_record->usVddcOffset); + sclk_table_record->clk = le32_to_cpu(sclk_dep_record->ulSclk); sclk_table_record->cks_enable = (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F); - sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset; + sclk_table_record->sclk_offset = le32_to_cpu(sclk_dep_record->ulSclkOffset); } } *pp_tonga_sclk_dep_table = sclk_table; @@ -534,7 +534,7 @@ static int get_pcie_table( ATOM_Tonga_PCIE_Record, entries, atom_pcie_table, i); pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed; - pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth; + pcie_record->lane_width = le16_to_cpu(atom_pcie_record->usPCIELaneWidth); } *pp_tonga_pcie_table = pcie_table; @@ -574,8 +574,8 @@ static int get_pcie_table( ATOM_Polaris10_PCIE_Record, entries, atom_pcie_table, i); pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed; - pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth; - pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk; + pcie_record->lane_width = le16_to_cpu(atom_pcie_record->usPCIELaneWidth); + pcie_record->pcie_sclk = le32_to_cpu(atom_pcie_record->ulPCIE_Sclk); } *pp_tonga_pcie_table = pcie_table; @@ -609,64 +609,64 @@ static int get_cac_tdp_table( if (table->ucRevId < 3) { const ATOM_Tonga_PowerTune_Table *tonga_table = (ATOM_Tonga_PowerTune_Table *)table; - tdp_table->usTDP = tonga_table->usTDP; + tdp_table->usTDP = le16_to_cpu(tonga_table->usTDP); tdp_table->usConfigurableTDP = - tonga_table->usConfigurableTDP; - tdp_table->usTDC = tonga_table->usTDC; + le16_to_cpu(tonga_table->usConfigurableTDP); + tdp_table->usTDC = le16_to_cpu(tonga_table->usTDC); tdp_table->usBatteryPowerLimit = - tonga_table->usBatteryPowerLimit; + le16_to_cpu(tonga_table->usBatteryPowerLimit); tdp_table->usSmallPowerLimit = - tonga_table->usSmallPowerLimit; + le16_to_cpu(tonga_table->usSmallPowerLimit); tdp_table->usLowCACLeakage = - tonga_table->usLowCACLeakage; + le16_to_cpu(tonga_table->usLowCACLeakage); tdp_table->usHighCACLeakage = - tonga_table->usHighCACLeakage; + le16_to_cpu(tonga_table->usHighCACLeakage); tdp_table->usMaximumPowerDeliveryLimit = - tonga_table->usMaximumPowerDeliveryLimit; + le16_to_cpu(tonga_table->usMaximumPowerDeliveryLimit); tdp_table->usDefaultTargetOperatingTemp = - tonga_table->usTjMax; + le16_to_cpu(tonga_table->usTjMax); tdp_table->usTargetOperatingTemp = - tonga_table->usTjMax; /*Set the initial temp to the same as default */ + le16_to_cpu(tonga_table->usTjMax); /*Set the initial temp to the same as default */ tdp_table->usPowerTuneDataSetID = - tonga_table->usPowerTuneDataSetID; + le16_to_cpu(tonga_table->usPowerTuneDataSetID); tdp_table->usSoftwareShutdownTemp = - tonga_table->usSoftwareShutdownTemp; + le16_to_cpu(tonga_table->usSoftwareShutdownTemp); 
tdp_table->usClockStretchAmount = - tonga_table->usClockStretchAmount; + le16_to_cpu(tonga_table->usClockStretchAmount); } else { /* Fiji and newer */ const ATOM_Fiji_PowerTune_Table *fijitable = (ATOM_Fiji_PowerTune_Table *)table; - tdp_table->usTDP = fijitable->usTDP; - tdp_table->usConfigurableTDP = fijitable->usConfigurableTDP; - tdp_table->usTDC = fijitable->usTDC; - tdp_table->usBatteryPowerLimit = fijitable->usBatteryPowerLimit; - tdp_table->usSmallPowerLimit = fijitable->usSmallPowerLimit; - tdp_table->usLowCACLeakage = fijitable->usLowCACLeakage; - tdp_table->usHighCACLeakage = fijitable->usHighCACLeakage; + tdp_table->usTDP = le16_to_cpu(fijitable->usTDP); + tdp_table->usConfigurableTDP = le16_to_cpu(fijitable->usConfigurableTDP); + tdp_table->usTDC = le16_to_cpu(fijitable->usTDC); + tdp_table->usBatteryPowerLimit = le16_to_cpu(fijitable->usBatteryPowerLimit); + tdp_table->usSmallPowerLimit = le16_to_cpu(fijitable->usSmallPowerLimit); + tdp_table->usLowCACLeakage = le16_to_cpu(fijitable->usLowCACLeakage); + tdp_table->usHighCACLeakage = le16_to_cpu(fijitable->usHighCACLeakage); tdp_table->usMaximumPowerDeliveryLimit = - fijitable->usMaximumPowerDeliveryLimit; + le16_to_cpu(fijitable->usMaximumPowerDeliveryLimit); tdp_table->usDefaultTargetOperatingTemp = - fijitable->usTjMax; + le16_to_cpu(fijitable->usTjMax); tdp_table->usTargetOperatingTemp = - fijitable->usTjMax; /*Set the initial temp to the same as default */ + le16_to_cpu(fijitable->usTjMax); /*Set the initial temp to the same as default */ tdp_table->usPowerTuneDataSetID = - fijitable->usPowerTuneDataSetID; + le16_to_cpu(fijitable->usPowerTuneDataSetID); tdp_table->usSoftwareShutdownTemp = - fijitable->usSoftwareShutdownTemp; + le16_to_cpu(fijitable->usSoftwareShutdownTemp); tdp_table->usClockStretchAmount = - fijitable->usClockStretchAmount; + le16_to_cpu(fijitable->usClockStretchAmount); tdp_table->usTemperatureLimitHotspot = - fijitable->usTemperatureLimitHotspot; + le16_to_cpu(fijitable->usTemperatureLimitHotspot); tdp_table->usTemperatureLimitLiquid1 = - fijitable->usTemperatureLimitLiquid1; + le16_to_cpu(fijitable->usTemperatureLimitLiquid1); tdp_table->usTemperatureLimitLiquid2 = - fijitable->usTemperatureLimitLiquid2; + le16_to_cpu(fijitable->usTemperatureLimitLiquid2); tdp_table->usTemperatureLimitVrVddc = - fijitable->usTemperatureLimitVrVddc; + le16_to_cpu(fijitable->usTemperatureLimitVrVddc); tdp_table->usTemperatureLimitVrMvdd = - fijitable->usTemperatureLimitVrMvdd; + le16_to_cpu(fijitable->usTemperatureLimitVrMvdd); tdp_table->usTemperatureLimitPlx = - fijitable->usTemperatureLimitPlx; + le16_to_cpu(fijitable->usTemperatureLimitPlx); tdp_table->ucLiquid1_I2C_address = fijitable->ucLiquid1_I2C_address; tdp_table->ucLiquid2_I2C_address = @@ -715,12 +715,12 @@ static int get_mm_clock_voltage_table( phm_ppt_v1_mm_clock_voltage_dependency_record, entries, mm_table, i); mm_table_record->vddcInd = mm_dependency_record->ucVddcInd; - mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset; - mm_table_record->aclk = mm_dependency_record->ulAClk; - mm_table_record->samclock = mm_dependency_record->ulSAMUClk; - mm_table_record->eclk = mm_dependency_record->ulEClk; - mm_table_record->vclk = mm_dependency_record->ulVClk; - mm_table_record->dclk = mm_dependency_record->ulDClk; + mm_table_record->vddgfx_offset = le16_to_cpu(mm_dependency_record->usVddgfxOffset); + mm_table_record->aclk = le32_to_cpu(mm_dependency_record->ulAClk); + mm_table_record->samclock = le32_to_cpu(mm_dependency_record->ulSAMUClk); + 
mm_table_record->eclk = le32_to_cpu(mm_dependency_record->ulEClk); + mm_table_record->vclk = le32_to_cpu(mm_dependency_record->ulVClk); + mm_table_record->dclk = le32_to_cpu(mm_dependency_record->ulDClk); } *tonga_mm_table = mm_table; @@ -939,33 +939,33 @@ static int init_thermal_controller( hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst = tonga_fan_table->ucTHyst; hwmgr->thermal_controller.advanceFanControlParameters.usTMin - = tonga_fan_table->usTMin; + = le16_to_cpu(tonga_fan_table->usTMin); hwmgr->thermal_controller.advanceFanControlParameters.usTMed - = tonga_fan_table->usTMed; + = le16_to_cpu(tonga_fan_table->usTMed); hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - = tonga_fan_table->usTHigh; + = le16_to_cpu(tonga_fan_table->usTHigh); hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin - = tonga_fan_table->usPWMMin; + = le16_to_cpu(tonga_fan_table->usPWMMin); hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - = tonga_fan_table->usPWMMed; + = le16_to_cpu(tonga_fan_table->usPWMMed); hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - = tonga_fan_table->usPWMHigh; + = le16_to_cpu(tonga_fan_table->usPWMHigh); hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 10900; /* hard coded */ hwmgr->thermal_controller.advanceFanControlParameters.usTMax - = tonga_fan_table->usTMax; + = le16_to_cpu(tonga_fan_table->usTMax); hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode = tonga_fan_table->ucFanControlMode; hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM - = tonga_fan_table->usFanPWMMax; + = le16_to_cpu(tonga_fan_table->usFanPWMMax); hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity = 4836; hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity - = tonga_fan_table->usFanOutputSensitivity; + = le16_to_cpu(tonga_fan_table->usFanOutputSensitivity); hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM - = tonga_fan_table->usFanRPMMax; + = le16_to_cpu(tonga_fan_table->usFanRPMMax); hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit - = (tonga_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */ + = (le32_to_cpu(tonga_fan_table->ulMinFanSCLKAcousticLimit) / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. 
*/ hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature = tonga_fan_table->ucTargetTemperature; hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit @@ -976,50 +976,50 @@ static int init_thermal_controller( hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst = fiji_fan_table->ucTHyst; hwmgr->thermal_controller.advanceFanControlParameters.usTMin - = fiji_fan_table->usTMin; + = le16_to_cpu(fiji_fan_table->usTMin); hwmgr->thermal_controller.advanceFanControlParameters.usTMed - = fiji_fan_table->usTMed; + = le16_to_cpu(fiji_fan_table->usTMed); hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - = fiji_fan_table->usTHigh; + = le16_to_cpu(fiji_fan_table->usTHigh); hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin - = fiji_fan_table->usPWMMin; + = le16_to_cpu(fiji_fan_table->usPWMMin); hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - = fiji_fan_table->usPWMMed; + = le16_to_cpu(fiji_fan_table->usPWMMed); hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - = fiji_fan_table->usPWMHigh; + = le16_to_cpu(fiji_fan_table->usPWMHigh); hwmgr->thermal_controller.advanceFanControlParameters.usTMax - = fiji_fan_table->usTMax; + = le16_to_cpu(fiji_fan_table->usTMax); hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode = fiji_fan_table->ucFanControlMode; hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM - = fiji_fan_table->usFanPWMMax; + = le16_to_cpu(fiji_fan_table->usFanPWMMax); hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity = 4836; hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity - = fiji_fan_table->usFanOutputSensitivity; + = le16_to_cpu(fiji_fan_table->usFanOutputSensitivity); hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM - = fiji_fan_table->usFanRPMMax; + = le16_to_cpu(fiji_fan_table->usFanRPMMax); hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit - = (fiji_fan_table->ulMinFanSCLKAcousticLimit / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. */ + = (le32_to_cpu(fiji_fan_table->ulMinFanSCLKAcousticLimit) / 100); /* PPTable stores it in 10Khz unit for 2 decimal places. SMC wants MHz. 
*/ hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature = fiji_fan_table->ucTargetTemperature; hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit = fiji_fan_table->ucMinimumPWMLimit; hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge - = fiji_fan_table->usFanGainEdge; + = le16_to_cpu(fiji_fan_table->usFanGainEdge); hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot - = fiji_fan_table->usFanGainHotspot; + = le16_to_cpu(fiji_fan_table->usFanGainHotspot); hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid - = fiji_fan_table->usFanGainLiquid; + = le16_to_cpu(fiji_fan_table->usFanGainLiquid); hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc - = fiji_fan_table->usFanGainVrVddc; + = le16_to_cpu(fiji_fan_table->usFanGainVrVddc); hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd - = fiji_fan_table->usFanGainVrMvdd; + = le16_to_cpu(fiji_fan_table->usFanGainVrMvdd); hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx - = fiji_fan_table->usFanGainPlx; + = le16_to_cpu(fiji_fan_table->usFanGainPlx); hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm - = fiji_fan_table->usFanGainHbm; + = le16_to_cpu(fiji_fan_table->usFanGainHbm); } return 0; @@ -1256,9 +1256,9 @@ static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i vce_state_record->ucVCEClockIndex); *flag = vce_state_record->ucFlag; - vce_state->evclk = mm_dep_record->ulEClk; - vce_state->ecclk = mm_dep_record->ulEClk; - vce_state->sclk = sclk_dep_record->ulSclk; + vce_state->evclk = le32_to_cpu(mm_dep_record->ulEClk); + vce_state->ecclk = le32_to_cpu(mm_dep_record->ulEClk); + vce_state->sclk = le32_to_cpu(sclk_dep_record->ulSclk); if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries) mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR( @@ -1271,7 +1271,7 @@ static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i entries, mclk_dep_table, vce_state_record->ucMCLKIndex); - vce_state->mclk = mclk_dep_record->ulMclk; + vce_state->mclk = le32_to_cpu(mclk_dep_record->ulMclk); return 0; } -- GitLab From 54f16ebfde5d32ea9583a4affbbe7c7fb5054966 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 7 Aug 2018 16:30:50 -0500 Subject: [PATCH 0371/1692] drm/amdgpu/pp: endian fixes for processpptables.c Properly swap when reading from the vbios. 
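One detail specific to this file: several of the missed swaps are table offsets (usStateArrayOffset, usClockInfoArrayOffset, and friends) that get added to the table base pointer. Leaving those unswapped does not merely corrupt a value, it makes the driver compute and dereference the wrong address on a big-endian host. Illustrative pattern only (usSubTableOffset is a made-up field name):

    const u8 *base = (const u8 *)powerplay_table;
    const void *sub_table = base +
            le16_to_cpu(powerplay_table->usSubTableOffset);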
Reviewed-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/processpptables.c | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 925e17104f90..77c14671866c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -757,8 +757,8 @@ static int init_non_clock_fields(struct pp_hwmgr *hwmgr, ps->validation.supportedPowerLevels = pnon_clock_info->ucRequiredPower; if (ATOM_PPLIB_NONCLOCKINFO_VER1 < version) { - ps->uvd_clocks.VCLK = pnon_clock_info->ulVCLK; - ps->uvd_clocks.DCLK = pnon_clock_info->ulDCLK; + ps->uvd_clocks.VCLK = le32_to_cpu(pnon_clock_info->ulVCLK); + ps->uvd_clocks.DCLK = le32_to_cpu(pnon_clock_info->ulDCLK); } else { ps->uvd_clocks.VCLK = 0; ps->uvd_clocks.DCLK = 0; @@ -937,8 +937,9 @@ int pp_tables_get_entry(struct pp_hwmgr *hwmgr, if (entry_index > powerplay_table->ucNumStates) return -1; - pstate_entry = (ATOM_PPLIB_STATE *)((unsigned long)powerplay_table + powerplay_table->usStateArrayOffset + - entry_index * powerplay_table->ucStateEntrySize); + pstate_entry = (ATOM_PPLIB_STATE *)((unsigned long)powerplay_table + + le16_to_cpu(powerplay_table->usStateArrayOffset) + + entry_index * powerplay_table->ucStateEntrySize); pnon_clock_info = (ATOM_PPLIB_NONCLOCK_INFO *)((unsigned long)powerplay_table + le16_to_cpu(powerplay_table->usNonClockInfoArrayOffset) + @@ -1063,13 +1064,13 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, &size, &frev, &crev); if ((fw_info->ucTableFormatRevision == 1) - && (fw_info->usStructureSize >= sizeof(ATOM_FIRMWARE_INFO_V1_4))) + && (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V1_4))) result = init_overdrive_limits_V1_4(hwmgr, powerplay_table, (const ATOM_FIRMWARE_INFO_V1_4 *)fw_info); else if ((fw_info->ucTableFormatRevision == 2) - && (fw_info->usStructureSize >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) + && (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) result = init_overdrive_limits_V2_1(hwmgr, powerplay_table, (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info); @@ -1303,7 +1304,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr, if (0 != powerplay_table4->usVddcDependencyOnSCLKOffset) { table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) (((unsigned long) powerplay_table4) + - powerplay_table4->usVddcDependencyOnSCLKOffset); + le16_to_cpu(powerplay_table4->usVddcDependencyOnSCLKOffset)); result = get_clock_voltage_dependency_table(hwmgr, &hwmgr->dyn_state.vddc_dependency_on_sclk, table); } @@ -1311,7 +1312,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr, if (result == 0 && (0 != powerplay_table4->usVddciDependencyOnMCLKOffset)) { table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) (((unsigned long) powerplay_table4) + - powerplay_table4->usVddciDependencyOnMCLKOffset); + le16_to_cpu(powerplay_table4->usVddciDependencyOnMCLKOffset)); result = get_clock_voltage_dependency_table(hwmgr, &hwmgr->dyn_state.vddci_dependency_on_mclk, table); } @@ -1319,7 +1320,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr, if (result == 0 && (0 != powerplay_table4->usVddcDependencyOnMCLKOffset)) { table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) (((unsigned long) powerplay_table4) + - powerplay_table4->usVddcDependencyOnMCLKOffset); + 
le16_to_cpu(powerplay_table4->usVddcDependencyOnMCLKOffset)); result = get_clock_voltage_dependency_table(hwmgr, &hwmgr->dyn_state.vddc_dependency_on_mclk, table); } @@ -1327,7 +1328,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr, if (result == 0 && (0 != powerplay_table4->usMaxClockVoltageOnDCOffset)) { limit_table = (ATOM_PPLIB_Clock_Voltage_Limit_Table *) (((unsigned long) powerplay_table4) + - powerplay_table4->usMaxClockVoltageOnDCOffset); + le16_to_cpu(powerplay_table4->usMaxClockVoltageOnDCOffset)); result = get_clock_voltage_limit(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc, limit_table); } @@ -1346,7 +1347,7 @@ static int init_clock_voltage_dependency(struct pp_hwmgr *hwmgr, if (result == 0 && (0 != powerplay_table4->usMvddDependencyOnMCLKOffset)) { table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) (((unsigned long) powerplay_table4) + - powerplay_table4->usMvddDependencyOnMCLKOffset); + le16_to_cpu(powerplay_table4->usMvddDependencyOnMCLKOffset)); result = get_clock_voltage_dependency_table(hwmgr, &hwmgr->dyn_state.mvdd_dependency_on_mclk, table); } @@ -1569,7 +1570,8 @@ static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr, const VCEClockInfoArray *vce_clock_info_array = (const VCEClockInfoArray *)(((unsigned long) powerplay_table) + vce_clock_info_array_offset); - const ClockInfoArray *clock_arrays = (ClockInfoArray *)(((unsigned long)powerplay_table) + powerplay_table->usClockInfoArrayOffset); + const ClockInfoArray *clock_arrays = (ClockInfoArray *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usClockInfoArrayOffset)); const ATOM_PPLIB_VCE_State_Record *record = &vce_state_table->entries[i]; @@ -1579,8 +1581,8 @@ static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr, *flag = (record->ucClockInfoIndex >> NUM_BITS_CLOCK_INFO_ARRAY_INDEX); - vce_state->evclk = ((uint32_t)vce_clock_info->ucEVClkHigh << 16) | vce_clock_info->usEVClkLow; - vce_state->ecclk = ((uint32_t)vce_clock_info->ucECClkHigh << 16) | vce_clock_info->usECClkLow; + vce_state->evclk = ((uint32_t)vce_clock_info->ucEVClkHigh << 16) | le16_to_cpu(vce_clock_info->usEVClkLow); + vce_state->ecclk = ((uint32_t)vce_clock_info->ucECClkHigh << 16) | le16_to_cpu(vce_clock_info->usECClkLow); *clock_info = (void *)((unsigned long)(clock_arrays->clockInfo) + (clockInfoIndex * clock_arrays->ucEntrySize)); -- GitLab From eb4f6999203710f82861fd03c1bc696dae4182b5 Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Wed, 11 Jul 2018 14:40:18 -0400 Subject: [PATCH 0372/1692] drm/amdgpu: add emit reg write reg wait for vcn jpeg The emit_reg_write_reg_wait function was not assigned for vcn jpeg. This patch adds it back. 
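For context, the generic helper wired up here emits a register write followed by a register wait, for rings that have no dedicated combined packet. Roughly what it does, shown only as a sketch of the common amdgpu helper rather than its literal source:

    static void reg_write_reg_wait_sketch(struct amdgpu_ring *ring,
                                          uint32_t reg0, uint32_t reg1,
                                          uint32_t ref, uint32_t mask)
    {
            amdgpu_ring_emit_wreg(ring, reg0, ref);           /* write ref into reg0 */
            amdgpu_ring_emit_reg_wait(ring, reg1, ref, mask); /* poll reg1 until (value & mask) == ref */
    }

Without the callback assigned, the JPEG ring had nothing to dispatch when a caller asked for a combined write-and-wait.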
Signed-off-by: Boyuan Zhang Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 072371ef5975..51fc2be0a9ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1746,6 +1746,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = { .end_use = amdgpu_vcn_ring_end_use, .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg, .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) -- GitLab From 8709890892d839ba7169924a301c9cb0bd54ce6b Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Wed, 18 Jul 2018 16:13:29 -0400 Subject: [PATCH 0373/1692] drm/amdgpu: add system interrupt register offset header Add new register offset for enabling system interrupt. Signed-off-by: Boyuan Zhang Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h index fe0cbaade3c3..216a401028de 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h @@ -307,6 +307,8 @@ #define mmUVD_LMI_CTRL2_BASE_IDX 1 #define mmUVD_MASTINT_EN 0x0540 #define mmUVD_MASTINT_EN_BASE_IDX 1 +#define mmUVD_SYS_INT_EN 0x0541 +#define mmUVD_SYS_INT_EN_BASE_IDX 1 #define mmJPEG_CGC_CTRL 0x0565 #define mmJPEG_CGC_CTRL_BASE_IDX 1 #define mmUVD_LMI_CTRL 0x0566 -- GitLab From 44287b7190f4504792e8bbfcd0ef899d566e4ec7 Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Wed, 18 Jul 2018 16:24:18 -0400 Subject: [PATCH 0374/1692] drm/amdgpu: add system interrupt mask for jrbc Add new mask for enabling system interrupt for jrbc. Signed-off-by: Boyuan Zhang Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h index d6ba26922275..124383dac284 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h @@ -982,6 +982,8 @@ #define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L #define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L #define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L +//UVD_SYS_INT_EN +#define UVD_SYS_INT_EN__UVD_JRBC_EN_MASK 0x00000010L //JPEG_CGC_CTRL #define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0 #define JPEG_CGC_CTRL__JPEG2_MODE__SHIFT 0x1 -- GitLab From 7f31077cadc98517bc580b638063c7d841b90372 Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Wed, 18 Jul 2018 16:25:42 -0400 Subject: [PATCH 0375/1692] drm/amdgpu: enable system interrupt for jrbc Enable system interrupt for jrbc during engine starting time. 
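The hunk below uses WREG32_P(), whose calling convention is easy to misread: the third argument is the set of bits to preserve from the current register value, which is why the code passes the complement of the field mask. As a rough sketch of this read-modify-write macro (not its literal definition):

    uint32_t tmp = RREG32(reg);
    tmp &= mask;           /* keep only the bits selected by 'mask' */
    tmp |= (val & ~mask);  /* take the remaining bits from 'val'    */
    WREG32(reg, tmp);

So the call below sets the JRBC enable bit in UVD_SYS_INT_EN while leaving every other bit of the register untouched.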
Signed-off-by: Boyuan Zhang Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 51fc2be0a9ce..8d84e2e50636 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -724,6 +724,11 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); + /* enable system interrupt for JRBC, TODO: move to set interrupt*/ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN), + UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, + ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK); + /* clear the bit 4 of VCN_STATUS */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); @@ -1778,7 +1783,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2; adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs; } -- GitLab From 36a12bd0b4239ef11178344525c6d0b912b0b230 Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Wed, 18 Jul 2018 16:26:28 -0400 Subject: [PATCH 0376/1692] drm/amdgpu: add emit trap for vcn jpeg Add emit trap command in jpeg emit fence call. Signed-off-by: Boyuan Zhang Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 8d84e2e50636..aa21f667ff47 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1340,6 +1340,10 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6 amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, 0x1); + + /* emit trap */ + amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7)); + amdgpu_ring_write(ring, 0); } /** -- GitLab From 7ea34ea3e889a539ecfe1073ac45f64e7eb3579b Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Wed, 18 Jul 2018 16:29:29 -0400 Subject: [PATCH 0377/1692] drm/amdgpu: fix emit frame size and comments for jpeg Fix vcn jpeg ring emit fence size in dword, and fix the naming in comments. 
Signed-off-by: Boyuan Zhang Acked-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index aa21f667ff47..2664bb2c47c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1738,10 +1738,10 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = { 6 + 6 + /* hdp invalidate / flush */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + - 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */ - 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */ + 8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */ + 26 + 26 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */ 6, - .emit_ib_size = 22, /* vcn_v1_0_dec_ring_emit_ib */ + .emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */ .emit_ib = vcn_v1_0_jpeg_ring_emit_ib, .emit_fence = vcn_v1_0_jpeg_ring_emit_fence, .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush, -- GitLab From ec2e082a79b5d46addf2e7b83a13fb015fca6149 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Aug 2018 14:24:08 -0500 Subject: [PATCH 0378/1692] drm/amdgpu/powerplay: check vrefresh when when changing displays Compare the current vrefresh in addition to the number of displays when determining whether or not the smu needs updates when changing modes. The SMU needs to be updated if the vbi timeout changes due to a different refresh rate. Fixes flickering around mode changes in some cases on polaris parts. Reviewed-by: Rex Zhu Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 +++ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 1 + drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 1 + drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 3 ++- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 1 + drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 1 + drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 1 + drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 1 + 8 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 380f282a64ba..ab759e38e4ea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4132,6 +4132,9 @@ smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) is_update_required = true; + if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh) + is_update_required = true; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index 3784ce6e50ab..69d361f8dfca 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -156,6 +156,7 @@ struct smu7_vbios_boot_state { struct smu7_display_timing { uint32_t min_clock_in_sr; uint32_t num_existing_displays; + uint32_t vrefresh; }; struct smu7_dpmlevel_enable_mask { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c 
b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index fbe3ef4ee45c..18643e06bc6f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -1231,6 +1231,7 @@ static int ci_populate_single_memory_level( memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; data->display_timing.num_existing_displays = hwmgr->display_config->num_display; + data->display_timing.vrefresh = hwmgr->display_config->vrefresh; /* stutter mode not support on ci */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 18048f8e2f13..ec14798e87b6 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -1210,7 +1210,8 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI, * &(data->DisplayTiming.numExistingDisplays)); */ - data->display_timing.num_existing_displays = 1; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; + data->display_timing.vrefresh = hwmgr->display_config->vrefresh; if (mclk_stutter_mode_threshold && (clock <= mclk_stutter_mode_threshold) && diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 9299b93aa09a..73aa368a454e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -1280,6 +1280,7 @@ static int iceland_populate_single_memory_level( memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; data->display_timing.num_existing_displays = hwmgr->display_config->num_display; + data->display_timing.vrefresh = hwmgr->display_config->vrefresh; /* stutter mode not support on iceland */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 1276f168ff68..872d3824337b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -1103,6 +1103,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; data->display_timing.num_existing_displays = hwmgr->display_config->num_display; + data->display_timing.vrefresh = hwmgr->display_config->vrefresh; if (mclk_stutter_mode_threshold && (clock <= mclk_stutter_mode_threshold) && diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 7dabc6c456e1..ae8378ed32ee 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -1004,6 +1004,7 @@ static int tonga_populate_single_memory_level( memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; data->display_timing.num_existing_displays = hwmgr->display_config->num_display; + data->display_timing.vrefresh = hwmgr->display_config->vrefresh; if ((mclk_stutter_mode_threshold != 0) && (memory_clock <= mclk_stutter_mode_threshold) && diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index 57420d7caa4e..3d415fabbd93 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -1009,6 +1009,7 @@ static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr, 
mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; data->display_timing.num_existing_displays = hwmgr->display_config->num_display; + data->display_timing.vrefresh = hwmgr->display_config->vrefresh; if (mclk_stutter_mode_threshold && (clock <= mclk_stutter_mode_threshold) && -- GitLab From 6c1fd99bc6694be3cca3c1778908fe40ef7532ca Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 9 Aug 2018 15:26:06 +0800 Subject: [PATCH 0379/1692] drm/amdgpu: Cancel gfx off delay work when driver fini/suspend there may be gfx off delay work pending when suspend/driver unload, need to cancel them first. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6d0ffbf5b337..99a0e478499b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1829,6 +1829,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) return r; } amdgpu_gfx_off_ctrl(adev, false); + cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { @@ -2012,6 +2013,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) /* call smu to disable gfx off feature first when suspend */ amdgpu_gfx_off_ctrl(adev, false); + cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) -- GitLab From f1220c876d4fb6bb41851f7f422efa4600d0bfa7 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Wed, 18 Jul 2018 20:28:54 -0400 Subject: [PATCH 0380/1692] drm/amd/display: dc 3.1.61 Signed-off-by: Tony Cheng Reviewed-by: Steven Chiu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 01af5356f2fc..af57c3001a82 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.60" +#define DC_VER "3.1.61" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- GitLab From 94a4ffd1d40b845dd19f9fdbb2cb6bf32de0946b Mon Sep 17 00:00:00 2001 From: Gloria Li Date: Thu, 26 Jul 2018 11:32:14 -0400 Subject: [PATCH 0381/1692] drm/amd/display: fix PIP bugs on Dal3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why] There are outstanding bugs for PIP in Dal3: -Crash when toggling PIP visibility -Global Alpha is not working, Adjusting global alpha doesn’t have an effect -Cursor is not working with pip plane and pipe splits -One flash occurs when cursor enters PIP plane from top/bottom -Crash when moving PIP plane off the screen [How] Resolve divide by 0 error Implement global alpha Program cursor on all pipes Add dst rects' x and y offests into cursor position Disable cursor when it is beyond bottom/top edge Signed-off-by: Gloria Li Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +++ .../gpu/drm/amd/display/dc/core/dc_resource.c | 9 ++++++--- .../gpu/drm/amd/display/dc/core/dc_stream.c | 2 -- drivers/gpu/drm/amd/display/dc/dc.h | 5 +++++ .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 10 +++++++++- 
.../gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 3 ++- .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 7 +++++++ .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 18 ++++++++++++------ drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 3 ++- 9 files changed, 46 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index ebdf82044f73..71742635e797 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1106,6 +1106,9 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) update_flags->bits.per_pixel_alpha_change = 1; + if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) + update_flags->bits.global_alpha_change = 1; + if (u->plane_info->dcc.enable != u->surface->dcc.enable || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index d10314016edb..2c348b11b9a5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -589,8 +589,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) data->viewport.width = (data->viewport.width + 1) / 2; data->viewport_c.width = (data->viewport_c.width + 1) / 2; } else if (pri_split) { - data->viewport.width /= 2; - data->viewport_c.width /= 2; + if (data->viewport.width > 1) + data->viewport.width /= 2; + if (data->viewport_c.width > 1) + data->viewport_c.width /= 2; } if (plane_state->rotation == ROTATION_ANGLE_90 || @@ -670,7 +672,8 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; } else { - pipe_ctx->plane_res.scl_data.recout.width /= 2; + if (pipe_ctx->plane_res.scl_data.recout.width > 1) + pipe_ctx->plane_res.scl_data.recout.width /= 2; } } /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index fdcc8ab19bf3..2ac848a106ba 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -205,8 +205,6 @@ bool dc_stream_set_cursor_attributes( if (pipe_ctx->stream != stream) continue; - if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) - continue; if (!pipe_to_program) { pipe_to_program = pipe_ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index af57c3001a82..eb1de3ba622f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -442,6 +442,7 @@ union surface_update_flags { uint32_t color_space_change:1; uint32_t horizontal_mirror_change:1; uint32_t per_pixel_alpha_change:1; + uint32_t global_alpha_change:1; uint32_t rotation_change:1; uint32_t swizzle_change:1; uint32_t scaling_change:1; @@ -496,6 +497,8 @@ struct dc_plane_state { bool is_tiling_rotated; bool per_pixel_alpha; + bool global_alpha; + int global_alpha_value; bool visible; bool flip_immediate; bool horizontal_mirror; @@ -522,6 +525,8 @@ struct dc_plane_info { bool horizontal_mirror; bool visible; bool per_pixel_alpha; + bool 
global_alpha; + int global_alpha_value; bool input_csc_enabled; }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 1d642552c743..5f2054a1d563 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -444,10 +444,12 @@ void dpp1_set_cursor_position( struct dpp *dpp_base, const struct dc_cursor_position *pos, const struct dc_cursor_mi_param *param, - uint32_t width) + uint32_t width, + uint32_t height) { struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x; + int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y; uint32_t cur_en = pos->enable ? 1 : 0; if (src_x_offset >= (int)param->viewport.width) @@ -456,6 +458,12 @@ void dpp1_set_cursor_position( if (src_x_offset + (int)width <= 0) cur_en = 0; /* not visible beyond left edge*/ + if (src_y_offset >= (int)param->viewport.height) + cur_en = 0; /* not visible beyond bottom edge*/ + + if (src_y_offset < 0) + cur_en = 0; /* not visible beyond top edge*/ + REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index e2889e61b18c..282e22f9b175 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1374,7 +1374,8 @@ void dpp1_set_cursor_position( struct dpp *dpp_base, const struct dc_cursor_position *pos, const struct dc_cursor_mi_param *param, - uint32_t width); + uint32_t width, + uint32_t height); void dpp1_cnv_set_optional_cursor_attributes( struct dpp *dpp_base, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index fa1bacd7ba3a..ec4a5f665586 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1070,6 +1070,7 @@ void hubp1_cursor_set_position( { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x; + int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y; int x_hotspot = pos->x_hotspot; int y_hotspot = pos->y_hotspot; uint32_t dst_x_offset; @@ -1113,6 +1114,12 @@ void hubp1_cursor_set_position( if (src_x_offset + (int)hubp->curs_attr.width <= 0) cur_en = 0; /* not visible beyond left edge*/ + if (src_y_offset >= (int)param->viewport.height) + cur_en = 0; /* not visible beyond bottom edge*/ + + if (src_y_offset < 0) //+ (int)hubp->curs_attr.height + cur_en = 0; /* not visible beyond top edge*/ + if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index cfd93557c428..6d27f1db3c69 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1932,9 +1932,13 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; blnd_cfg.overlap_only = false; - blnd_cfg.global_alpha = 0xff; blnd_cfg.global_gain = 0xff; + if (pipe_ctx->plane_state->global_alpha) + blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; + else + blnd_cfg.global_alpha = 0xff; + /* DCN1.0 has output CM before MPC which seems to screw with * pre-multiplied alpha. 
*/ @@ -2049,11 +2053,13 @@ static void update_dchubp_dpp( update_dpp(dpp, plane_state); if (plane_state->update_flags.bits.full_update || - plane_state->update_flags.bits.per_pixel_alpha_change) + plane_state->update_flags.bits.per_pixel_alpha_change || + plane_state->update_flags.bits.global_alpha_change) dc->hwss.update_mpcc(dc, pipe_ctx); if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.per_pixel_alpha_change || + plane_state->update_flags.bits.global_alpha_change || plane_state->update_flags.bits.scaling_change || plane_state->update_flags.bits.position_change) { update_scaler(pipe_ctx); @@ -2597,15 +2603,15 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .mirror = pipe_ctx->plane_state->horizontal_mirror }; + pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x; + pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y; + if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) pos_cpy.enable = false; - if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) - pos_cpy.enable = false; - hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m); - dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width); + dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height); } static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 80a480b9f137..e894e649ce5a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -147,7 +147,8 @@ struct dpp_funcs { struct dpp *dpp_base, const struct dc_cursor_position *pos, const struct dc_cursor_mi_param *param, - uint32_t width + uint32_t width, + uint32_t height ); void (*dpp_set_hdr_multiplier)( struct dpp *dpp_base, -- GitLab From c4621988d49785849a9fa817721d960798d14e19 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 30 Jul 2018 14:41:01 -0400 Subject: [PATCH 0382/1692] drm/amd/display: Add dprefclk value to dce_dccg This allows us to avoid any vbios bugs when initializing clocks Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 +++- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index 103dc3cf1c43..bf6261a1584b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -202,7 +202,7 @@ static int dce12_get_dp_ref_freq_khz(struct dccg *clk) { struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); - return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000); + return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz); } static enum dm_pp_clocks_state dce_get_required_clocks_state( @@ -882,6 +882,7 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx) dce_dccg_construct( clk_dce, ctx, NULL, NULL, NULL); + clk_dce->dprefclk_khz = 600000; clk_dce->base.funcs = &dce120_funcs; return &clk_dce->base; @@ -909,6 +910,7 @@ struct dccg *dcn1_dccg_create(struct dc_context *ctx) clk_dce->dprefclk_ss_divider = 1000; clk_dce->ss_on_dprefclk = false; + clk_dce->dprefclk_khz = 600000; if (bp->integrated_info) clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; if (clk_dce->dentist_vco_freq_khz == 0) { diff 
--git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h index 8b5a53e98ad9..34fdb386c884 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h @@ -90,6 +90,7 @@ struct dce_dccg { int dprefclk_ss_percentage; /* DPREFCLK SS percentage Divider (100 or 1000) */ int dprefclk_ss_divider; + int dprefclk_khz; }; -- GitLab From f137586b2b2bb3ea0b9886b0929055ddef5a32f4 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 17 Jul 2018 17:15:48 -0400 Subject: [PATCH 0383/1692] drm/amd/display: fix dml handling of mono8/16 pixel formats Mono formats are treated exactly the same as the equivalent bpp 444 formats. Dml validation however lacks a 444 8 bit format, while dml perf param calculation lacks mono format support. This change makes them equivalent as far as the enum is concerned, to avoid having to update dml. Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 47c19f8fe7d1..bea4e61b94c7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -40,8 +40,8 @@ enum source_format_class { dm_422_8 = 5, dm_422_10 = 6, dm_444_8 = 7, - dm_mono_8, - dm_mono_16 + dm_mono_8 = dm_444_8, + dm_mono_16 = dm_444_16 }; enum output_bpc_class { dm_out_6 = 0, dm_out_8 = 1, dm_out_10 = 2, dm_out_12 = 3, dm_out_16 = 4 -- GitLab From 2f14bc8968e3d97fc46bb464045d0fa8fbd2b013 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Tue, 31 Jul 2018 20:14:26 -0400 Subject: [PATCH 0384/1692] drm/amd/display: add retimer log for HWQ tuning use. Signed-off-by: Charlene Liu Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 59 +++++++++++++++++++ .../drm/amd/display/include/logger_types.h | 3 +- 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..1adfcdd588d6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -54,6 +54,9 @@ DC_LOG_HW_HOTPLUG( \ __VA_ARGS__) +#define RETIMER_REDRIVER_INFO(...)
\ + DC_LOG_RETIMER_REDRIVER( \ + __VA_ARGS__) /******************************************************************************* * Private structures ******************************************************************************/ @@ -1547,6 +1550,7 @@ static void write_i2c_retimer_setting( uint8_t value = 0; int i = 0; bool i2c_success = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); memset(&buffer, 0, sizeof(buffer)); @@ -1560,6 +1564,9 @@ static void write_i2c_retimer_setting( buffer[1] = settings->reg_settings[i].i2c_reg_val; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ @@ -1590,6 +1597,9 @@ static void write_i2c_retimer_setting( buffer[1] = value | apply_rx_tx_change; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1607,6 +1617,9 @@ static void write_i2c_retimer_setting( buffer[1] = settings->reg_settings_6g[i].i2c_reg_val; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ @@ -1637,6 +1650,9 @@ static void write_i2c_retimer_setting( buffer[1] = value | apply_rx_tx_change; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1653,6 +1669,9 @@ static void write_i2c_retimer_setting( buffer[1] = 0x01; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1662,6 +1681,9 @@ static void write_i2c_retimer_setting( buffer[1] = 0x23; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1671,6 +1693,9 @@ static void write_i2c_retimer_setting( buffer[1] = 0x00; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1686,6 +1711,7 @@ static void write_i2c_default_retimer_setting( uint8_t slave_address = (0xBA >> 1); uint8_t buffer[2]; bool i2c_success = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); memset(&buffer, 0, sizeof(buffer)); @@ -1695,6 +1721,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = 0x13; 
i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1704,6 +1733,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = 0x17; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1713,6 +1745,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = is_over_340mhz ? 0xDA : 0xD8; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1722,6 +1757,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = 0x17; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1731,6 +1769,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = is_over_340mhz ? 0x1D : 0x91; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1740,6 +1781,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = 0x17; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1753,6 +1797,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = 0x01; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1762,6 +1809,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = 0x23; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1771,6 +1821,9 @@ static void write_i2c_default_retimer_setting( buffer[1] = 0x00; i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) /* Write failure */ ASSERT(i2c_success); @@ -1784,6 
+1837,7 @@ static void write_i2c_redriver_setting( uint8_t slave_address = (0xF0 >> 1); uint8_t buffer[16]; bool i2c_success = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); memset(&buffer, 0, sizeof(buffer)); @@ -1795,6 +1849,11 @@ static void write_i2c_redriver_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\ + \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\ + offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\ + i2c_success = %d\n", + slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); if (!i2c_success) /* Write failure */ diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index ad3695e67b76..bc5732668092 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -62,6 +62,7 @@ #define DC_LOG_EVENT_UNDERFLOW(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_IF_TRACE(...) pr_debug("[IF_TRACE]:"__VA_ARGS__) #define DC_LOG_PERF_TRACE(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_RETIMER_REDRIVER(...) DRM_DEBUG_KMS(__VA_ARGS__) struct dal_logger; @@ -99,7 +100,7 @@ enum dc_log_type { LOG_IF_TRACE, LOG_PERF_TRACE, LOG_DISPLAYSTATS, - + LOG_HDMI_RETIMER_REDRIVER, LOG_SECTION_TOTAL_COUNT }; -- GitLab From 56780940389a344a949d53ed7be77012a20ced7a Mon Sep 17 00:00:00 2001 From: "Leo (Sunpeng) Li" Date: Wed, 1 Aug 2018 10:20:53 -0400 Subject: [PATCH 0385/1692] drm/amd/display: Remove redundant non-zero and overflow check [Why] Unsigned int is guaranteed to be >= 0, and read_channel_reply checks for overflows. read_channel_reply also returns -1 on error, which is what dc_link_aux_transfer is expected to return on error. [How] Remove the if-statement. Return result of read_channel_reply directly. 
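A minimal stand-alone sketch of the [Why] argument above; the variable names echo dc_link_aux_transfer(), but the snippet is illustrative only, not driver code:

/* Illustrative only: for an unsigned type, the "res >= 0" half of the
 * range check can never be false, so only "res <= size" does any work.
 */
#include <stdio.h>

int main(void)
{
	unsigned int size = 16;
	unsigned int res = 0xffffffffu;	/* a wrapped, "negative-looking" value */

	if (res <= size && res >= 0)	/* second comparison is always true */
		printf("in range\n");
	else
		printf("out of range\n");	/* taken: 0xffffffff > 16 */
	return 0;
}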
Signed-off-by: Leo (Sunpeng) Li Reviewed-by: Mikita Lipski Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 8def0d9fa0ff..506a97e16956 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -666,13 +666,9 @@ int dc_link_aux_transfer(struct ddc_service *ddc, switch (operation_result) { case AUX_CHANNEL_OPERATION_SUCCEEDED: - res = returned_bytes; - - if (res <= size && res >= 0) - res = aux_engine->funcs->read_channel_reply(aux_engine, size, - buffer, reply, - &status); - + res = aux_engine->funcs->read_channel_reply(aux_engine, size, + buffer, reply, + &status); break; case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: res = 0; -- GitLab From 5d0e7e5caa08e1548aa39ad316202bd99bf970c4 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Wed, 18 Jul 2018 20:29:13 -0400 Subject: [PATCH 0386/1692] drm/amd/display: dc 3.1.62 Signed-off-by: Tony Cheng Reviewed-by: Steven Chiu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index eb1de3ba622f..1cf4ec68e741 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.61" +#define DC_VER "3.1.62" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- GitLab From a4ead3e5d6f37b49291b81c1016e3d6f03843f2f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 10 Aug 2018 13:09:43 -0500 Subject: [PATCH 0387/1692] drm/amdgpu: add AVFS control to PP_FEATURE_MASK Add a ppfeaturemask flag to disable AVFS control. Reviewed-by: Rex Zhu Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/amd_shared.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 265621d8945c..86b167ec9863 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -129,6 +129,7 @@ enum PP_FEATURE_MASK { PP_GFXOFF_MASK = 0x8000, PP_ACG_MASK = 0x10000, PP_STUTTER_MODE = 0x20000, + PP_AVFS_MASK = 0x40000, }; /** -- GitLab From b4abff4bb2b43e94d7d36432d02a21ea25724bf8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 10 Aug 2018 13:19:26 -0500 Subject: [PATCH 0388/1692] drm/amdgpu/powerplay/smu7: enable AVFS control via ppfeaturemask Allow the user to disable AVFS via ppfeaturemask for debugging.
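A stand-alone sketch of the gating pattern these two patches rely on; the helper name is made up for illustration, and the assumption that hwmgr->feature_mask is normally seeded from the amdgpu.ppfeaturemask module parameter is not shown in this series:

/* Illustrative only: a PP_FEATURE_MASK bit simply gates a capability.
 * The value mirrors the amd_shared.h hunk above (bit 18).
 */
#include <stdbool.h>
#include <stdint.h>

#define PP_AVFS_MASK 0x40000

static bool avfs_requested(uint32_t feature_mask)
{
	return (feature_mask & PP_AVFS_MASK) != 0;
}

int main(void)
{
	uint32_t mask = 0xffffffffu & ~(uint32_t)PP_AVFS_MASK;	/* bit 18 cleared */

	return avfs_requested(mask) ? 1 : 0;	/* returns 0: AVFS stays off */
}

Under that assumption, booting with bit 18 cleared in ppfeaturemask (for example 0xfffbffff) would leave avfs_supported false in the hunks that follow.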
Reviewed-by: Rex Zhu Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index a029e47c2319..186dafc7f166 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -620,7 +620,8 @@ int smu7_init(struct pp_hwmgr *hwmgr) return -EINVAL; } - if (smum_is_hw_avfs_present(hwmgr)) + if (smum_is_hw_avfs_present(hwmgr) && + (hwmgr->feature_mask & PP_AVFS_MASK)) hwmgr->avfs_supported = true; return 0; -- GitLab From a19c3bea8ebea8f4ee740c56a6796dbcef692474 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 10 Aug 2018 13:21:09 -0500 Subject: [PATCH 0389/1692] drm/amdgpu/powerplay/vega10: enable AVFS control via ppfeaturemask Allow the user to disable AVFS via ppfeaturemask for debugging. Reviewed-by: Rex Zhu Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 704b237ecf70..ca9be583fb62 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -129,7 +129,8 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.thermal_support = 1; data->registry_data.fw_ctf_enabled = 1; - data->registry_data.avfs_support = 1; + data->registry_data.avfs_support = + hwmgr->feature_mask & PP_AVFS_MASK ? true : false; data->registry_data.led_dpm_enabled = 1; data->registry_data.vr0hot_enabled = 1; -- GitLab From bcd47f60ab65c780eef44cc09c0e264453dd455e Mon Sep 17 00:00:00 2001 From: Mauro Rossi Date: Sun, 12 Aug 2018 21:43:01 +0200 Subject: [PATCH 0390/1692] drm/amd/display: enable ABGR and XBGR formats (v4) SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 is supported in amd/display/dc/dc_hw_types.h and the necessary crossbar register controls to swap red and blue channels are already implemented in drm/amd/display/dc/dce/dce_mem_input.c (v4) Logic to handle new formats is added only in the amdgpu_dm module.
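As a usage sketch, the mapping this patch adds on the amdgpu_dm side, reduced to the two new cases; the helper name, the include paths and the INVALID fallback are illustrative assumptions, while the format identifiers come straight from the hunk that follows:

/* Illustrative only: translate the new DRM fourccs to the DC surface
 * format; the red/blue channel swap itself is handled by the crossbar
 * controls already present in dce_mem_input.c, per the commit message.
 */
#include <drm/drm_fourcc.h>	/* DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888 */
#include "dc_hw_types.h"	/* enum surface_pixel_format (assumed include path) */

static enum surface_pixel_format abgr_fourcc_to_dc_format(uint32_t fourcc)
{
	switch (fourcc) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
	default:
		return SURFACE_PIXEL_FORMAT_INVALID;
	}
}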
Signed-off-by: Mauro Rossi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d8261fe6a04f..497a718d1bc4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1882,6 +1882,10 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, case DRM_FORMAT_ABGR2101010: plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; + break; case DRM_FORMAT_NV21: plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; break; @@ -3185,6 +3189,8 @@ static const uint32_t rgb_formats[] = { DRM_FORMAT_XBGR2101010, DRM_FORMAT_ARGB2101010, DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, }; static const uint32_t yuv_formats[] = { -- GitLab From 00ecc6e6d4eeba9681ec91f9601a9ed1a68a9e7f Mon Sep 17 00:00:00 2001 From: Mauro Rossi Date: Sun, 12 Aug 2018 21:43:02 +0200 Subject: [PATCH 0391/1692] drm/amdgpu: enable ABGR and XBGR formats (v2) Add support for DRM_FORMAT_{A,X}BGR8888 in amdgpu with amd dc disabled (v2) Crossbar registers are defined and used to swap red and blue channels, keeping the existing coding style in each of the dce modules. After setting crossbar bits in fb_swap, use bitwise OR for big endian where required in DCE6 and DCE8 which do not rely on REG_SET_FIELD() Signed-off-by: Mauro Rossi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/si_enums.h | 20 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/sid.h | 20 ++++++++++++++++++++ 6 files changed, 82 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4313d6c6407d..3916aa6cc4ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1943,6 +1943,17 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); + fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2); + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN32); +#endif + break; default: DRM_ERROR("Unsupported screen format %s\n", drm_get_format_name(target_fb->format->format, &format_name)); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index e295cc18cae4..4ffb612a4e53 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1985,6 +1985,17 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); + fb_format = 
REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2); + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2); +#ifdef __BIG_ENDIAN + fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, + ENDIAN_8IN32); +#endif + break; default: DRM_ERROR("Unsupported screen format %s\n", drm_get_format_name(target_fb->format->format, &format_name)); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 018dd62dc5b6..480c5348a14f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1888,6 +1888,16 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | + GRPH_FORMAT(GRPH_FORMAT_ARGB8888)); + fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) | + GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R)); +#ifdef __BIG_ENDIAN + fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); +#endif + break; default: DRM_ERROR("Unsupported screen format %s\n", drm_get_format_name(target_fb->format->format, &format_name)); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 2bae3ad2bbf8..797196476c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1865,6 +1865,16 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | + (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); + fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) | + (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT)); +#ifdef __BIG_ENDIAN + fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); +#endif + break; default: DRM_ERROR("Unsupported screen format %s\n", drm_get_format_name(target_fb->format->format, &format_name)); diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h index dc9e0e6b4558..790ba46eaebb 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_enums.h +++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h @@ -46,6 +46,26 @@ #define GRPH_ENDIAN_8IN16 1 #define GRPH_ENDIAN_8IN32 2 #define GRPH_ENDIAN_8IN64 3 +#define GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4) +#define GRPH_RED_SEL_R 0 +#define GRPH_RED_SEL_G 1 +#define GRPH_RED_SEL_B 2 +#define GRPH_RED_SEL_A 3 +#define GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6) +#define GRPH_GREEN_SEL_G 0 +#define GRPH_GREEN_SEL_B 1 +#define GRPH_GREEN_SEL_A 2 +#define GRPH_GREEN_SEL_R 3 +#define GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8) +#define GRPH_BLUE_SEL_B 0 +#define GRPH_BLUE_SEL_A 1 +#define GRPH_BLUE_SEL_R 2 +#define GRPH_BLUE_SEL_G 3 +#define GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10) +#define GRPH_ALPHA_SEL_A 0 +#define GRPH_ALPHA_SEL_R 1 +#define GRPH_ALPHA_SEL_G 2 +#define GRPH_ALPHA_SEL_B 3 #define GRPH_DEPTH(x) (((x) & 0x3) << 0) #define GRPH_DEPTH_8BPP 0 diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h index c57eff159374..7cf12adb3915 100644 --- a/drivers/gpu/drm/amd/amdgpu/sid.h +++ b/drivers/gpu/drm/amd/amdgpu/sid.h @@ -2201,6 +2201,26 @@ # define EVERGREEN_GRPH_ENDIAN_8IN16 1 # define EVERGREEN_GRPH_ENDIAN_8IN32 
2 # define EVERGREEN_GRPH_ENDIAN_8IN64 3 +#define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4) +# define EVERGREEN_GRPH_RED_SEL_R 0 +# define EVERGREEN_GRPH_RED_SEL_G 1 +# define EVERGREEN_GRPH_RED_SEL_B 2 +# define EVERGREEN_GRPH_RED_SEL_A 3 +#define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6) +# define EVERGREEN_GRPH_GREEN_SEL_G 0 +# define EVERGREEN_GRPH_GREEN_SEL_B 1 +# define EVERGREEN_GRPH_GREEN_SEL_A 2 +# define EVERGREEN_GRPH_GREEN_SEL_R 3 +#define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8) +# define EVERGREEN_GRPH_BLUE_SEL_B 0 +# define EVERGREEN_GRPH_BLUE_SEL_A 1 +# define EVERGREEN_GRPH_BLUE_SEL_R 2 +# define EVERGREEN_GRPH_BLUE_SEL_G 3 +#define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10) +# define EVERGREEN_GRPH_ALPHA_SEL_A 0 +# define EVERGREEN_GRPH_ALPHA_SEL_R 1 +# define EVERGREEN_GRPH_ALPHA_SEL_G 2 +# define EVERGREEN_GRPH_ALPHA_SEL_B 3 #define EVERGREEN_D3VGA_CONTROL 0xf8 #define EVERGREEN_D4VGA_CONTROL 0xf9 -- GitLab From a69e40fd824ddae4f515ca8e754882bebf644ed4 Mon Sep 17 00:00:00 2001 From: Mauro Rossi Date: Sun, 12 Aug 2018 21:43:03 +0200 Subject: [PATCH 0392/1692] drm/radeon: enable ABGR and XBGR formats (v2) Add support for DRM_FORMAT_{A,X}BGR8888 in atombios_crtc Swapping of red and blue channels is implemented for radeon chipsets: DCE2/R6xx and later - crossbar registers defined where needed and used DCE1/R5xx - AVIVO_D1GRPH_SWAP_RB bit is used (v2) Set AVIVO_D1GRPH_SWAP_RB bit in fb_format, using bitwise OR for DCE1 path Use bitwise OR where required for big endian settings in fb_swap Use existing code style CHIP_R600 condition, fix typo in R600 blue crossbar Signed-off-by: Mauro Rossi Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/atombios_crtc.c | 25 +++++++++++++++++++++ drivers/gpu/drm/radeon/r600_reg.h | 31 +++++++++++++++++++++----- 2 files changed, 51 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index efbd5816082d..d75ae17ff3ad 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1254,6 +1254,16 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); + fb_swap = (EVERGREEN_GRPH_RED_CROSSBAR(EVERGREEN_GRPH_RED_SEL_B) | + EVERGREEN_GRPH_BLUE_CROSSBAR(EVERGREEN_GRPH_BLUE_SEL_R)); +#ifdef __BIG_ENDIAN + fb_swap |= EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); +#endif + break; default: DRM_ERROR("Unsupported screen format %s\n", drm_get_format_name(target_fb->format->format, &format_name)); @@ -1551,6 +1561,21 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ bypass_lut = true; break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + fb_format = + AVIVO_D1GRPH_CONTROL_DEPTH_32BPP | + AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888; + if (rdev->family >= CHIP_R600) + fb_swap = + (R600_D1GRPH_RED_CROSSBAR(R600_D1GRPH_RED_SEL_B) | + R600_D1GRPH_BLUE_CROSSBAR(R600_D1GRPH_BLUE_SEL_R)); + else /* DCE1 (R5xx) */ + fb_format |= AVIVO_D1GRPH_SWAP_RB; +#ifdef __BIG_ENDIAN + fb_swap |= R600_D1GRPH_SWAP_ENDIAN_32BIT; +#endif + break; default: DRM_ERROR("Unsupported screen format %s\n", drm_get_format_name(target_fb->format->format, &format_name)); diff --git 
a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index 3ef202629e7e..85e85ac3ba4d 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h @@ -87,11 +87,32 @@ #define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720 #define R600_LOW_VID_LOWER_GPIO_CNTL 0x724 -#define R600_D1GRPH_SWAP_CONTROL 0x610C -# define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0) -# define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0) -# define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0) -# define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0) +#define R600_D1GRPH_SWAP_CONTROL 0x610C +# define R600_D1GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) +# define R600_D1GRPH_SWAP_ENDIAN_NONE 0 +# define R600_D1GRPH_SWAP_ENDIAN_16BIT 1 +# define R600_D1GRPH_SWAP_ENDIAN_32BIT 2 +# define R600_D1GRPH_SWAP_ENDIAN_64BIT 3 +# define R600_D1GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4) +# define R600_D1GRPH_RED_SEL_R 0 +# define R600_D1GRPH_RED_SEL_G 1 +# define R600_D1GRPH_RED_SEL_B 2 +# define R600_D1GRPH_RED_SEL_A 3 +# define R600_D1GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6) +# define R600_D1GRPH_GREEN_SEL_G 0 +# define R600_D1GRPH_GREEN_SEL_B 1 +# define R600_D1GRPH_GREEN_SEL_A 2 +# define R600_D1GRPH_GREEN_SEL_R 3 +# define R600_D1GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8) +# define R600_D1GRPH_BLUE_SEL_B 0 +# define R600_D1GRPH_BLUE_SEL_A 1 +# define R600_D1GRPH_BLUE_SEL_R 2 +# define R600_D1GRPH_BLUE_SEL_G 3 +# define R600_D1GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10) +# define R600_D1GRPH_ALPHA_SEL_A 0 +# define R600_D1GRPH_ALPHA_SEL_R 1 +# define R600_D1GRPH_ALPHA_SEL_G 2 +# define R600_D1GRPH_ALPHA_SEL_B 3 #define R600_HDP_NONSURFACE_BASE 0x2c04 -- GitLab From 1f902edecb3201eacae5e3735fe4857a987627d2 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Wed, 17 Jan 2018 20:05:19 +0800 Subject: [PATCH 0393/1692] drm/amdgpu/include: Add nbio 7.4 header files (v4) v2: Cleanups (Alex) v3: More updates (Alex) v4: more cleanups (Alex) Signed-off-by: Feifei Xu Acked-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../include/asic_reg/nbio/nbio_7_4_offset.h | 4627 ++ .../include/asic_reg/nbio/nbio_7_4_sh_mask.h | 48436 ++++++++++++++++ 2 files changed, 53063 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h new file mode 100644 index 000000000000..e932213f87f0 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h @@ -0,0 +1,4627 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _nbio_7_4_OFFSET_HEADER +#define _nbio_7_4_OFFSET_HEADER + + + +// addressBlock: nbio_pcie0_pswuscfg0_cfgdecp +// base address: 0x0 +#define cfgPSWUSCFG0_VENDOR_ID 0x0000 +#define cfgPSWUSCFG0_DEVICE_ID 0x0002 +#define cfgPSWUSCFG0_COMMAND 0x0004 +#define cfgPSWUSCFG0_STATUS 0x0006 +#define cfgPSWUSCFG0_REVISION_ID 0x0008 +#define cfgPSWUSCFG0_PROG_INTERFACE 0x0009 +#define cfgPSWUSCFG0_SUB_CLASS 0x000a +#define cfgPSWUSCFG0_BASE_CLASS 0x000b +#define cfgPSWUSCFG0_CACHE_LINE 0x000c +#define cfgPSWUSCFG0_LATENCY 0x000d +#define cfgPSWUSCFG0_HEADER 0x000e +#define cfgPSWUSCFG0_BIST 0x000f +#define cfgPSWUSCFG0_SUB_BUS_NUMBER_LATENCY 0x0018 +#define cfgPSWUSCFG0_IO_BASE_LIMIT 0x001c +#define cfgPSWUSCFG0_SECONDARY_STATUS 0x001e +#define cfgPSWUSCFG0_MEM_BASE_LIMIT 0x0020 +#define cfgPSWUSCFG0_PREF_BASE_LIMIT 0x0024 +#define cfgPSWUSCFG0_PREF_BASE_UPPER 0x0028 +#define cfgPSWUSCFG0_PREF_LIMIT_UPPER 0x002c +#define cfgPSWUSCFG0_IO_BASE_LIMIT_HI 0x0030 +#define cfgPSWUSCFG0_CAP_PTR 0x0034 +#define cfgPSWUSCFG0_INTERRUPT_LINE 0x003c +#define cfgPSWUSCFG0_INTERRUPT_PIN 0x003d +#define cfgPSWUSCFG0_IRQ_BRIDGE_CNTL 0x003e +#define cfgEXT_BRIDGE_CNTL 0x0040 +#define cfgPSWUSCFG0_VENDOR_CAP_LIST 0x0048 +#define cfgPSWUSCFG0_ADAPTER_ID_W 0x004c +#define cfgPSWUSCFG0_PMI_CAP_LIST 0x0050 +#define cfgPSWUSCFG0_PMI_CAP 0x0052 +#define cfgPSWUSCFG0_PMI_STATUS_CNTL 0x0054 +#define cfgPSWUSCFG0_PCIE_CAP_LIST 0x0058 +#define cfgPSWUSCFG0_PCIE_CAP 0x005a +#define cfgPSWUSCFG0_DEVICE_CAP 0x005c +#define cfgPSWUSCFG0_DEVICE_CNTL 0x0060 +#define cfgPSWUSCFG0_DEVICE_STATUS 0x0062 +#define cfgPSWUSCFG0_LINK_CAP 0x0064 +#define cfgPSWUSCFG0_LINK_CNTL 0x0068 +#define cfgPSWUSCFG0_LINK_STATUS 0x006a +#define cfgPSWUSCFG0_DEVICE_CAP2 0x007c +#define cfgPSWUSCFG0_DEVICE_CNTL2 0x0080 +#define cfgPSWUSCFG0_DEVICE_STATUS2 0x0082 +#define cfgPSWUSCFG0_LINK_CAP2 0x0084 +#define cfgPSWUSCFG0_LINK_CNTL2 0x0088 +#define cfgPSWUSCFG0_LINK_STATUS2 0x008a +#define cfgPSWUSCFG0_MSI_CAP_LIST 0x00a0 +#define cfgPSWUSCFG0_MSI_MSG_CNTL 0x00a2 +#define cfgPSWUSCFG0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgPSWUSCFG0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgPSWUSCFG0_MSI_MSG_DATA 0x00a8 +#define cfgPSWUSCFG0_MSI_MSG_DATA_64 0x00ac +#define cfgPSWUSCFG0_SSID_CAP_LIST 0x00c0 +#define cfgPSWUSCFG0_SSID_CAP 0x00c4 +#define cfgMSI_MAP_CAP_LIST 0x00c8 +#define cfgMSI_MAP_CAP 0x00ca +#define cfgMSI_MAP_ADDR_LO 0x00cc +#define cfgMSI_MAP_ADDR_HI 0x00d0 +#define cfgPSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgPSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgPSWUSCFG0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgPSWUSCFG0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgPSWUSCFG0_PCIE_VC_ENH_CAP_LIST 0x0110 +#define cfgPSWUSCFG0_PCIE_PORT_VC_CAP_REG1 0x0114 +#define cfgPSWUSCFG0_PCIE_PORT_VC_CAP_REG2 0x0118 +#define cfgPSWUSCFG0_PCIE_PORT_VC_CNTL 0x011c +#define cfgPSWUSCFG0_PCIE_PORT_VC_STATUS 0x011e +#define cfgPSWUSCFG0_PCIE_VC0_RESOURCE_CAP 0x0120 +#define cfgPSWUSCFG0_PCIE_VC0_RESOURCE_CNTL 0x0124 +#define cfgPSWUSCFG0_PCIE_VC0_RESOURCE_STATUS 0x012a +#define cfgPSWUSCFG0_PCIE_VC1_RESOURCE_CAP 0x012c +#define cfgPSWUSCFG0_PCIE_VC1_RESOURCE_CNTL 0x0130 +#define cfgPSWUSCFG0_PCIE_VC1_RESOURCE_STATUS 0x0136 +#define cfgPSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140 +#define 
cfgPSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW1 0x0144 +#define cfgPSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW2 0x0148 +#define cfgPSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgPSWUSCFG0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgPSWUSCFG0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgPSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgPSWUSCFG0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgPSWUSCFG0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgPSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgPSWUSCFG0_PCIE_HDR_LOG0 0x016c +#define cfgPSWUSCFG0_PCIE_HDR_LOG1 0x0170 +#define cfgPSWUSCFG0_PCIE_HDR_LOG2 0x0174 +#define cfgPSWUSCFG0_PCIE_HDR_LOG3 0x0178 +#define cfgPSWUSCFG0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgPSWUSCFG0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgPSWUSCFG0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgPSWUSCFG0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgPSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270 +#define cfgPSWUSCFG0_PCIE_LINK_CNTL3 0x0274 +#define cfgPSWUSCFG0_PCIE_LANE_ERROR_STATUS 0x0278 +#define cfgPSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c +#define cfgPSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e +#define cfgPSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280 +#define cfgPSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282 +#define cfgPSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284 +#define cfgPSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286 +#define cfgPSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288 +#define cfgPSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a +#define cfgPSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c +#define cfgPSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e +#define cfgPSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290 +#define cfgPSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292 +#define cfgPSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294 +#define cfgPSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296 +#define cfgPSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298 +#define cfgPSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a +#define cfgPSWUSCFG0_PCIE_ACS_ENH_CAP_LIST 0x02a0 +#define cfgPSWUSCFG0_PCIE_ACS_CAP 0x02a4 +#define cfgPSWUSCFG0_PCIE_ACS_CNTL 0x02a6 +#define cfgPSWUSCFG0_PCIE_MC_ENH_CAP_LIST 0x02f0 +#define cfgPSWUSCFG0_PCIE_MC_CAP 0x02f4 +#define cfgPSWUSCFG0_PCIE_MC_CNTL 0x02f6 +#define cfgPSWUSCFG0_PCIE_MC_ADDR0 0x02f8 +#define cfgPSWUSCFG0_PCIE_MC_ADDR1 0x02fc +#define cfgPSWUSCFG0_PCIE_MC_RCV0 0x0300 +#define cfgPSWUSCFG0_PCIE_MC_RCV1 0x0304 +#define cfgPSWUSCFG0_PCIE_MC_BLOCK_ALL0 0x0308 +#define cfgPSWUSCFG0_PCIE_MC_BLOCK_ALL1 0x030c +#define cfgPSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_0 0x0310 +#define cfgPSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_1 0x0314 +#define cfgPCIE_MC_OVERLAY_BAR0 0x0318 +#define cfgPCIE_MC_OVERLAY_BAR1 0x031c +#define cfgPSWUSCFG0_PCIE_LTR_ENH_CAP_LIST 0x0320 +#define cfgPSWUSCFG0_PCIE_LTR_CAP 0x0324 +#define cfgPSWUSCFG0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgPSWUSCFG0_PCIE_ARI_CAP 0x032c +#define cfgPSWUSCFG0_PCIE_ARI_CNTL 0x032e +#define cfgPCIE_L1_PM_SUB_CAP_LIST 0x0370 +#define cfgPCIE_L1_PM_SUB_CAP 0x0374 +#define cfgPCIE_L1_PM_SUB_CNTL 0x0378 +#define cfgPCIE_L1_PM_SUB_CNTL2 0x037c +#define cfgPCIE_ESM_CAP_LIST 0x03c4 +#define cfgPCIE_ESM_HEADER_1 0x03c8 +#define cfgPCIE_ESM_HEADER_2 0x03cc +#define cfgPCIE_ESM_STATUS 0x03ce +#define cfgPCIE_ESM_CTRL 0x03d0 +#define cfgPCIE_ESM_CAP_1 0x03d4 +#define cfgPCIE_ESM_CAP_2 0x03d8 +#define cfgPCIE_ESM_CAP_3 0x03dc +#define cfgPCIE_ESM_CAP_4 0x03e0 +#define cfgPCIE_ESM_CAP_5 0x03e4 +#define cfgPCIE_ESM_CAP_6 0x03e8 +#define cfgPCIE_ESM_CAP_7 0x03ec +#define cfgPSWUSCFG0_PCIE_DLF_ENH_CAP_LIST 0x0400 +#define 
cfgPSWUSCFG0_DATA_LINK_FEATURE_CAP 0x0404 +#define cfgPSWUSCFG0_DATA_LINK_FEATURE_STATUS 0x0408 +#define cfgPCIE_PHY_16GT_ENH_CAP_LIST 0x0410 +#define cfgPSWUSCFG0_LINK_CAP_16GT 0x0414 +#define cfgPSWUSCFG0_LINK_CNTL_16GT 0x0418 +#define cfgPSWUSCFG0_LINK_STATUS_16GT 0x041c +#define cfgPSWUSCFG0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420 +#define cfgPSWUSCFG0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424 +#define cfgPSWUSCFG0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428 +#define cfgPSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430 +#define cfgPSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431 +#define cfgPSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432 +#define cfgPSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433 +#define cfgPSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434 +#define cfgPSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435 +#define cfgPSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436 +#define cfgPSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437 +#define cfgPSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438 +#define cfgPSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439 +#define cfgPSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a +#define cfgPSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b +#define cfgPSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c +#define cfgPSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d +#define cfgPSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e +#define cfgPSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f +#define cfgPCIE_MARGINING_ENH_CAP_LIST 0x0440 +#define cfgPSWUSCFG0_MARGINING_PORT_CAP 0x0444 +#define cfgPSWUSCFG0_MARGINING_PORT_STATUS 0x0446 +#define cfgPSWUSCFG0_LANE_0_MARGINING_LANE_CNTL 0x0448 +#define cfgPSWUSCFG0_LANE_0_MARGINING_LANE_STATUS 0x044a +#define cfgPSWUSCFG0_LANE_1_MARGINING_LANE_CNTL 0x044c +#define cfgPSWUSCFG0_LANE_1_MARGINING_LANE_STATUS 0x044e +#define cfgPSWUSCFG0_LANE_2_MARGINING_LANE_CNTL 0x0450 +#define cfgPSWUSCFG0_LANE_2_MARGINING_LANE_STATUS 0x0452 +#define cfgPSWUSCFG0_LANE_3_MARGINING_LANE_CNTL 0x0454 +#define cfgPSWUSCFG0_LANE_3_MARGINING_LANE_STATUS 0x0456 +#define cfgPSWUSCFG0_LANE_4_MARGINING_LANE_CNTL 0x0458 +#define cfgPSWUSCFG0_LANE_4_MARGINING_LANE_STATUS 0x045a +#define cfgPSWUSCFG0_LANE_5_MARGINING_LANE_CNTL 0x045c +#define cfgPSWUSCFG0_LANE_5_MARGINING_LANE_STATUS 0x045e +#define cfgPSWUSCFG0_LANE_6_MARGINING_LANE_CNTL 0x0460 +#define cfgPSWUSCFG0_LANE_6_MARGINING_LANE_STATUS 0x0462 +#define cfgPSWUSCFG0_LANE_7_MARGINING_LANE_CNTL 0x0464 +#define cfgPSWUSCFG0_LANE_7_MARGINING_LANE_STATUS 0x0466 +#define cfgPSWUSCFG0_LANE_8_MARGINING_LANE_CNTL 0x0468 +#define cfgPSWUSCFG0_LANE_8_MARGINING_LANE_STATUS 0x046a +#define cfgPSWUSCFG0_LANE_9_MARGINING_LANE_CNTL 0x046c +#define cfgPSWUSCFG0_LANE_9_MARGINING_LANE_STATUS 0x046e +#define cfgPSWUSCFG0_LANE_10_MARGINING_LANE_CNTL 0x0470 +#define cfgPSWUSCFG0_LANE_10_MARGINING_LANE_STATUS 0x0472 +#define cfgPSWUSCFG0_LANE_11_MARGINING_LANE_CNTL 0x0474 +#define cfgPSWUSCFG0_LANE_11_MARGINING_LANE_STATUS 0x0476 +#define cfgPSWUSCFG0_LANE_12_MARGINING_LANE_CNTL 0x0478 +#define cfgPSWUSCFG0_LANE_12_MARGINING_LANE_STATUS 0x047a +#define cfgPSWUSCFG0_LANE_13_MARGINING_LANE_CNTL 0x047c +#define cfgPSWUSCFG0_LANE_13_MARGINING_LANE_STATUS 0x047e +#define cfgPSWUSCFG0_LANE_14_MARGINING_LANE_CNTL 0x0480 +#define cfgPSWUSCFG0_LANE_14_MARGINING_LANE_STATUS 0x0482 +#define cfgPSWUSCFG0_LANE_15_MARGINING_LANE_CNTL 0x0484 +#define cfgPSWUSCFG0_LANE_15_MARGINING_LANE_STATUS 0x0486 + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_0_VENDOR_ID 0x0000 +#define 
cfgBIF_CFG_DEV0_EPF0_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_0_MIN_GRANT 0x003e +#define cfgBIF_CFG_DEV0_EPF0_0_MAX_LATENCY 0x003f +#define cfgBIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST 0x0048 +#define cfgBIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W 0x004c +#define cfgBIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST 0x0050 +#define cfgBIF_CFG_DEV0_EPF0_0_PMI_CAP 0x0052 +#define cfgBIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL 0x0054 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST 0x0110 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1 0x0114 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2 0x0118 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL 0x011c +#define 
cfgBIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS 0x011e +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP 0x0120 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL 0x0124 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS 0x012a +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP 0x012c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL 0x0130 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS 0x0136 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1 0x0144 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2 0x0148 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST 0x0200 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP 0x0204 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL 0x0208 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP 0x020c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL 0x0210 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP 0x0214 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL 0x0218 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP 0x021c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL 0x0220 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP 0x0224 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL 0x0228 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP 0x022c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL 0x0230 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x0240 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT 0x0244 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA 0x0248 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP 0x024c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST 0x0250 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP 0x0254 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR 0x0258 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS 0x025c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL 0x025e +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x0260 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x0261 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x0262 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x0263 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x0264 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x0265 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x0266 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x0267 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3 0x0274 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS 0x0278 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL 
0x027c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST 0x02a0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP 0x02a4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL 0x02a6 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST 0x02c0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL 0x02c4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS 0x02c6 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY 0x02c8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC 0x02cc +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST 0x02d0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP 0x02d4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL 0x02d6 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST 0x02f0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP 0x02f4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL 0x02f6 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0 0x02f8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1 0x02fc +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0 0x0300 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1 0x0304 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0 0x0308 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1 0x030c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0 0x0310 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1 0x0314 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST 0x0320 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP 0x0324 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL 0x032e +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST 0x0330 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP 0x0334 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL 0x0338 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS 0x033a +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS 0x033c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS 0x033e +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS 0x0340 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK 0x0342 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET 0x0344 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE 0x0346 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID 0x034a +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x034c +#define 
cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x0350 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0 0x0354 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1 0x0358 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2 0x035c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3 0x0360 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4 0x0364 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5 0x0368 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x036c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST 0x0370 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP 0x0374 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL 0x0378 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST 0x0400 +#define cfgBIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP 0x0404 +#define cfgBIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS 0x0408 +#define cfgBIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST 0x0410 +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT 0x0414 +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT 0x0418 +#define cfgBIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT 0x041c +#define cfgBIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420 +#define cfgBIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424 +#define cfgBIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f +#define cfgBIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST 0x0440 +#define cfgBIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP 0x0444 +#define cfgBIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS 0x0446 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL 0x0448 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS 0x044a +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL 0x044c +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS 0x044e +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL 0x0450 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS 0x0452 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL 0x0454 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS 0x0456 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL 0x0458 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS 0x045a +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL 0x045c +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS 0x045e +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL 0x0460 +#define 
cfgBIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS 0x0462 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL 0x0464 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS 0x0466 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL 0x0468 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS 0x046a +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL 0x046c +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS 0x046e +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL 0x0470 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS 0x0472 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL 0x0474 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS 0x0476 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL 0x0478 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS 0x047a +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL 0x047c +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS 0x047e +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL 0x0480 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS 0x0482 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL 0x0484 +#define cfgBIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS 0x0486 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST 0x04c0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP 0x04c4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL 0x04c8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP 0x04cc +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL 0x04d0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP 0x04d4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL 0x04d8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP 0x04dc +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL 0x04e0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP 0x04e4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL 0x04e8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP 0x04ec +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL 0x04f0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV 0x0500 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV 0x0504 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW 0x0508 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE 0x050c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS 0x0510 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL 0x0514 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 0x0518 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 0x051c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 0x0520 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT 0x0524 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB 0x0528 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS 0x052c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE 0x0530 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB 0x0534 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB 0x0538 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB 0x053c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB 0x0540 +#define 
cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB 0x0544 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB 0x0548 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB 0x054c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB 0x0550 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB 0x0554 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB 0x0558 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB 0x055c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB 0x0560 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB 0x0564 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB 0x0568 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB 0x056c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB 0x0570 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB 0x0574 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB 0x0578 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB 0x057c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB 0x0580 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB 0x0584 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB 0x0588 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB 0x058c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB 0x0590 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB 0x0594 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB 0x0598 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB 0x059c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB 0x05a0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB 0x05a4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB 0x05a8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB 0x05ac +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0 0x05b0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1 0x05b4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2 0x05b8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3 0x05bc +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4 0x05c0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5 0x05c4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6 0x05c8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7 0x05cc +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8 0x05d0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0 0x05e0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1 0x05e4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2 0x05e8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3 0x05ec +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4 0x05f0 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5 0x05f4 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6 0x05f8 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7 0x05fc +#define 
cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8 0x0600 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0 0x0610 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1 0x0614 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2 0x0618 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3 0x061c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4 0x0620 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5 0x0624 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6 0x0628 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7 0x062c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8 0x0630 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 0x0640 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 0x0644 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 0x0648 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 0x064c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 0x0650 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 0x0654 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 0x0658 +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 0x065c +#define cfgBIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 0x0660 + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF1_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF1_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF1_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF1_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF1_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF1_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF1_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF1_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF1_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF1_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF1_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF1_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF1_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF1_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF1_0_MIN_GRANT 0x003e +#define cfgBIF_CFG_DEV0_EPF1_0_MAX_LATENCY 0x003f +#define cfgBIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST 0x0048 +#define cfgBIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W 0x004c +#define cfgBIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST 0x0050 +#define cfgBIF_CFG_DEV0_EPF1_0_PMI_CAP 0x0052 +#define cfgBIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL 0x0054 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_STATUS 0x0076 
+#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF1_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF1_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF1_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF1_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF1_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF1_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST 0x0110 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1 0x0114 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2 0x0118 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL 0x011c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS 0x011e +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP 0x0120 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL 0x0124 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS 0x012a +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP 0x012c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL 0x0130 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS 0x0136 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 0x0140 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1 0x0144 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2 0x0148 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST 0x0200 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP 0x0204 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL 0x0208 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP 0x020c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL 0x0210 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP 0x0214 +#define 
cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL 0x0218 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP 0x021c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL 0x0220 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP 0x0224 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL 0x0228 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP 0x022c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL 0x0230 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST 0x0240 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT 0x0244 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA 0x0248 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP 0x024c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST 0x0250 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP 0x0254 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR 0x0258 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS 0x025c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL 0x025e +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 0x0260 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 0x0261 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 0x0262 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 0x0263 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 0x0264 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 0x0265 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 0x0266 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 0x0267 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3 0x0274 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS 0x0278 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST 0x02a0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP 0x02a4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL 0x02a6 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST 0x02c0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL 0x02c4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS 0x02c6 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY 0x02c8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC 0x02cc +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST 0x02d0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP 0x02d4 +#define 
cfgBIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL 0x02d6 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST 0x02f0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP 0x02f4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL 0x02f6 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0 0x02f8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1 0x02fc +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0 0x0300 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1 0x0304 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0 0x0308 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1 0x030c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0 0x0310 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1 0x0314 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST 0x0320 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP 0x0324 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL 0x032e +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST 0x0330 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP 0x0334 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL 0x0338 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS 0x033a +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS 0x033c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS 0x033e +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS 0x0340 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK 0x0342 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET 0x0344 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE 0x0346 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID 0x034a +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE 0x034c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE 0x0350 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0 0x0354 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1 0x0358 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2 0x035c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3 0x0360 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4 0x0364 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5 0x0368 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET 0x036c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST 0x0370 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP 0x0374 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL 0x0378 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST 0x0400 +#define cfgBIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP 0x0404 +#define cfgBIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS 0x0408 +#define cfgBIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST 0x0410 +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT 0x0414 +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT 0x0418 +#define cfgBIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT 0x041c +#define cfgBIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420 +#define cfgBIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424 +#define cfgBIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436 +#define 
cfgBIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f +#define cfgBIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST 0x0440 +#define cfgBIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP 0x0444 +#define cfgBIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS 0x0446 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL 0x0448 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS 0x044a +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL 0x044c +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS 0x044e +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL 0x0450 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS 0x0452 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL 0x0454 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS 0x0456 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL 0x0458 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS 0x045a +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL 0x045c +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS 0x045e +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL 0x0460 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS 0x0462 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL 0x0464 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS 0x0466 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL 0x0468 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS 0x046a +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL 0x046c +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS 0x046e +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL 0x0470 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS 0x0472 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL 0x0474 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS 0x0476 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL 0x0478 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS 0x047a +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL 0x047c +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS 0x047e +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL 0x0480 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS 0x0482 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL 0x0484 +#define cfgBIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS 0x0486 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST 0x04c0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP 0x04c4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL 0x04c8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP 0x04cc +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL 0x04d0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP 0x04d4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL 0x04d8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP 0x04dc +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL 0x04e0 +#define 
cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP 0x04e4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL 0x04e8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP 0x04ec +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL 0x04f0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV 0x0500 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV 0x0504 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW 0x0508 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE 0x050c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS 0x0510 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL 0x0514 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 0x0518 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 0x051c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 0x0520 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT 0x0524 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB 0x0528 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS 0x052c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE 0x0530 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB 0x0534 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB 0x0538 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB 0x053c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB 0x0540 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB 0x0544 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB 0x0548 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB 0x054c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB 0x0550 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB 0x0554 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB 0x0558 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB 0x055c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB 0x0560 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB 0x0564 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB 0x0568 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB 0x056c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB 0x0570 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB 0x0574 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB 0x0578 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB 0x057c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB 0x0580 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB 0x0584 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB 0x0588 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB 0x058c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB 0x0590 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB 0x0594 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB 0x0598 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB 0x059c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB 0x05a0 +#define 
cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB 0x05a4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB 0x05a8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB 0x05ac +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0 0x05b0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1 0x05b4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2 0x05b8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3 0x05bc +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4 0x05c0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5 0x05c4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6 0x05c8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7 0x05cc +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8 0x05d0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0 0x05e0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1 0x05e4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2 0x05e8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3 0x05ec +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4 0x05f0 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5 0x05f4 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6 0x05f8 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7 0x05fc +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8 0x0600 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0 0x0610 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1 0x0614 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2 0x0618 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3 0x061c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4 0x0620 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5 0x0624 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6 0x0628 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7 0x062c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8 0x0630 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 0x0640 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 0x0644 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 0x0648 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 0x064c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 0x0650 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 0x0654 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 0x0658 +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 0x065c +#define cfgBIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 0x0660 + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_swds_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_SWDS0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_SWDS0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_SWDS0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_SWDS0_REVISION_ID 0x0008 +#define 
cfgBIF_CFG_DEV0_SWDS0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_SWDS0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_SWDS0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_SWDS0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_SWDS0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_SWDS0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_SWDS0_BIST 0x000f +#define cfgBIF_CFG_DEV0_SWDS0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY 0x0018 +#define cfgBIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT 0x001c +#define cfgBIF_CFG_DEV0_SWDS0_SECONDARY_STATUS 0x001e +#define cfgBIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT 0x0020 +#define cfgBIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT 0x0024 +#define cfgBIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER 0x0028 +#define cfgBIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER 0x002c +#define cfgBIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI 0x0030 +#define cfgBIF_CFG_DEV0_SWDS0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_SWDS0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_SWDS0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL 0x003e +#define cfgBIF_CFG_DEV0_SWDS0_PMI_CAP_LIST 0x0050 +#define cfgBIF_CFG_DEV0_SWDS0_PMI_CAP 0x0052 +#define cfgBIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL 0x0054 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST 0x0058 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_CAP 0x005a +#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_CAP 0x005c +#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_CNTL 0x0060 +#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_STATUS 0x0062 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_CAP 0x0064 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_CNTL 0x0068 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_STATUS 0x006a +#define cfgBIF_CFG_DEV0_SWDS0_SLOT_CAP 0x006c +#define cfgBIF_CFG_DEV0_SWDS0_SLOT_CNTL 0x0070 +#define cfgBIF_CFG_DEV0_SWDS0_SLOT_STATUS 0x0072 +#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_CAP2 0x007c +#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_CNTL2 0x0080 +#define cfgBIF_CFG_DEV0_SWDS0_DEVICE_STATUS2 0x0082 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_CAP2 0x0084 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_CNTL2 0x0088 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_STATUS2 0x008a +#define cfgBIF_CFG_DEV0_SWDS0_SLOT_CAP2 0x008c +#define cfgBIF_CFG_DEV0_SWDS0_SLOT_CNTL2 0x0090 +#define cfgBIF_CFG_DEV0_SWDS0_SLOT_STATUS2 0x0092 +#define cfgBIF_CFG_DEV0_SWDS0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_SWDS0_SSID_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_SWDS0_SSID_CAP 0x00c4 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST 0x0110 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1 0x0114 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2 0x0118 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL 0x011c +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS 0x011e +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP 0x0120 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL 0x0124 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS 0x012a +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP 0x012c +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL 0x0130 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS 0x0136 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST 
0x0140 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1 0x0144 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2 0x0148 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST 0x0270 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3 0x0274 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS 0x0278 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL 0x027c +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL 0x027e +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL 0x0280 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL 0x0282 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL 0x0284 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL 0x0286 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL 0x0288 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL 0x028a +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL 0x028c +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL 0x028e +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL 0x0290 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL 0x0292 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL 0x0294 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL 0x0296 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL 0x0298 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL 0x029a +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST 0x02a0 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP 0x02a4 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL 0x02a6 +#define cfgBIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST 0x0400 +#define cfgBIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP 0x0404 +#define cfgBIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS 0x0408 +#define cfgBIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST 0x0410 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_CAP_16GT 0x0414 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT 0x0418 +#define cfgBIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT 0x041c +#define cfgBIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT 0x0420 +#define cfgBIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT 0x0424 +#define cfgBIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT 0x0428 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT 0x0430 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT 0x0431 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT 0x0432 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT 0x0433 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT 0x0434 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT 0x0435 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT 0x0436 +#define 
cfgBIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT 0x0437 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT 0x0438 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT 0x0439 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT 0x043a +#define cfgBIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT 0x043b +#define cfgBIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT 0x043c +#define cfgBIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT 0x043d +#define cfgBIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT 0x043e +#define cfgBIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT 0x043f +#define cfgBIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST 0x0440 +#define cfgBIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP 0x0444 +#define cfgBIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS 0x0446 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL 0x0448 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS 0x044a +#define cfgBIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL 0x044c +#define cfgBIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS 0x044e +#define cfgBIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL 0x0450 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS 0x0452 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL 0x0454 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS 0x0456 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL 0x0458 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS 0x045a +#define cfgBIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL 0x045c +#define cfgBIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS 0x045e +#define cfgBIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL 0x0460 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS 0x0462 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL 0x0464 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS 0x0466 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL 0x0468 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS 0x046a +#define cfgBIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL 0x046c +#define cfgBIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS 0x046e +#define cfgBIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL 0x0470 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS 0x0472 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL 0x0474 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS 0x0476 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL 0x0478 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS 0x047a +#define cfgBIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL 0x047c +#define cfgBIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS 0x047e +#define cfgBIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL 0x0480 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS 0x0482 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL 0x0484 +#define cfgBIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS 0x0486 + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf0_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LATENCY 0x000d +#define 
cfgBIF_CFG_DEV0_EPF0_VF0_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define 
cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf1_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA 0x00c8 +#define 
cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf2_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2 0x0090 
+#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf3_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3 0x0018 +#define 
cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL 0x02b6 
+#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf4_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define 
cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf5_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2 0x009e 
+#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf6_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR 0x0030 +#define 
cfgBIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf7_bifcfgdecp +// base address: 0x0 +#define 
cfgBIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS 0x0160 
+#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf8_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK 
0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf9_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP 0x0068 +#define 
cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf10_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE 0x0009 +#define 
cfgBIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1 0x0170 +#define 
cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf11_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING 0x00b0 +#define 
cfgBIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf12_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP 
0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf13_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS 0x000b +#define 
cfgBIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3 0x0178 +#define 
cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf14_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS 0x0076 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST 0x00c0 +#define 
cfgBIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf15_bifcfgdecp +// base address: 0x0 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID 0x0000 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID 0x0002 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_COMMAND 0x0004 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_STATUS 0x0006 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID 0x0008 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE 0x0009 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS 0x000a +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS 0x000b +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE 0x000c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LATENCY 0x000d +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_HEADER 0x000e +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BIST 0x000f +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1 0x0010 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2 0x0014 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3 0x0018 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4 0x001c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5 0x0020 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6 0x0024 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID 0x002c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR 0x0030 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR 0x0034 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE 0x003c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN 0x003d +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST 0x0064 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP 0x0066 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP 0x0068 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL 0x006c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS 0x006e +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP 0x0070 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL 0x0074 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS 0x0076 
+#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2 0x0088 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2 0x008c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2 0x008e +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2 0x0090 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2 0x0094 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2 0x0096 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2 0x0098 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2 0x009c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2 0x009e +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST 0x00a0 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL 0x00a2 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO 0x00a4 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA 0x00a8 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64 0x00ac +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING 0x00b0 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64 0x00b4 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST 0x00c0 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL 0x00c2 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE 0x00c4 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA 0x00c8 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST 0x0100 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR 0x0104 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1 0x0108 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2 0x010c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST 0x0150 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS 0x0154 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK 0x0158 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY 0x015c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS 0x0160 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK 0x0164 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL 0x0168 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0 0x016c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1 0x0170 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2 0x0174 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3 0x0178 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0 0x0188 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1 0x018c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2 0x0190 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3 0x0194 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST 0x02b0 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP 0x02b4 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL 0x02b6 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST 0x0328 +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP 0x032c +#define cfgBIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL 0x032e + + +// addressBlock: nbio_nbif0_bif_bx_pf_SYSPFVFDEC +// base address: 0x0 +#define mmMM_INDEX 0x0000 +#define mmMM_INDEX_BASE_IDX 0 +#define mmMM_DATA 0x0001 +#define mmMM_DATA_BASE_IDX 0 +#define mmMM_INDEX_HI 0x0006 +#define mmMM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_bif_bx_SYSDEC +// base address: 0x0 +#define mmSYSHUB_INDEX_OVLP 0x0008 +#define mmSYSHUB_INDEX_OVLP_BASE_IDX 0 +#define mmSYSHUB_DATA_OVLP 0x0009 +#define mmSYSHUB_DATA_OVLP_BASE_IDX 0 +#define mmPCIE_INDEX 0x000c +#define mmPCIE_INDEX_BASE_IDX 0 +#define mmPCIE_DATA 0x000d +#define mmPCIE_DATA_BASE_IDX 0 +#define mmPCIE_INDEX2 0x000e +#define 
mmPCIE_INDEX2_BASE_IDX 0 +#define mmPCIE_DATA2 0x000f +#define mmPCIE_DATA2_BASE_IDX 0 +#define mmSBIOS_SCRATCH_0 0x0034 +#define mmSBIOS_SCRATCH_0_BASE_IDX 1 +#define mmSBIOS_SCRATCH_1 0x0035 +#define mmSBIOS_SCRATCH_1_BASE_IDX 1 +#define mmSBIOS_SCRATCH_2 0x0036 +#define mmSBIOS_SCRATCH_2_BASE_IDX 1 +#define mmSBIOS_SCRATCH_3 0x0037 +#define mmSBIOS_SCRATCH_3_BASE_IDX 1 +#define mmBIOS_SCRATCH_0 0x0038 +#define mmBIOS_SCRATCH_0_BASE_IDX 1 +#define mmBIOS_SCRATCH_1 0x0039 +#define mmBIOS_SCRATCH_1_BASE_IDX 1 +#define mmBIOS_SCRATCH_2 0x003a +#define mmBIOS_SCRATCH_2_BASE_IDX 1 +#define mmBIOS_SCRATCH_3 0x003b +#define mmBIOS_SCRATCH_3_BASE_IDX 1 +#define mmBIOS_SCRATCH_4 0x003c +#define mmBIOS_SCRATCH_4_BASE_IDX 1 +#define mmBIOS_SCRATCH_5 0x003d +#define mmBIOS_SCRATCH_5_BASE_IDX 1 +#define mmBIOS_SCRATCH_6 0x003e +#define mmBIOS_SCRATCH_6_BASE_IDX 1 +#define mmBIOS_SCRATCH_7 0x003f +#define mmBIOS_SCRATCH_7_BASE_IDX 1 +#define mmBIOS_SCRATCH_8 0x0040 +#define mmBIOS_SCRATCH_8_BASE_IDX 1 +#define mmBIOS_SCRATCH_9 0x0041 +#define mmBIOS_SCRATCH_9_BASE_IDX 1 +#define mmBIOS_SCRATCH_10 0x0042 +#define mmBIOS_SCRATCH_10_BASE_IDX 1 +#define mmBIOS_SCRATCH_11 0x0043 +#define mmBIOS_SCRATCH_11_BASE_IDX 1 +#define mmBIOS_SCRATCH_12 0x0044 +#define mmBIOS_SCRATCH_12_BASE_IDX 1 +#define mmBIOS_SCRATCH_13 0x0045 +#define mmBIOS_SCRATCH_13_BASE_IDX 1 +#define mmBIOS_SCRATCH_14 0x0046 +#define mmBIOS_SCRATCH_14_BASE_IDX 1 +#define mmBIOS_SCRATCH_15 0x0047 +#define mmBIOS_SCRATCH_15_BASE_IDX 1 +#define mmBIF_RLC_INTR_CNTL 0x004c +#define mmBIF_RLC_INTR_CNTL_BASE_IDX 1 +#define mmBIF_VCE_INTR_CNTL 0x004d +#define mmBIF_VCE_INTR_CNTL_BASE_IDX 1 +#define mmBIF_UVD_INTR_CNTL 0x004e +#define mmBIF_UVD_INTR_CNTL_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ADDR0 0x006c +#define mmGFX_MMIOREG_CAM_ADDR0_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR0 0x006d +#define mmGFX_MMIOREG_CAM_REMAP_ADDR0_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ADDR1 0x006e +#define mmGFX_MMIOREG_CAM_ADDR1_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR1 0x006f +#define mmGFX_MMIOREG_CAM_REMAP_ADDR1_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ADDR2 0x0070 +#define mmGFX_MMIOREG_CAM_ADDR2_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR2 0x0071 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR2_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ADDR3 0x0072 +#define mmGFX_MMIOREG_CAM_ADDR3_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR3 0x0073 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR3_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ADDR4 0x0074 +#define mmGFX_MMIOREG_CAM_ADDR4_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR4 0x0075 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR4_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ADDR5 0x0076 +#define mmGFX_MMIOREG_CAM_ADDR5_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR5 0x0077 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR5_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ADDR6 0x0078 +#define mmGFX_MMIOREG_CAM_ADDR6_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR6 0x0079 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR6_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ADDR7 0x007a +#define mmGFX_MMIOREG_CAM_ADDR7_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_REMAP_ADDR7 0x007b +#define mmGFX_MMIOREG_CAM_REMAP_ADDR7_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_CNTL 0x007c +#define mmGFX_MMIOREG_CAM_CNTL_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ZERO_CPL 0x007d +#define mmGFX_MMIOREG_CAM_ZERO_CPL_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_ONE_CPL 0x007e +#define mmGFX_MMIOREG_CAM_ONE_CPL_BASE_IDX 1 +#define mmGFX_MMIOREG_CAM_PROGRAMMABLE_CPL 0x007f +#define mmGFX_MMIOREG_CAM_PROGRAMMABLE_CPL_BASE_IDX 1 + + +// 
addressBlock: nbio_nbif0_syshub_mmreg_syshubdec +// base address: 0x0 +#define mmSYSHUB_INDEX 0x0008 +#define mmSYSHUB_INDEX_BASE_IDX 0 +#define mmSYSHUB_DATA 0x0009 +#define mmSYSHUB_DATA_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_STRAP0 0x0011 +#define mmRCC_DEV0_EPF0_STRAP0_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_ep_dev0_BIFDEC1 +// base address: 0x0 +#define mmEP_PCIE_SCRATCH 0x0025 +#define mmEP_PCIE_SCRATCH_BASE_IDX 2 +#define mmEP_PCIE_CNTL 0x0027 +#define mmEP_PCIE_CNTL_BASE_IDX 2 +#define mmEP_PCIE_INT_CNTL 0x0028 +#define mmEP_PCIE_INT_CNTL_BASE_IDX 2 +#define mmEP_PCIE_INT_STATUS 0x0029 +#define mmEP_PCIE_INT_STATUS_BASE_IDX 2 +#define mmEP_PCIE_RX_CNTL2 0x002a +#define mmEP_PCIE_RX_CNTL2_BASE_IDX 2 +#define mmEP_PCIE_BUS_CNTL 0x002b +#define mmEP_PCIE_BUS_CNTL_BASE_IDX 2 +#define mmEP_PCIE_CFG_CNTL 0x002c +#define mmEP_PCIE_CFG_CNTL_BASE_IDX 2 +#define mmEP_PCIE_TX_LTR_CNTL 0x002e +#define mmEP_PCIE_TX_LTR_CNTL_BASE_IDX 2 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0 0x002f +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 2 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1 0x002f +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 2 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2 0x002f +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 2 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3 0x002f +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 2 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4 0x0030 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 2 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5 0x0030 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 2 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6 0x0030 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 2 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7 0x0030 +#define mmPCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 2 +#define mmEP_PCIE_F0_DPA_CAP 0x0034 +#define mmEP_PCIE_F0_DPA_CAP_BASE_IDX 2 +#define mmEP_PCIE_F0_DPA_LATENCY_INDICATOR 0x0035 +#define mmEP_PCIE_F0_DPA_LATENCY_INDICATOR_BASE_IDX 2 +#define mmEP_PCIE_F0_DPA_CNTL 0x0035 +#define mmEP_PCIE_F0_DPA_CNTL_BASE_IDX 2 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 0x0035 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0_BASE_IDX 2 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 0x0036 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1_BASE_IDX 2 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 0x0036 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2_BASE_IDX 2 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 0x0036 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3_BASE_IDX 2 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 0x0036 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4_BASE_IDX 2 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 0x0037 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5_BASE_IDX 2 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 0x0037 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6_BASE_IDX 2 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 0x0037 +#define mmPCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7_BASE_IDX 2 +#define mmEP_PCIE_PME_CONTROL 0x0037 +#define mmEP_PCIE_PME_CONTROL_BASE_IDX 2 +#define mmEP_PCIEP_RESERVED 0x0038 +#define mmEP_PCIEP_RESERVED_BASE_IDX 2 +#define mmEP_PCIE_TX_CNTL 0x003a +#define mmEP_PCIE_TX_CNTL_BASE_IDX 2 +#define mmEP_PCIE_TX_REQUESTER_ID 0x003b +#define mmEP_PCIE_TX_REQUESTER_ID_BASE_IDX 2 +#define mmEP_PCIE_ERR_CNTL 0x003c +#define mmEP_PCIE_ERR_CNTL_BASE_IDX 2 +#define mmEP_PCIE_RX_CNTL 0x003d +#define mmEP_PCIE_RX_CNTL_BASE_IDX 2 +#define mmEP_PCIE_LC_SPEED_CNTL 0x003e +#define mmEP_PCIE_LC_SPEED_CNTL_BASE_IDX 2 + + +// addressBlock: 
nbio_nbif0_rcc_dwn_dev0_BIFDEC1 +// base address: 0x0 +#define mmDN_PCIE_RESERVED 0x0040 +#define mmDN_PCIE_RESERVED_BASE_IDX 2 +#define mmDN_PCIE_SCRATCH 0x0041 +#define mmDN_PCIE_SCRATCH_BASE_IDX 2 +#define mmDN_PCIE_CNTL 0x0043 +#define mmDN_PCIE_CNTL_BASE_IDX 2 +#define mmDN_PCIE_CONFIG_CNTL 0x0044 +#define mmDN_PCIE_CONFIG_CNTL_BASE_IDX 2 +#define mmDN_PCIE_RX_CNTL2 0x0045 +#define mmDN_PCIE_RX_CNTL2_BASE_IDX 2 +#define mmDN_PCIE_BUS_CNTL 0x0046 +#define mmDN_PCIE_BUS_CNTL_BASE_IDX 2 +#define mmDN_PCIE_CFG_CNTL 0x0047 +#define mmDN_PCIE_CFG_CNTL_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dwnp_dev0_BIFDEC1 +// base address: 0x0 +#define mmPCIE_ERR_CNTL 0x004f +#define mmPCIE_ERR_CNTL_BASE_IDX 2 +#define mmPCIE_RX_CNTL 0x0050 +#define mmPCIE_RX_CNTL_BASE_IDX 2 +#define mmPCIE_LC_SPEED_CNTL 0x0051 +#define mmPCIE_LC_SPEED_CNTL_BASE_IDX 2 +#define mmPCIE_LC_CNTL2 0x0052 +#define mmPCIE_LC_CNTL2_BASE_IDX 2 +#define mmLTR_MSG_INFO_FROM_EP 0x0054 +#define mmLTR_MSG_INFO_FROM_EP_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_BIFPFVFDEC1[13440..14975] +// base address: 0x3480 +#define mmRCC_ERR_LOG 0x0085 +#define mmRCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1 +// base address: 0x0 +#define mmRCC_ERR_INT_CNTL 0x0086 +#define mmRCC_ERR_INT_CNTL_BASE_IDX 2 +#define mmRCC_BACO_CNTL_MISC 0x0087 +#define mmRCC_BACO_CNTL_MISC_BASE_IDX 2 +#define mmRCC_RESET_EN 0x0088 +#define mmRCC_RESET_EN_BASE_IDX 2 +#define mmRCC_VDM_SUPPORT 0x0089 +#define mmRCC_VDM_SUPPORT_BASE_IDX 2 +#define mmRCC_MARGIN_PARAM_CNTL0 0x008a +#define mmRCC_MARGIN_PARAM_CNTL0_BASE_IDX 2 +#define mmRCC_MARGIN_PARAM_CNTL1 0x008b +#define mmRCC_MARGIN_PARAM_CNTL1_BASE_IDX 2 +#define mmRCC_PEER_REG_RANGE0 0x00be +#define mmRCC_PEER_REG_RANGE0_BASE_IDX 2 +#define mmRCC_PEER_REG_RANGE1 0x00bf +#define mmRCC_PEER_REG_RANGE1_BASE_IDX 2 +#define mmRCC_BUS_CNTL 0x00c1 +#define mmRCC_BUS_CNTL_BASE_IDX 2 +#define mmRCC_CONFIG_CNTL 0x00c2 +#define mmRCC_CONFIG_CNTL_BASE_IDX 2 +#define mmRCC_CONFIG_F0_BASE 0x00c6 +#define mmRCC_CONFIG_F0_BASE_BASE_IDX 2 +#define mmRCC_CONFIG_APER_SIZE 0x00c7 +#define mmRCC_CONFIG_APER_SIZE_BASE_IDX 2 +#define mmRCC_CONFIG_REG_APER_SIZE 0x00c8 +#define mmRCC_CONFIG_REG_APER_SIZE_BASE_IDX 2 +#define mmRCC_XDMA_LO 0x00c9 +#define mmRCC_XDMA_LO_BASE_IDX 2 +#define mmRCC_XDMA_HI 0x00ca +#define mmRCC_XDMA_HI_BASE_IDX 2 +#define mmRCC_FEATURES_CONTROL_MISC 0x00cb +#define mmRCC_FEATURES_CONTROL_MISC_BASE_IDX 2 +#define mmRCC_BUSNUM_CNTL1 0x00cc +#define mmRCC_BUSNUM_CNTL1_BASE_IDX 2 +#define mmRCC_BUSNUM_LIST0 0x00cd +#define mmRCC_BUSNUM_LIST0_BASE_IDX 2 +#define mmRCC_BUSNUM_LIST1 0x00ce +#define mmRCC_BUSNUM_LIST1_BASE_IDX 2 +#define mmRCC_BUSNUM_CNTL2 0x00cf +#define mmRCC_BUSNUM_CNTL2_BASE_IDX 2 +#define mmRCC_CAPTURE_HOST_BUSNUM 0x00d0 +#define mmRCC_CAPTURE_HOST_BUSNUM_BASE_IDX 2 +#define mmRCC_HOST_BUSNUM 0x00d1 +#define mmRCC_HOST_BUSNUM_BASE_IDX 2 +#define mmRCC_PEER0_FB_OFFSET_HI 0x00d2 +#define mmRCC_PEER0_FB_OFFSET_HI_BASE_IDX 2 +#define mmRCC_PEER0_FB_OFFSET_LO 0x00d3 +#define mmRCC_PEER0_FB_OFFSET_LO_BASE_IDX 2 +#define mmRCC_PEER1_FB_OFFSET_HI 0x00d4 +#define mmRCC_PEER1_FB_OFFSET_HI_BASE_IDX 2 +#define mmRCC_PEER1_FB_OFFSET_LO 0x00d5 
+#define mmRCC_PEER1_FB_OFFSET_LO_BASE_IDX 2 +#define mmRCC_PEER2_FB_OFFSET_HI 0x00d6 +#define mmRCC_PEER2_FB_OFFSET_HI_BASE_IDX 2 +#define mmRCC_PEER2_FB_OFFSET_LO 0x00d7 +#define mmRCC_PEER2_FB_OFFSET_LO_BASE_IDX 2 +#define mmRCC_PEER3_FB_OFFSET_HI 0x00d8 +#define mmRCC_PEER3_FB_OFFSET_HI_BASE_IDX 2 +#define mmRCC_PEER3_FB_OFFSET_LO 0x00d9 +#define mmRCC_PEER3_FB_OFFSET_LO_BASE_IDX 2 +#define mmRCC_CMN_LINK_CNTL 0x00de +#define mmRCC_CMN_LINK_CNTL_BASE_IDX 2 +#define mmRCC_EP_REQUESTERID_RESTORE 0x00df +#define mmRCC_EP_REQUESTERID_RESTORE_BASE_IDX 2 +#define mmRCC_LTR_LSWITCH_CNTL 0x00e0 +#define mmRCC_LTR_LSWITCH_CNTL_BASE_IDX 2 +#define mmRCC_MH_ARB_CNTL 0x00e1 +#define mmRCC_MH_ARB_CNTL_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_BIFDEC1 +// base address: 0x0 +#define mmBIF_MM_INDACCESS_CNTL 0x00e6 +#define mmBIF_MM_INDACCESS_CNTL_BASE_IDX 2 +#define mmBUS_CNTL 0x00e7 +#define mmBUS_CNTL_BASE_IDX 2 +#define mmBIF_SCRATCH0 0x00e8 +#define mmBIF_SCRATCH0_BASE_IDX 2 +#define mmBIF_SCRATCH1 0x00e9 +#define mmBIF_SCRATCH1_BASE_IDX 2 +#define mmBX_RESET_EN 0x00ed +#define mmBX_RESET_EN_BASE_IDX 2 +#define mmMM_CFGREGS_CNTL 0x00ee +#define mmMM_CFGREGS_CNTL_BASE_IDX 2 +#define mmBX_RESET_CNTL 0x00f0 +#define mmBX_RESET_CNTL_BASE_IDX 2 +#define mmINTERRUPT_CNTL 0x00f1 +#define mmINTERRUPT_CNTL_BASE_IDX 2 +#define mmINTERRUPT_CNTL2 0x00f2 +#define mmINTERRUPT_CNTL2_BASE_IDX 2 +#define mmCLKREQB_PAD_CNTL 0x00f8 +#define mmCLKREQB_PAD_CNTL_BASE_IDX 2 +#define mmBIF_FEATURES_CONTROL_MISC 0x00fb +#define mmBIF_FEATURES_CONTROL_MISC_BASE_IDX 2 +#define mmBIF_DOORBELL_CNTL 0x00fc +#define mmBIF_DOORBELL_CNTL_BASE_IDX 2 +#define mmBIF_DOORBELL_INT_CNTL 0x00fd +#define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2 +#define mmBIF_FB_EN 0x00ff +#define mmBIF_FB_EN_BASE_IDX 2 +#define mmBIF_BUSY_DELAY_CNTR 0x0100 +#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2 +#define mmBIF_MST_TRANS_PENDING_VF 0x0109 +#define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2 +#define mmBIF_SLV_TRANS_PENDING_VF 0x010a +#define mmBIF_SLV_TRANS_PENDING_VF_BASE_IDX 2 +#define mmBACO_CNTL 0x010b +#define mmBACO_CNTL_BASE_IDX 2 +#define mmBIF_BACO_EXIT_TIME0 0x010c +#define mmBIF_BACO_EXIT_TIME0_BASE_IDX 2 +#define mmBIF_BACO_EXIT_TIMER1 0x010d +#define mmBIF_BACO_EXIT_TIMER1_BASE_IDX 2 +#define mmBIF_BACO_EXIT_TIMER2 0x010e +#define mmBIF_BACO_EXIT_TIMER2_BASE_IDX 2 +#define mmBIF_BACO_EXIT_TIMER3 0x010f +#define mmBIF_BACO_EXIT_TIMER3_BASE_IDX 2 +#define mmBIF_BACO_EXIT_TIMER4 0x0110 +#define mmBIF_BACO_EXIT_TIMER4_BASE_IDX 2 +#define mmMEM_TYPE_CNTL 0x0111 +#define mmMEM_TYPE_CNTL_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_CNTL 0x0113 +#define mmNBIF_GFX_ADDR_LUT_CNTL_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_0 0x0114 +#define mmNBIF_GFX_ADDR_LUT_0_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_1 0x0115 +#define mmNBIF_GFX_ADDR_LUT_1_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_2 0x0116 +#define mmNBIF_GFX_ADDR_LUT_2_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_3 0x0117 +#define mmNBIF_GFX_ADDR_LUT_3_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_4 0x0118 +#define mmNBIF_GFX_ADDR_LUT_4_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_5 0x0119 +#define mmNBIF_GFX_ADDR_LUT_5_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_6 0x011a +#define mmNBIF_GFX_ADDR_LUT_6_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_7 0x011b +#define mmNBIF_GFX_ADDR_LUT_7_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_8 0x011c +#define mmNBIF_GFX_ADDR_LUT_8_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_9 0x011d +#define mmNBIF_GFX_ADDR_LUT_9_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_10 0x011e +#define mmNBIF_GFX_ADDR_LUT_10_BASE_IDX 2 
+#define mmNBIF_GFX_ADDR_LUT_11 0x011f +#define mmNBIF_GFX_ADDR_LUT_11_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_12 0x0120 +#define mmNBIF_GFX_ADDR_LUT_12_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_13 0x0121 +#define mmNBIF_GFX_ADDR_LUT_13_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_14 0x0122 +#define mmNBIF_GFX_ADDR_LUT_14_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_15 0x0123 +#define mmNBIF_GFX_ADDR_LUT_15_BASE_IDX 2 +#define mmREMAP_HDP_MEM_FLUSH_CNTL 0x012d +#define mmREMAP_HDP_MEM_FLUSH_CNTL_BASE_IDX 2 +#define mmREMAP_HDP_REG_FLUSH_CNTL 0x012e +#define mmREMAP_HDP_REG_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_RB_CNTL 0x012f +#define mmBIF_RB_CNTL_BASE_IDX 2 +#define mmBIF_RB_BASE 0x0130 +#define mmBIF_RB_BASE_BASE_IDX 2 +#define mmBIF_RB_RPTR 0x0131 +#define mmBIF_RB_RPTR_BASE_IDX 2 +#define mmBIF_RB_WPTR 0x0132 +#define mmBIF_RB_WPTR_BASE_IDX 2 +#define mmBIF_RB_WPTR_ADDR_HI 0x0133 +#define mmBIF_RB_WPTR_ADDR_HI_BASE_IDX 2 +#define mmBIF_RB_WPTR_ADDR_LO 0x0134 +#define mmBIF_RB_WPTR_ADDR_LO_BASE_IDX 2 +#define mmMAILBOX_INDEX 0x0135 +#define mmMAILBOX_INDEX_BASE_IDX 2 +#define mmBIF_MP1_INTR_CTRL 0x0142 +#define mmBIF_MP1_INTR_CTRL_BASE_IDX 2 +#define mmBIF_UVD_GPUIOV_CFG_SIZE 0x0143 +#define mmBIF_UVD_GPUIOV_CFG_SIZE_BASE_IDX 2 +#define mmBIF_VCE_GPUIOV_CFG_SIZE 0x0144 +#define mmBIF_VCE_GPUIOV_CFG_SIZE_BASE_IDX 2 +#define mmBIF_GFX_SDMA_GPUIOV_CFG_SIZE 0x0145 +#define mmBIF_GFX_SDMA_GPUIOV_CFG_SIZE_BASE_IDX 2 +#define mmBIF_PERSTB_PAD_CNTL 0x0148 +#define mmBIF_PERSTB_PAD_CNTL_BASE_IDX 2 +#define mmBIF_PX_EN_PAD_CNTL 0x0149 +#define mmBIF_PX_EN_PAD_CNTL_BASE_IDX 2 +#define mmBIF_REFPADKIN_PAD_CNTL 0x014a +#define mmBIF_REFPADKIN_PAD_CNTL_BASE_IDX 2 +#define mmBIF_CLKREQB_PAD_CNTL 0x014b +#define mmBIF_CLKREQB_PAD_CNTL_BASE_IDX 2 +#define mmBIF_PWRBRK_PAD_CNTL 0x014c +#define mmBIF_PWRBRK_PAD_CNTL_BASE_IDX 2 +#define mmBIF_WAKEB_PAD_CNTL 0x014d +#define mmBIF_WAKEB_PAD_CNTL_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_pf_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BME_STATUS 0x00eb +#define mmBIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmDOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmDOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmDOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmDOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmHDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmHDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmHDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmHDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmGPU_HDP_FLUSH_REQ 0x0106 +#define mmGPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmGPU_HDP_FLUSH_DONE 0x0107 +#define mmGPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_TRANS_PENDING 0x0108 +#define mmBIF_TRANS_PENDING_BASE_IDX 2 +#define mmNBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmNBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmMAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmMAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmMAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmMAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmMAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmMAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmMAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmMAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmMAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmMAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmMAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmMAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmMAILBOX_MSGBUF_RCV_DW2 0x013c +#define 
mmMAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmMAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmMAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmMAILBOX_CONTROL 0x013e +#define mmMAILBOX_CONTROL_BASE_IDX 2 +#define mmMAILBOX_INT_CNTL 0x013f +#define mmMAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_VMHV_MAILBOX 0x0140 +#define mmBIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_gdc_GDCDEC +// base address: 0x0 +#define mmNGDC_SDP_PORT_CTRL 0x01c2 +#define mmNGDC_SDP_PORT_CTRL_BASE_IDX 2 +#define mmSHUB_REGS_IF_CTL 0x01c3 +#define mmSHUB_REGS_IF_CTL_BASE_IDX 2 +#define mmNGDC_MGCG_CTRL 0x01ca +#define mmNGDC_MGCG_CTRL_BASE_IDX 2 +#define mmNGDC_RESERVED_0 0x01cb +#define mmNGDC_RESERVED_0_BASE_IDX 2 +#define mmNGDC_RESERVED_1 0x01cc +#define mmNGDC_RESERVED_1_BASE_IDX 2 +#define mmNGDC_SDP_PORT_CTRL_SOCCLK 0x01cd +#define mmNGDC_SDP_PORT_CTRL_SOCCLK_BASE_IDX 2 +#define mmBIF_SDMA0_DOORBELL_RANGE 0x01d0 +#define mmBIF_SDMA0_DOORBELL_RANGE_BASE_IDX 2 +#define mmBIF_SDMA1_DOORBELL_RANGE 0x01d1 +#define mmBIF_SDMA1_DOORBELL_RANGE_BASE_IDX 2 +#define mmBIF_IH_DOORBELL_RANGE 0x01d2 +#define mmBIF_IH_DOORBELL_RANGE_BASE_IDX 2 +#define mmBIF_MMSCH0_DOORBELL_RANGE 0x01d3 +#define mmBIF_MMSCH0_DOORBELL_RANGE_BASE_IDX 2 +#define mmBIF_ACV_DOORBELL_RANGE 0x01d4 +#define mmBIF_ACV_DOORBELL_RANGE_BASE_IDX 2 +#define mmBIF_DOORBELL_FENCE_CNTL 0x01de +#define mmBIF_DOORBELL_FENCE_CNTL_BASE_IDX 2 +#define mmS2A_MISC_CNTL 0x01df +#define mmS2A_MISC_CNTL_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_BIFDEC2 +// base address: 0x0 +#define mmGFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmGFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmGFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmGFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmGFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmGFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmGFXMSIX_VECT0_CONTROL 0x0403 +#define mmGFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmGFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmGFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmGFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmGFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmGFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmGFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmGFXMSIX_VECT1_CONTROL 0x0407 +#define mmGFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmGFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmGFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmGFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmGFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmGFXMSIX_VECT2_MSG_DATA 0x040a +#define mmGFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmGFXMSIX_VECT2_CONTROL 0x040b +#define mmGFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmGFXMSIX_PBA 0x0800 +#define mmGFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf0_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF0_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF0_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF0_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF0_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf0_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF0_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF0_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define 
mmRCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf0_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf0_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL 0x0403 +#define 
mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF0_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf1_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF1_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF1_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF1_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF1_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf1_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF1_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF1_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf1_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING 0x0108 +#define 
mmBIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf1_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF1_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf2_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF2_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF2_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF2_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF2_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI 0x0006 +#define 
mmBIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf2_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF2_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF2_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf2_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX 
0x0140 +#define mmBIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf2_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF2_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf3_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF3_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF3_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF3_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF3_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf3_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF3_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF3_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf3_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define 
mmBIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf3_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define 
mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF3_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf4_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF4_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF4_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF4_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF4_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf4_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF4_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF4_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf4_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define 
mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf4_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF4_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf5_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF5_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF5_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF5_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF5_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf5_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF5_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF5_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf5_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS_BASE_IDX 2 +#define 
mmBIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf5_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define 
mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF5_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf6_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF6_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF6_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF6_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF6_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf6_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF6_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF6_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf6_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define 
mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf6_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF6_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf7_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF7_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF7_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF7_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF7_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf7_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF7_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF7_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN_BASE_IDX 2 
+#define mmRCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf7_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf7_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define 
mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF7_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf8_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF8_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF8_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF8_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF8_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf8_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF8_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF8_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf8_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ 0x0106 +#define 
mmBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf8_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF8_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf9_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF9_MM_INDEX 0x0000 
+#define mmBIF_BX_DEV0_EPF0_VF9_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF9_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF9_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf9_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF9_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF9_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf9_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL 0x013e +#define 
mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf9_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF9_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf10_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF10_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF10_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF10_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF10_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf10_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF10_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF10_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf10_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define 
mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf10_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define 
mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF10_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf11_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF11_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF11_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF11_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF11_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf11_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF11_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF11_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf11_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 
+#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf11_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF11_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf12_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF12_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF12_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF12_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF12_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf12_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF12_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF12_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED 0x00c4 +#define 
mmRCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf12_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf12_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define 
mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF12_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf13_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF13_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF13_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF13_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF13_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf13_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF13_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF13_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf13_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define 
mmBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf13_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF13_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf14_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF14_MM_INDEX 0x0000 
+#define mmBIF_BX_DEV0_EPF0_VF14_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF14_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF14_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf14_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF14_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF14_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf14_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define 
mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf14_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO 0x0408 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI 0x0409 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA 0x040a +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL 0x040b +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_PBA 0x0800 +#define mmRCC_DEV0_EPF0_VF14_GFXMSIX_PBA_BASE_IDX 3 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf15_SYSPFVFDEC +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF15_MM_INDEX 0x0000 +#define mmBIF_BX_DEV0_EPF0_VF15_MM_INDEX_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF15_MM_DATA 0x0001 +#define mmBIF_BX_DEV0_EPF0_VF15_MM_DATA_BASE_IDX 0 +#define mmBIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI 0x0006 +#define mmBIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI_BASE_IDX 0 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf15_BIFPFVFDEC1 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF15_RCC_ERR_LOG 0x0085 +#define mmRCC_DEV0_EPF0_VF15_RCC_ERR_LOG_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN 0x00c0 +#define mmRCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE 0x00c3 +#define mmRCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED 0x00c4 +#define mmRCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED_BASE_IDX 2 +#define mmRCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER 0x00c5 +#define mmRCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf15_BIFPFVFDEC1 +// base address: 0x0 +#define mmBIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS 0x00eb +#define mmBIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG 0x00ec +#define mmBIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH 0x00f3 +#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH_BASE_IDX 2 +#define 
mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW 0x00f4 +#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL 0x00f5 +#define mmBIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL 0x00f6 +#define mmBIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL 0x00f7 +#define mmBIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ 0x0106 +#define mmBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE 0x0107 +#define mmBIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING 0x0108 +#define mmBIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS 0x0112 +#define mmBIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0 0x0136 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1 0x0137 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2 0x0138 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3 0x0139 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0 0x013a +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1 0x013b +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2 0x013c +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3 0x013d +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL 0x013e +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL 0x013f +#define mmBIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL_BASE_IDX 2 +#define mmBIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX 0x0140 +#define mmBIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX_BASE_IDX 2 + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf15_BIFDEC2 +// base address: 0x0 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO 0x0400 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI 0x0401 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA 0x0402 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL 0x0403 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO 0x0404 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI 0x0405 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA 0x0406 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA_BASE_IDX 3 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL 0x0407 +#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL_BASE_IDX 3 +#define 
mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO 0x0408
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO_BASE_IDX 3
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI 0x0409
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI_BASE_IDX 3
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA 0x040a
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA_BASE_IDX 3
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL 0x040b
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL_BASE_IDX 3
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_PBA 0x0800
+#define mmRCC_DEV0_EPF0_VF15_GFXMSIX_PBA_BASE_IDX 3
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
new file mode 100644
index 000000000000..d3704b438f2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
@@ -0,0 +1,48436 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _nbio_7_4_SH_MASK_HEADER +#define _nbio_7_4_SH_MASK_HEADER + + +// addressBlock: nbio_pcie0_pswuscfg0_cfgdecp +//PSWUSCFG0_VENDOR_ID +#define PSWUSCFG0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define PSWUSCFG0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//PSWUSCFG0_DEVICE_ID +#define PSWUSCFG0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define PSWUSCFG0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//PSWUSCFG0_COMMAND +#define PSWUSCFG0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define PSWUSCFG0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define PSWUSCFG0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define PSWUSCFG0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define PSWUSCFG0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define PSWUSCFG0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define PSWUSCFG0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define PSWUSCFG0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define PSWUSCFG0_COMMAND__SERR_EN__SHIFT 0x8 +#define PSWUSCFG0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define PSWUSCFG0_COMMAND__INT_DIS__SHIFT 0xa +#define PSWUSCFG0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define PSWUSCFG0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define PSWUSCFG0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define PSWUSCFG0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define PSWUSCFG0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define PSWUSCFG0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define PSWUSCFG0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define PSWUSCFG0_COMMAND__AD_STEPPING_MASK 0x0080L +#define PSWUSCFG0_COMMAND__SERR_EN_MASK 0x0100L +#define PSWUSCFG0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define PSWUSCFG0_COMMAND__INT_DIS_MASK 0x0400L +//PSWUSCFG0_STATUS +#define PSWUSCFG0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define PSWUSCFG0_STATUS__INT_STATUS__SHIFT 0x3 +#define PSWUSCFG0_STATUS__CAP_LIST__SHIFT 0x4 +#define PSWUSCFG0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define PSWUSCFG0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define PSWUSCFG0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define PSWUSCFG0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define PSWUSCFG0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define PSWUSCFG0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define PSWUSCFG0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define PSWUSCFG0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define PSWUSCFG0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define PSWUSCFG0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define PSWUSCFG0_STATUS__INT_STATUS_MASK 0x0008L +#define PSWUSCFG0_STATUS__CAP_LIST_MASK 0x0010L +#define PSWUSCFG0_STATUS__PCI_66_CAP_MASK 0x0020L +#define PSWUSCFG0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define PSWUSCFG0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define PSWUSCFG0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define PSWUSCFG0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define PSWUSCFG0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define PSWUSCFG0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define PSWUSCFG0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define PSWUSCFG0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//PSWUSCFG0_REVISION_ID +#define PSWUSCFG0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define PSWUSCFG0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define PSWUSCFG0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define PSWUSCFG0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//PSWUSCFG0_PROG_INTERFACE +#define PSWUSCFG0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define PSWUSCFG0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//PSWUSCFG0_SUB_CLASS +#define PSWUSCFG0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define PSWUSCFG0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//PSWUSCFG0_BASE_CLASS +#define 
PSWUSCFG0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define PSWUSCFG0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//PSWUSCFG0_CACHE_LINE +#define PSWUSCFG0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define PSWUSCFG0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//PSWUSCFG0_LATENCY +#define PSWUSCFG0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define PSWUSCFG0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//PSWUSCFG0_HEADER +#define PSWUSCFG0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define PSWUSCFG0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define PSWUSCFG0_HEADER__HEADER_TYPE_MASK 0x7FL +#define PSWUSCFG0_HEADER__DEVICE_TYPE_MASK 0x80L +//PSWUSCFG0_BIST +#define PSWUSCFG0_BIST__BIST_COMP__SHIFT 0x0 +#define PSWUSCFG0_BIST__BIST_STRT__SHIFT 0x6 +#define PSWUSCFG0_BIST__BIST_CAP__SHIFT 0x7 +#define PSWUSCFG0_BIST__BIST_COMP_MASK 0x0FL +#define PSWUSCFG0_BIST__BIST_STRT_MASK 0x40L +#define PSWUSCFG0_BIST__BIST_CAP_MASK 0x80L +//PSWUSCFG0_SUB_BUS_NUMBER_LATENCY +#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS__SHIFT 0x0 +#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8 +#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10 +#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER__SHIFT 0x18 +#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS_MASK 0x000000FFL +#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L +#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L +#define PSWUSCFG0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER_MASK 0xFF000000L +//PSWUSCFG0_IO_BASE_LIMIT +#define PSWUSCFG0_IO_BASE_LIMIT__IO_BASE_TYPE__SHIFT 0x0 +#define PSWUSCFG0_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4 +#define PSWUSCFG0_IO_BASE_LIMIT__IO_LIMIT_TYPE__SHIFT 0x8 +#define PSWUSCFG0_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc +#define PSWUSCFG0_IO_BASE_LIMIT__IO_BASE_TYPE_MASK 0x000FL +#define PSWUSCFG0_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L +#define PSWUSCFG0_IO_BASE_LIMIT__IO_LIMIT_TYPE_MASK 0x0F00L +#define PSWUSCFG0_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L +//PSWUSCFG0_SECONDARY_STATUS +#define PSWUSCFG0_SECONDARY_STATUS__PCI_66_CAP__SHIFT 0x5 +#define PSWUSCFG0_SECONDARY_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define PSWUSCFG0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define PSWUSCFG0_SECONDARY_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define PSWUSCFG0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR__SHIFT 0xe +#define PSWUSCFG0_SECONDARY_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define PSWUSCFG0_SECONDARY_STATUS__PCI_66_CAP_MASK 0x0020L +#define PSWUSCFG0_SECONDARY_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define PSWUSCFG0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define PSWUSCFG0_SECONDARY_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define PSWUSCFG0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define PSWUSCFG0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR_MASK 0x4000L +#define PSWUSCFG0_SECONDARY_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//PSWUSCFG0_MEM_BASE_LIMIT +#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0 +#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4 +#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10 +#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14 +#define 
PSWUSCFG0_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL +#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L +#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L +#define PSWUSCFG0_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L +//PSWUSCFG0_PREF_BASE_LIMIT +#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0 +#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4 +#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10 +#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14 +#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL +#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L +#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L +#define PSWUSCFG0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L +//PSWUSCFG0_PREF_BASE_UPPER +#define PSWUSCFG0_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0 +#define PSWUSCFG0_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL +//PSWUSCFG0_PREF_LIMIT_UPPER +#define PSWUSCFG0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0 +#define PSWUSCFG0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL +//PSWUSCFG0_IO_BASE_LIMIT_HI +#define PSWUSCFG0_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0 +#define PSWUSCFG0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10 +#define PSWUSCFG0_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL +#define PSWUSCFG0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L +//PSWUSCFG0_CAP_PTR +#define PSWUSCFG0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define PSWUSCFG0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//PSWUSCFG0_INTERRUPT_LINE +#define PSWUSCFG0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define PSWUSCFG0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//PSWUSCFG0_INTERRUPT_PIN +#define PSWUSCFG0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define PSWUSCFG0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//PSWUSCFG0_IRQ_BRIDGE_CNTL +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0 +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1 +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2 +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3 +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4 +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5 +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6 +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7 +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L +#define PSWUSCFG0_IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L +//EXT_BRIDGE_CNTL +#define EXT_BRIDGE_CNTL__IO_PORT_80_EN__SHIFT 0x0 +#define EXT_BRIDGE_CNTL__IO_PORT_80_EN_MASK 0x01L +//PSWUSCFG0_VENDOR_CAP_LIST +#define PSWUSCFG0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define PSWUSCFG0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10 +#define PSWUSCFG0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL +#define PSWUSCFG0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L +#define PSWUSCFG0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L +//PSWUSCFG0_ADAPTER_ID_W +#define PSWUSCFG0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define PSWUSCFG0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10 +#define 
PSWUSCFG0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L +//PSWUSCFG0_PMI_CAP_LIST +#define PSWUSCFG0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define PSWUSCFG0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define PSWUSCFG0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//PSWUSCFG0_PMI_CAP +#define PSWUSCFG0_PMI_CAP__VERSION__SHIFT 0x0 +#define PSWUSCFG0_PMI_CAP__PME_CLOCK__SHIFT 0x3 +#define PSWUSCFG0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4 +#define PSWUSCFG0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5 +#define PSWUSCFG0_PMI_CAP__AUX_CURRENT__SHIFT 0x6 +#define PSWUSCFG0_PMI_CAP__D1_SUPPORT__SHIFT 0x9 +#define PSWUSCFG0_PMI_CAP__D2_SUPPORT__SHIFT 0xa +#define PSWUSCFG0_PMI_CAP__PME_SUPPORT__SHIFT 0xb +#define PSWUSCFG0_PMI_CAP__VERSION_MASK 0x0007L +#define PSWUSCFG0_PMI_CAP__PME_CLOCK_MASK 0x0008L +#define PSWUSCFG0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L +#define PSWUSCFG0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L +#define PSWUSCFG0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L +#define PSWUSCFG0_PMI_CAP__D1_SUPPORT_MASK 0x0200L +#define PSWUSCFG0_PMI_CAP__D2_SUPPORT_MASK 0x0400L +#define PSWUSCFG0_PMI_CAP__PME_SUPPORT_MASK 0xF800L +//PSWUSCFG0_PMI_STATUS_CNTL +#define PSWUSCFG0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0 +#define PSWUSCFG0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3 +#define PSWUSCFG0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8 +#define PSWUSCFG0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9 +#define PSWUSCFG0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd +#define PSWUSCFG0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf +#define PSWUSCFG0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16 +#define PSWUSCFG0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17 +#define PSWUSCFG0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18 +#define PSWUSCFG0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L +#define PSWUSCFG0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L +#define PSWUSCFG0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L +#define PSWUSCFG0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L +#define PSWUSCFG0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L +#define PSWUSCFG0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L +#define PSWUSCFG0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L +#define PSWUSCFG0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L +#define PSWUSCFG0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L +//PSWUSCFG0_PCIE_CAP_LIST +#define PSWUSCFG0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define PSWUSCFG0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define PSWUSCFG0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//PSWUSCFG0_PCIE_CAP +#define PSWUSCFG0_PCIE_CAP__VERSION__SHIFT 0x0 +#define PSWUSCFG0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define PSWUSCFG0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define PSWUSCFG0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define PSWUSCFG0_PCIE_CAP__VERSION_MASK 0x000FL +#define PSWUSCFG0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define PSWUSCFG0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define PSWUSCFG0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//PSWUSCFG0_DEVICE_CAP +#define PSWUSCFG0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define PSWUSCFG0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define PSWUSCFG0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define PSWUSCFG0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define PSWUSCFG0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define PSWUSCFG0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define PSWUSCFG0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 
+#define PSWUSCFG0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define PSWUSCFG0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define PSWUSCFG0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define PSWUSCFG0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define PSWUSCFG0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define PSWUSCFG0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define PSWUSCFG0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define PSWUSCFG0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define PSWUSCFG0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define PSWUSCFG0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define PSWUSCFG0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//PSWUSCFG0_DEVICE_CNTL +#define PSWUSCFG0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define PSWUSCFG0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define PSWUSCFG0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define PSWUSCFG0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define PSWUSCFG0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define PSWUSCFG0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define PSWUSCFG0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define PSWUSCFG0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define PSWUSCFG0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define PSWUSCFG0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define PSWUSCFG0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define PSWUSCFG0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN__SHIFT 0xf +#define PSWUSCFG0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define PSWUSCFG0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define PSWUSCFG0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define PSWUSCFG0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define PSWUSCFG0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define PSWUSCFG0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define PSWUSCFG0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define PSWUSCFG0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define PSWUSCFG0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define PSWUSCFG0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define PSWUSCFG0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define PSWUSCFG0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN_MASK 0x8000L +//PSWUSCFG0_DEVICE_STATUS +#define PSWUSCFG0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define PSWUSCFG0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define PSWUSCFG0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define PSWUSCFG0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define PSWUSCFG0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define PSWUSCFG0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define PSWUSCFG0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define PSWUSCFG0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define PSWUSCFG0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define PSWUSCFG0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define PSWUSCFG0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define PSWUSCFG0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +//PSWUSCFG0_LINK_CAP +#define PSWUSCFG0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define PSWUSCFG0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define PSWUSCFG0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define PSWUSCFG0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define PSWUSCFG0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define PSWUSCFG0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define PSWUSCFG0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define PSWUSCFG0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define PSWUSCFG0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define PSWUSCFG0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define 
PSWUSCFG0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define PSWUSCFG0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define PSWUSCFG0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define PSWUSCFG0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define PSWUSCFG0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define PSWUSCFG0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define PSWUSCFG0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define PSWUSCFG0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define PSWUSCFG0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define PSWUSCFG0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define PSWUSCFG0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define PSWUSCFG0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//PSWUSCFG0_LINK_CNTL +#define PSWUSCFG0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define PSWUSCFG0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define PSWUSCFG0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define PSWUSCFG0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define PSWUSCFG0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define PSWUSCFG0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define PSWUSCFG0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define PSWUSCFG0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define PSWUSCFG0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define PSWUSCFG0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define PSWUSCFG0_LINK_CNTL__DRS_SIGNALING_CONTROL__SHIFT 0xe +#define PSWUSCFG0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define PSWUSCFG0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define PSWUSCFG0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define PSWUSCFG0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define PSWUSCFG0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define PSWUSCFG0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define PSWUSCFG0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define PSWUSCFG0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define PSWUSCFG0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define PSWUSCFG0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +#define PSWUSCFG0_LINK_CNTL__DRS_SIGNALING_CONTROL_MASK 0xC000L +//PSWUSCFG0_LINK_STATUS +#define PSWUSCFG0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define PSWUSCFG0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define PSWUSCFG0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define PSWUSCFG0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define PSWUSCFG0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define PSWUSCFG0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define PSWUSCFG0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define PSWUSCFG0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define PSWUSCFG0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define PSWUSCFG0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define PSWUSCFG0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define PSWUSCFG0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define PSWUSCFG0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define PSWUSCFG0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//PSWUSCFG0_DEVICE_CAP2 +#define PSWUSCFG0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define PSWUSCFG0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define PSWUSCFG0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define PSWUSCFG0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define 
PSWUSCFG0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define PSWUSCFG0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define PSWUSCFG0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define PSWUSCFG0_DEVICE_CAP2__LN_SYSTEM_CLS__SHIFT 0xe +#define PSWUSCFG0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define PSWUSCFG0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define PSWUSCFG0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define PSWUSCFG0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define PSWUSCFG0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define PSWUSCFG0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define PSWUSCFG0_DEVICE_CAP2__FRS_SUPPORTED__SHIFT 0x1f +#define PSWUSCFG0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define PSWUSCFG0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define PSWUSCFG0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define PSWUSCFG0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define PSWUSCFG0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define PSWUSCFG0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define PSWUSCFG0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define PSWUSCFG0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define PSWUSCFG0_DEVICE_CAP2__LN_SYSTEM_CLS_MASK 0x0000C000L +#define PSWUSCFG0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define PSWUSCFG0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define PSWUSCFG0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define PSWUSCFG0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define PSWUSCFG0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define PSWUSCFG0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define PSWUSCFG0_DEVICE_CAP2__FRS_SUPPORTED_MASK 0x80000000L +//PSWUSCFG0_DEVICE_CNTL2 +#define PSWUSCFG0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define PSWUSCFG0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define PSWUSCFG0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define PSWUSCFG0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define PSWUSCFG0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define PSWUSCFG0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define PSWUSCFG0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define PSWUSCFG0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define PSWUSCFG0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define PSWUSCFG0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define PSWUSCFG0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define PSWUSCFG0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define PSWUSCFG0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define PSWUSCFG0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define PSWUSCFG0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define PSWUSCFG0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define PSWUSCFG0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define PSWUSCFG0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define PSWUSCFG0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define PSWUSCFG0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define PSWUSCFG0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define PSWUSCFG0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define PSWUSCFG0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define 
PSWUSCFG0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//PSWUSCFG0_DEVICE_STATUS2 +#define PSWUSCFG0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define PSWUSCFG0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//PSWUSCFG0_LINK_CAP2 +#define PSWUSCFG0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define PSWUSCFG0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define PSWUSCFG0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT__SHIFT 0x9 +#define PSWUSCFG0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT__SHIFT 0x10 +#define PSWUSCFG0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define PSWUSCFG0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define PSWUSCFG0_LINK_CAP2__DRS_SUPPORTED__SHIFT 0x1f +#define PSWUSCFG0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define PSWUSCFG0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define PSWUSCFG0_LINK_CAP2__LOWER_SKP_OS_GEN_SUPPORT_MASK 0x00001E00L +#define PSWUSCFG0_LINK_CAP2__LOWER_SKP_OS_RCV_SUPPORT_MASK 0x000F0000L +#define PSWUSCFG0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define PSWUSCFG0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define PSWUSCFG0_LINK_CAP2__DRS_SUPPORTED_MASK 0x80000000L +//PSWUSCFG0_LINK_CNTL2 +#define PSWUSCFG0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define PSWUSCFG0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define PSWUSCFG0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define PSWUSCFG0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define PSWUSCFG0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define PSWUSCFG0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define PSWUSCFG0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define PSWUSCFG0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define PSWUSCFG0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define PSWUSCFG0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define PSWUSCFG0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define PSWUSCFG0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define PSWUSCFG0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define PSWUSCFG0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define PSWUSCFG0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define PSWUSCFG0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//PSWUSCFG0_LINK_STATUS2 +#define PSWUSCFG0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define PSWUSCFG0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define PSWUSCFG0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define PSWUSCFG0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define PSWUSCFG0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define PSWUSCFG0_LINK_STATUS2__DRS_MESSAGE_RECEIVED__SHIFT 0xf +#define PSWUSCFG0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define PSWUSCFG0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define PSWUSCFG0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define PSWUSCFG0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define PSWUSCFG0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define PSWUSCFG0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 
0x7000L +#define PSWUSCFG0_LINK_STATUS2__DRS_MESSAGE_RECEIVED_MASK 0x8000L +//PSWUSCFG0_MSI_CAP_LIST +#define PSWUSCFG0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define PSWUSCFG0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define PSWUSCFG0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//PSWUSCFG0_MSI_MSG_CNTL +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define PSWUSCFG0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//PSWUSCFG0_MSI_MSG_ADDR_LO +#define PSWUSCFG0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define PSWUSCFG0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//PSWUSCFG0_MSI_MSG_ADDR_HI +#define PSWUSCFG0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define PSWUSCFG0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//PSWUSCFG0_MSI_MSG_DATA +#define PSWUSCFG0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define PSWUSCFG0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//PSWUSCFG0_MSI_MSG_DATA_64 +#define PSWUSCFG0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define PSWUSCFG0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0xFFFFL +//PSWUSCFG0_SSID_CAP_LIST +#define PSWUSCFG0_SSID_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_SSID_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define PSWUSCFG0_SSID_CAP_LIST__CAP_ID_MASK 0x00FFL +#define PSWUSCFG0_SSID_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//PSWUSCFG0_SSID_CAP +#define PSWUSCFG0_SSID_CAP__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define PSWUSCFG0_SSID_CAP__SUBSYSTEM_ID__SHIFT 0x10 +#define PSWUSCFG0_SSID_CAP__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_SSID_CAP__SUBSYSTEM_ID_MASK 0xFFFF0000L +//MSI_MAP_CAP_LIST +#define MSI_MAP_CAP_LIST__CAP_ID__SHIFT 0x0 +#define MSI_MAP_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define MSI_MAP_CAP_LIST__CAP_ID_MASK 0x00FFL +#define MSI_MAP_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//MSI_MAP_CAP +#define MSI_MAP_CAP__EN__SHIFT 0x0 +#define MSI_MAP_CAP__FIXD__SHIFT 0x1 +#define MSI_MAP_CAP__CAP_TYPE__SHIFT 0xb +#define MSI_MAP_CAP__EN_MASK 0x0001L +#define MSI_MAP_CAP__FIXD_MASK 0x0002L +#define MSI_MAP_CAP__CAP_TYPE_MASK 0xF800L +//MSI_MAP_ADDR_LO +#define MSI_MAP_ADDR_LO__MSI_MAP_ADDR_LO__SHIFT 0x14 +#define MSI_MAP_ADDR_LO__MSI_MAP_ADDR_LO_MASK 0xFFF00000L +//MSI_MAP_ADDR_HI +#define MSI_MAP_ADDR_HI__MSI_MAP_ADDR_HI__SHIFT 0x0 +#define MSI_MAP_ADDR_HI__MSI_MAP_ADDR_HI_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define 
PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_VENDOR_SPECIFIC1 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_VENDOR_SPECIFIC2 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_VC_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_PORT_VC_CAP_REG1 +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0 +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8 +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L +//PSWUSCFG0_PCIE_PORT_VC_CAP_REG2 +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0 +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18 +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL +#define PSWUSCFG0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L +//PSWUSCFG0_PCIE_PORT_VC_CNTL +#define PSWUSCFG0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0 +#define PSWUSCFG0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1 +#define PSWUSCFG0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L +#define PSWUSCFG0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL +//PSWUSCFG0_PCIE_PORT_VC_STATUS +#define PSWUSCFG0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0 +#define PSWUSCFG0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L +//PSWUSCFG0_PCIE_VC0_RESOURCE_CAP +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define 
PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define PSWUSCFG0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS +#define PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define PSWUSCFG0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//PSWUSCFG0_PCIE_VC1_RESOURCE_CAP +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define PSWUSCFG0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS +#define PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define PSWUSCFG0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW1 +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0 +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW2 +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0 +#define PSWUSCFG0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define 
PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_UNCORR_ERR_STATUS +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS__SHIFT 0x1a +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_STATUS__POISONED_TLP_EGRESS_BLOCKED_STATUS_MASK 0x04000000L +//PSWUSCFG0_PCIE_UNCORR_ERR_MASK +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define 
PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK__SHIFT 0x1a +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_MASK__POISONED_TLP_EGRESS_BLOCKED_MASK_MASK 0x04000000L +//PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x1a +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 
0x00000010L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +#define PSWUSCFG0_PCIE_UNCORR_ERR_SEVERITY__POISONED_TLP_EGRESS_BLOCKED_SEVERITY_MASK 0x04000000L +//PSWUSCFG0_PCIE_CORR_ERR_STATUS +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define PSWUSCFG0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//PSWUSCFG0_PCIE_CORR_ERR_MASK +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define 
PSWUSCFG0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define PSWUSCFG0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define PSWUSCFG0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//PSWUSCFG0_PCIE_HDR_LOG0 +#define PSWUSCFG0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define PSWUSCFG0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_HDR_LOG1 +#define PSWUSCFG0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define PSWUSCFG0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_HDR_LOG2 +#define PSWUSCFG0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define PSWUSCFG0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_HDR_LOG3 +#define PSWUSCFG0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define PSWUSCFG0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_TLP_PREFIX_LOG0 +#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_TLP_PREFIX_LOG1 +#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_TLP_PREFIX_LOG2 +#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_TLP_PREFIX_LOG3 +#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define PSWUSCFG0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_LINK_CNTL3 +#define PSWUSCFG0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1 +#define 
PSWUSCFG0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN__SHIFT 0x9 +#define PSWUSCFG0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x10 +#define PSWUSCFG0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L +#define PSWUSCFG0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L +#define PSWUSCFG0_PCIE_LINK_CNTL3__ENABLE_LOWER_SKP_OS_GEN_MASK 0x0000FE00L +#define PSWUSCFG0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFF0000L +//PSWUSCFG0_PCIE_LANE_ERROR_STATUS +#define PSWUSCFG0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10 +#define PSWUSCFG0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L +//PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define 
PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define 
PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define 
PSWUSCFG0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL +#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define PSWUSCFG0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +//PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_ACS_CAP +#define PSWUSCFG0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0 +#define PSWUSCFG0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1 +#define PSWUSCFG0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2 +#define PSWUSCFG0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3 +#define PSWUSCFG0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4 +#define PSWUSCFG0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5 +#define PSWUSCFG0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6 +#define PSWUSCFG0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8 +#define PSWUSCFG0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L +#define PSWUSCFG0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L +#define PSWUSCFG0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 
0x0004L +#define PSWUSCFG0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L +#define PSWUSCFG0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L +#define PSWUSCFG0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L +#define PSWUSCFG0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L +#define PSWUSCFG0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L +//PSWUSCFG0_PCIE_ACS_CNTL +#define PSWUSCFG0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0 +#define PSWUSCFG0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1 +#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2 +#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3 +#define PSWUSCFG0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4 +#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5 +#define PSWUSCFG0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6 +#define PSWUSCFG0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L +#define PSWUSCFG0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L +#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L +#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L +#define PSWUSCFG0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L +#define PSWUSCFG0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L +#define PSWUSCFG0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L +//PSWUSCFG0_PCIE_MC_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_MC_CAP +#define PSWUSCFG0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf +#define PSWUSCFG0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL +#define PSWUSCFG0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L +//PSWUSCFG0_PCIE_MC_CNTL +#define PSWUSCFG0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf +#define PSWUSCFG0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL +#define PSWUSCFG0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L +//PSWUSCFG0_PCIE_MC_ADDR0 +#define PSWUSCFG0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc +#define PSWUSCFG0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL +#define PSWUSCFG0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L +//PSWUSCFG0_PCIE_MC_ADDR1 +#define PSWUSCFG0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_MC_RCV0 +#define PSWUSCFG0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_MC_RCV1 +#define PSWUSCFG0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_MC_BLOCK_ALL0 +#define PSWUSCFG0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_MC_BLOCK_ALL1 +#define PSWUSCFG0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_0 +#define PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 
0xFFFFFFFFL +//PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_1 +#define PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0 +#define PSWUSCFG0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL +//PCIE_MC_OVERLAY_BAR0 +#define PCIE_MC_OVERLAY_BAR0__MC_OVERLAY_SIZE__SHIFT 0x0 +#define PCIE_MC_OVERLAY_BAR0__MC_OVERLAY_BAR_0__SHIFT 0x6 +#define PCIE_MC_OVERLAY_BAR0__MC_OVERLAY_SIZE_MASK 0x0000003FL +#define PCIE_MC_OVERLAY_BAR0__MC_OVERLAY_BAR_0_MASK 0xFFFFFFC0L +//PCIE_MC_OVERLAY_BAR1 +#define PCIE_MC_OVERLAY_BAR1__MC_OVERLAY_BAR_1__SHIFT 0x0 +#define PCIE_MC_OVERLAY_BAR1__MC_OVERLAY_BAR_1_MASK 0xFFFFFFFFL +//PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_LTR_CAP +#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0 +#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa +#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10 +#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a +#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL +#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L +#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L +#define PSWUSCFG0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L +//PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_PCIE_ARI_CAP +#define PSWUSCFG0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define PSWUSCFG0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define PSWUSCFG0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define PSWUSCFG0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define PSWUSCFG0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define PSWUSCFG0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//PSWUSCFG0_PCIE_ARI_CNTL +#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define PSWUSCFG0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L +//PCIE_L1_PM_SUB_CAP_LIST +#define PCIE_L1_PM_SUB_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PCIE_L1_PM_SUB_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PCIE_L1_PM_SUB_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PCIE_L1_PM_SUB_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PCIE_L1_PM_SUB_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PCIE_L1_PM_SUB_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PCIE_L1_PM_SUB_CAP +#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_2_SUPPORTED__SHIFT 0x0 +#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_1_SUPPORTED__SHIFT 0x1 +#define PCIE_L1_PM_SUB_CAP__ASPM_L1_2_SUPPORTED__SHIFT 0x2 +#define 
PCIE_L1_PM_SUB_CAP__ASPM_L1_1_SUPPORTED__SHIFT 0x3 +#define PCIE_L1_PM_SUB_CAP__L1_PM_SUB_SUPPORTED__SHIFT 0x4 +#define PCIE_L1_PM_SUB_CAP__PORT_CM_RESTORE_TIME__SHIFT 0x8 +#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_SCALE__SHIFT 0x10 +#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_VALUE__SHIFT 0x13 +#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_2_SUPPORTED_MASK 0x00000001L +#define PCIE_L1_PM_SUB_CAP__PCI_PM_L1_1_SUPPORTED_MASK 0x00000002L +#define PCIE_L1_PM_SUB_CAP__ASPM_L1_2_SUPPORTED_MASK 0x00000004L +#define PCIE_L1_PM_SUB_CAP__ASPM_L1_1_SUPPORTED_MASK 0x00000008L +#define PCIE_L1_PM_SUB_CAP__L1_PM_SUB_SUPPORTED_MASK 0x00000010L +#define PCIE_L1_PM_SUB_CAP__PORT_CM_RESTORE_TIME_MASK 0x0000FF00L +#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_SCALE_MASK 0x00030000L +#define PCIE_L1_PM_SUB_CAP__PORT_T_POWER_ON_VALUE_MASK 0x00F80000L +//PCIE_L1_PM_SUB_CNTL +#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN__SHIFT 0x0 +#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN__SHIFT 0x1 +#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN__SHIFT 0x2 +#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN__SHIFT 0x3 +#define PCIE_L1_PM_SUB_CNTL__COMMON_MODE_RESTORE_TIME__SHIFT 0x8 +#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_VALUE__SHIFT 0x10 +#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_SCALE__SHIFT 0x1d +#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L +#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L +#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L +#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L +#define PCIE_L1_PM_SUB_CNTL__COMMON_MODE_RESTORE_TIME_MASK 0x0000FF00L +#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_VALUE_MASK 0x03FF0000L +#define PCIE_L1_PM_SUB_CNTL__LTR_L1_2_THRESHOLD_SCALE_MASK 0xE0000000L +//PCIE_L1_PM_SUB_CNTL2 +#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_SCALE__SHIFT 0x0 +#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_VALUE__SHIFT 0x3 +#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_SCALE_MASK 0x00000003L +#define PCIE_L1_PM_SUB_CNTL2__T_POWER_ON_VALUE_MASK 0x000000F8L +//PCIE_ESM_CAP_LIST +#define PCIE_ESM_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PCIE_ESM_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PCIE_ESM_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PCIE_ESM_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PCIE_ESM_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PCIE_ESM_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PCIE_ESM_HEADER_1 +#define PCIE_ESM_HEADER_1__ESM_VENDOR_ID__SHIFT 0x0 +#define PCIE_ESM_HEADER_1__ESM_CAP_REV__SHIFT 0x10 +#define PCIE_ESM_HEADER_1__ESM_CAP_LEN__SHIFT 0x14 +#define PCIE_ESM_HEADER_1__ESM_VENDOR_ID_MASK 0x0000FFFFL +#define PCIE_ESM_HEADER_1__ESM_CAP_REV_MASK 0x000F0000L +#define PCIE_ESM_HEADER_1__ESM_CAP_LEN_MASK 0xFFF00000L +//PCIE_ESM_HEADER_2 +#define PCIE_ESM_HEADER_2__CAP_ID__SHIFT 0x0 +#define PCIE_ESM_HEADER_2__CAP_ID_MASK 0xFFFFL +//PCIE_ESM_STATUS +#define PCIE_ESM_STATUS__MIN_TIME_IN_EI_VAL__SHIFT 0x0 +#define PCIE_ESM_STATUS__MIN_TIME_IN_EI_SCALE__SHIFT 0x9 +#define PCIE_ESM_STATUS__MIN_TIME_IN_EI_VAL_MASK 0x01FFL +#define PCIE_ESM_STATUS__MIN_TIME_IN_EI_SCALE_MASK 0x0E00L +//PCIE_ESM_CTRL +#define PCIE_ESM_CTRL__ESM_GEN_3_DATA_RATE__SHIFT 0x0 +#define PCIE_ESM_CTRL__ESM_GEN_4_DATA_RATE__SHIFT 0x8 +#define PCIE_ESM_CTRL__ESM_ENABLED__SHIFT 0xf +#define PCIE_ESM_CTRL__ESM_GEN_3_DATA_RATE_MASK 0x007FL +#define PCIE_ESM_CTRL__ESM_GEN_4_DATA_RATE_MASK 0x7F00L +#define PCIE_ESM_CTRL__ESM_ENABLED_MASK 0x8000L +//PCIE_ESM_CAP_1 +#define PCIE_ESM_CAP_1__ESM_8P0G__SHIFT 0x0 +#define PCIE_ESM_CAP_1__ESM_8P1G__SHIFT 0x1 +#define PCIE_ESM_CAP_1__ESM_8P2G__SHIFT 0x2 +#define 
PCIE_ESM_CAP_1__ESM_8P3G__SHIFT 0x3 +#define PCIE_ESM_CAP_1__ESM_8P4G__SHIFT 0x4 +#define PCIE_ESM_CAP_1__ESM_8P5G__SHIFT 0x5 +#define PCIE_ESM_CAP_1__ESM_8P6G__SHIFT 0x6 +#define PCIE_ESM_CAP_1__ESM_8P7G__SHIFT 0x7 +#define PCIE_ESM_CAP_1__ESM_8P8G__SHIFT 0x8 +#define PCIE_ESM_CAP_1__ESM_8P9G__SHIFT 0x9 +#define PCIE_ESM_CAP_1__ESM_9P0G__SHIFT 0xa +#define PCIE_ESM_CAP_1__ESM_9P1G__SHIFT 0xb +#define PCIE_ESM_CAP_1__ESM_9P2G__SHIFT 0xc +#define PCIE_ESM_CAP_1__ESM_9P3G__SHIFT 0xd +#define PCIE_ESM_CAP_1__ESM_9P4G__SHIFT 0xe +#define PCIE_ESM_CAP_1__ESM_9P5G__SHIFT 0xf +#define PCIE_ESM_CAP_1__ESM_9P6G__SHIFT 0x10 +#define PCIE_ESM_CAP_1__ESM_9P7G__SHIFT 0x11 +#define PCIE_ESM_CAP_1__ESM_9P8G__SHIFT 0x12 +#define PCIE_ESM_CAP_1__ESM_9P9G__SHIFT 0x13 +#define PCIE_ESM_CAP_1__ESM_10P0G__SHIFT 0x14 +#define PCIE_ESM_CAP_1__ESM_10P1G__SHIFT 0x15 +#define PCIE_ESM_CAP_1__ESM_10P2G__SHIFT 0x16 +#define PCIE_ESM_CAP_1__ESM_10P3G__SHIFT 0x17 +#define PCIE_ESM_CAP_1__ESM_10P4G__SHIFT 0x18 +#define PCIE_ESM_CAP_1__ESM_10P5G__SHIFT 0x19 +#define PCIE_ESM_CAP_1__ESM_10P6G__SHIFT 0x1a +#define PCIE_ESM_CAP_1__ESM_10P7G__SHIFT 0x1b +#define PCIE_ESM_CAP_1__ESM_10P8G__SHIFT 0x1c +#define PCIE_ESM_CAP_1__ESM_10P9G__SHIFT 0x1d +#define PCIE_ESM_CAP_1__ESM_8P0G_MASK 0x00000001L +#define PCIE_ESM_CAP_1__ESM_8P1G_MASK 0x00000002L +#define PCIE_ESM_CAP_1__ESM_8P2G_MASK 0x00000004L +#define PCIE_ESM_CAP_1__ESM_8P3G_MASK 0x00000008L +#define PCIE_ESM_CAP_1__ESM_8P4G_MASK 0x00000010L +#define PCIE_ESM_CAP_1__ESM_8P5G_MASK 0x00000020L +#define PCIE_ESM_CAP_1__ESM_8P6G_MASK 0x00000040L +#define PCIE_ESM_CAP_1__ESM_8P7G_MASK 0x00000080L +#define PCIE_ESM_CAP_1__ESM_8P8G_MASK 0x00000100L +#define PCIE_ESM_CAP_1__ESM_8P9G_MASK 0x00000200L +#define PCIE_ESM_CAP_1__ESM_9P0G_MASK 0x00000400L +#define PCIE_ESM_CAP_1__ESM_9P1G_MASK 0x00000800L +#define PCIE_ESM_CAP_1__ESM_9P2G_MASK 0x00001000L +#define PCIE_ESM_CAP_1__ESM_9P3G_MASK 0x00002000L +#define PCIE_ESM_CAP_1__ESM_9P4G_MASK 0x00004000L +#define PCIE_ESM_CAP_1__ESM_9P5G_MASK 0x00008000L +#define PCIE_ESM_CAP_1__ESM_9P6G_MASK 0x00010000L +#define PCIE_ESM_CAP_1__ESM_9P7G_MASK 0x00020000L +#define PCIE_ESM_CAP_1__ESM_9P8G_MASK 0x00040000L +#define PCIE_ESM_CAP_1__ESM_9P9G_MASK 0x00080000L +#define PCIE_ESM_CAP_1__ESM_10P0G_MASK 0x00100000L +#define PCIE_ESM_CAP_1__ESM_10P1G_MASK 0x00200000L +#define PCIE_ESM_CAP_1__ESM_10P2G_MASK 0x00400000L +#define PCIE_ESM_CAP_1__ESM_10P3G_MASK 0x00800000L +#define PCIE_ESM_CAP_1__ESM_10P4G_MASK 0x01000000L +#define PCIE_ESM_CAP_1__ESM_10P5G_MASK 0x02000000L +#define PCIE_ESM_CAP_1__ESM_10P6G_MASK 0x04000000L +#define PCIE_ESM_CAP_1__ESM_10P7G_MASK 0x08000000L +#define PCIE_ESM_CAP_1__ESM_10P8G_MASK 0x10000000L +#define PCIE_ESM_CAP_1__ESM_10P9G_MASK 0x20000000L +//PCIE_ESM_CAP_2 +#define PCIE_ESM_CAP_2__ESM_11P0G__SHIFT 0x0 +#define PCIE_ESM_CAP_2__ESM_11P1G__SHIFT 0x1 +#define PCIE_ESM_CAP_2__ESM_11P2G__SHIFT 0x2 +#define PCIE_ESM_CAP_2__ESM_11P3G__SHIFT 0x3 +#define PCIE_ESM_CAP_2__ESM_11P4G__SHIFT 0x4 +#define PCIE_ESM_CAP_2__ESM_11P5G__SHIFT 0x5 +#define PCIE_ESM_CAP_2__ESM_11P6G__SHIFT 0x6 +#define PCIE_ESM_CAP_2__ESM_11P7G__SHIFT 0x7 +#define PCIE_ESM_CAP_2__ESM_11P8G__SHIFT 0x8 +#define PCIE_ESM_CAP_2__ESM_11P9G__SHIFT 0x9 +#define PCIE_ESM_CAP_2__ESM_12P0G__SHIFT 0xa +#define PCIE_ESM_CAP_2__ESM_12P1G__SHIFT 0xb +#define PCIE_ESM_CAP_2__ESM_12P2G__SHIFT 0xc +#define PCIE_ESM_CAP_2__ESM_12P3G__SHIFT 0xd +#define PCIE_ESM_CAP_2__ESM_12P4G__SHIFT 0xe +#define PCIE_ESM_CAP_2__ESM_12P5G__SHIFT 0xf +#define 
PCIE_ESM_CAP_2__ESM_12P6G__SHIFT 0x10 +#define PCIE_ESM_CAP_2__ESM_12P7G__SHIFT 0x11 +#define PCIE_ESM_CAP_2__ESM_12P8G__SHIFT 0x12 +#define PCIE_ESM_CAP_2__ESM_12P9G__SHIFT 0x13 +#define PCIE_ESM_CAP_2__ESM_13P0G__SHIFT 0x14 +#define PCIE_ESM_CAP_2__ESM_13P1G__SHIFT 0x15 +#define PCIE_ESM_CAP_2__ESM_13P2G__SHIFT 0x16 +#define PCIE_ESM_CAP_2__ESM_13P3G__SHIFT 0x17 +#define PCIE_ESM_CAP_2__ESM_13P4G__SHIFT 0x18 +#define PCIE_ESM_CAP_2__ESM_13P5G__SHIFT 0x19 +#define PCIE_ESM_CAP_2__ESM_13P6G__SHIFT 0x1a +#define PCIE_ESM_CAP_2__ESM_13P7G__SHIFT 0x1b +#define PCIE_ESM_CAP_2__ESM_13P8G__SHIFT 0x1c +#define PCIE_ESM_CAP_2__ESM_13P9G__SHIFT 0x1d +#define PCIE_ESM_CAP_2__ESM_11P0G_MASK 0x00000001L +#define PCIE_ESM_CAP_2__ESM_11P1G_MASK 0x00000002L +#define PCIE_ESM_CAP_2__ESM_11P2G_MASK 0x00000004L +#define PCIE_ESM_CAP_2__ESM_11P3G_MASK 0x00000008L +#define PCIE_ESM_CAP_2__ESM_11P4G_MASK 0x00000010L +#define PCIE_ESM_CAP_2__ESM_11P5G_MASK 0x00000020L +#define PCIE_ESM_CAP_2__ESM_11P6G_MASK 0x00000040L +#define PCIE_ESM_CAP_2__ESM_11P7G_MASK 0x00000080L +#define PCIE_ESM_CAP_2__ESM_11P8G_MASK 0x00000100L +#define PCIE_ESM_CAP_2__ESM_11P9G_MASK 0x00000200L +#define PCIE_ESM_CAP_2__ESM_12P0G_MASK 0x00000400L +#define PCIE_ESM_CAP_2__ESM_12P1G_MASK 0x00000800L +#define PCIE_ESM_CAP_2__ESM_12P2G_MASK 0x00001000L +#define PCIE_ESM_CAP_2__ESM_12P3G_MASK 0x00002000L +#define PCIE_ESM_CAP_2__ESM_12P4G_MASK 0x00004000L +#define PCIE_ESM_CAP_2__ESM_12P5G_MASK 0x00008000L +#define PCIE_ESM_CAP_2__ESM_12P6G_MASK 0x00010000L +#define PCIE_ESM_CAP_2__ESM_12P7G_MASK 0x00020000L +#define PCIE_ESM_CAP_2__ESM_12P8G_MASK 0x00040000L +#define PCIE_ESM_CAP_2__ESM_12P9G_MASK 0x00080000L +#define PCIE_ESM_CAP_2__ESM_13P0G_MASK 0x00100000L +#define PCIE_ESM_CAP_2__ESM_13P1G_MASK 0x00200000L +#define PCIE_ESM_CAP_2__ESM_13P2G_MASK 0x00400000L +#define PCIE_ESM_CAP_2__ESM_13P3G_MASK 0x00800000L +#define PCIE_ESM_CAP_2__ESM_13P4G_MASK 0x01000000L +#define PCIE_ESM_CAP_2__ESM_13P5G_MASK 0x02000000L +#define PCIE_ESM_CAP_2__ESM_13P6G_MASK 0x04000000L +#define PCIE_ESM_CAP_2__ESM_13P7G_MASK 0x08000000L +#define PCIE_ESM_CAP_2__ESM_13P8G_MASK 0x10000000L +#define PCIE_ESM_CAP_2__ESM_13P9G_MASK 0x20000000L +//PCIE_ESM_CAP_3 +#define PCIE_ESM_CAP_3__ESM_14P0G__SHIFT 0x0 +#define PCIE_ESM_CAP_3__ESM_14P1G__SHIFT 0x1 +#define PCIE_ESM_CAP_3__ESM_14P2G__SHIFT 0x2 +#define PCIE_ESM_CAP_3__ESM_14P3G__SHIFT 0x3 +#define PCIE_ESM_CAP_3__ESM_14P4G__SHIFT 0x4 +#define PCIE_ESM_CAP_3__ESM_14P5G__SHIFT 0x5 +#define PCIE_ESM_CAP_3__ESM_14P6G__SHIFT 0x6 +#define PCIE_ESM_CAP_3__ESM_14P7G__SHIFT 0x7 +#define PCIE_ESM_CAP_3__ESM_14P8G__SHIFT 0x8 +#define PCIE_ESM_CAP_3__ESM_14P9G__SHIFT 0x9 +#define PCIE_ESM_CAP_3__ESM_15P0G__SHIFT 0xa +#define PCIE_ESM_CAP_3__ESM_15P1G__SHIFT 0xb +#define PCIE_ESM_CAP_3__ESM_15P2G__SHIFT 0xc +#define PCIE_ESM_CAP_3__ESM_15P3G__SHIFT 0xd +#define PCIE_ESM_CAP_3__ESM_15P4G__SHIFT 0xe +#define PCIE_ESM_CAP_3__ESM_15P5G__SHIFT 0xf +#define PCIE_ESM_CAP_3__ESM_15P6G__SHIFT 0x10 +#define PCIE_ESM_CAP_3__ESM_15P7G__SHIFT 0x11 +#define PCIE_ESM_CAP_3__ESM_15P8G__SHIFT 0x12 +#define PCIE_ESM_CAP_3__ESM_15P9G__SHIFT 0x13 +#define PCIE_ESM_CAP_3__ESM_14P0G_MASK 0x00000001L +#define PCIE_ESM_CAP_3__ESM_14P1G_MASK 0x00000002L +#define PCIE_ESM_CAP_3__ESM_14P2G_MASK 0x00000004L +#define PCIE_ESM_CAP_3__ESM_14P3G_MASK 0x00000008L +#define PCIE_ESM_CAP_3__ESM_14P4G_MASK 0x00000010L +#define PCIE_ESM_CAP_3__ESM_14P5G_MASK 0x00000020L +#define PCIE_ESM_CAP_3__ESM_14P6G_MASK 0x00000040L +#define PCIE_ESM_CAP_3__ESM_14P7G_MASK 
0x00000080L +#define PCIE_ESM_CAP_3__ESM_14P8G_MASK 0x00000100L +#define PCIE_ESM_CAP_3__ESM_14P9G_MASK 0x00000200L +#define PCIE_ESM_CAP_3__ESM_15P0G_MASK 0x00000400L +#define PCIE_ESM_CAP_3__ESM_15P1G_MASK 0x00000800L +#define PCIE_ESM_CAP_3__ESM_15P2G_MASK 0x00001000L +#define PCIE_ESM_CAP_3__ESM_15P3G_MASK 0x00002000L +#define PCIE_ESM_CAP_3__ESM_15P4G_MASK 0x00004000L +#define PCIE_ESM_CAP_3__ESM_15P5G_MASK 0x00008000L +#define PCIE_ESM_CAP_3__ESM_15P6G_MASK 0x00010000L +#define PCIE_ESM_CAP_3__ESM_15P7G_MASK 0x00020000L +#define PCIE_ESM_CAP_3__ESM_15P8G_MASK 0x00040000L +#define PCIE_ESM_CAP_3__ESM_15P9G_MASK 0x00080000L +//PCIE_ESM_CAP_4 +#define PCIE_ESM_CAP_4__ESM_16P0G__SHIFT 0x0 +#define PCIE_ESM_CAP_4__ESM_16P1G__SHIFT 0x1 +#define PCIE_ESM_CAP_4__ESM_16P2G__SHIFT 0x2 +#define PCIE_ESM_CAP_4__ESM_16P3G__SHIFT 0x3 +#define PCIE_ESM_CAP_4__ESM_16P4G__SHIFT 0x4 +#define PCIE_ESM_CAP_4__ESM_16P5G__SHIFT 0x5 +#define PCIE_ESM_CAP_4__ESM_16P6G__SHIFT 0x6 +#define PCIE_ESM_CAP_4__ESM_16P7G__SHIFT 0x7 +#define PCIE_ESM_CAP_4__ESM_16P8G__SHIFT 0x8 +#define PCIE_ESM_CAP_4__ESM_16P9G__SHIFT 0x9 +#define PCIE_ESM_CAP_4__ESM_17P0G__SHIFT 0xa +#define PCIE_ESM_CAP_4__ESM_17P1G__SHIFT 0xb +#define PCIE_ESM_CAP_4__ESM_17P2G__SHIFT 0xc +#define PCIE_ESM_CAP_4__ESM_17P3G__SHIFT 0xd +#define PCIE_ESM_CAP_4__ESM_17P4G__SHIFT 0xe +#define PCIE_ESM_CAP_4__ESM_17P5G__SHIFT 0xf +#define PCIE_ESM_CAP_4__ESM_17P6G__SHIFT 0x10 +#define PCIE_ESM_CAP_4__ESM_17P7G__SHIFT 0x11 +#define PCIE_ESM_CAP_4__ESM_17P8G__SHIFT 0x12 +#define PCIE_ESM_CAP_4__ESM_17P9G__SHIFT 0x13 +#define PCIE_ESM_CAP_4__ESM_18P0G__SHIFT 0x14 +#define PCIE_ESM_CAP_4__ESM_18P1G__SHIFT 0x15 +#define PCIE_ESM_CAP_4__ESM_18P2G__SHIFT 0x16 +#define PCIE_ESM_CAP_4__ESM_18P3G__SHIFT 0x17 +#define PCIE_ESM_CAP_4__ESM_18P4G__SHIFT 0x18 +#define PCIE_ESM_CAP_4__ESM_18P5G__SHIFT 0x19 +#define PCIE_ESM_CAP_4__ESM_18P6G__SHIFT 0x1a +#define PCIE_ESM_CAP_4__ESM_18P7G__SHIFT 0x1b +#define PCIE_ESM_CAP_4__ESM_18P8G__SHIFT 0x1c +#define PCIE_ESM_CAP_4__ESM_18P9G__SHIFT 0x1d +#define PCIE_ESM_CAP_4__ESM_16P0G_MASK 0x00000001L +#define PCIE_ESM_CAP_4__ESM_16P1G_MASK 0x00000002L +#define PCIE_ESM_CAP_4__ESM_16P2G_MASK 0x00000004L +#define PCIE_ESM_CAP_4__ESM_16P3G_MASK 0x00000008L +#define PCIE_ESM_CAP_4__ESM_16P4G_MASK 0x00000010L +#define PCIE_ESM_CAP_4__ESM_16P5G_MASK 0x00000020L +#define PCIE_ESM_CAP_4__ESM_16P6G_MASK 0x00000040L +#define PCIE_ESM_CAP_4__ESM_16P7G_MASK 0x00000080L +#define PCIE_ESM_CAP_4__ESM_16P8G_MASK 0x00000100L +#define PCIE_ESM_CAP_4__ESM_16P9G_MASK 0x00000200L +#define PCIE_ESM_CAP_4__ESM_17P0G_MASK 0x00000400L +#define PCIE_ESM_CAP_4__ESM_17P1G_MASK 0x00000800L +#define PCIE_ESM_CAP_4__ESM_17P2G_MASK 0x00001000L +#define PCIE_ESM_CAP_4__ESM_17P3G_MASK 0x00002000L +#define PCIE_ESM_CAP_4__ESM_17P4G_MASK 0x00004000L +#define PCIE_ESM_CAP_4__ESM_17P5G_MASK 0x00008000L +#define PCIE_ESM_CAP_4__ESM_17P6G_MASK 0x00010000L +#define PCIE_ESM_CAP_4__ESM_17P7G_MASK 0x00020000L +#define PCIE_ESM_CAP_4__ESM_17P8G_MASK 0x00040000L +#define PCIE_ESM_CAP_4__ESM_17P9G_MASK 0x00080000L +#define PCIE_ESM_CAP_4__ESM_18P0G_MASK 0x00100000L +#define PCIE_ESM_CAP_4__ESM_18P1G_MASK 0x00200000L +#define PCIE_ESM_CAP_4__ESM_18P2G_MASK 0x00400000L +#define PCIE_ESM_CAP_4__ESM_18P3G_MASK 0x00800000L +#define PCIE_ESM_CAP_4__ESM_18P4G_MASK 0x01000000L +#define PCIE_ESM_CAP_4__ESM_18P5G_MASK 0x02000000L +#define PCIE_ESM_CAP_4__ESM_18P6G_MASK 0x04000000L +#define PCIE_ESM_CAP_4__ESM_18P7G_MASK 0x08000000L +#define PCIE_ESM_CAP_4__ESM_18P8G_MASK 
0x10000000L +#define PCIE_ESM_CAP_4__ESM_18P9G_MASK 0x20000000L +//PCIE_ESM_CAP_5 +#define PCIE_ESM_CAP_5__ESM_19P0G__SHIFT 0x0 +#define PCIE_ESM_CAP_5__ESM_19P1G__SHIFT 0x1 +#define PCIE_ESM_CAP_5__ESM_19P2G__SHIFT 0x2 +#define PCIE_ESM_CAP_5__ESM_19P3G__SHIFT 0x3 +#define PCIE_ESM_CAP_5__ESM_19P4G__SHIFT 0x4 +#define PCIE_ESM_CAP_5__ESM_19P5G__SHIFT 0x5 +#define PCIE_ESM_CAP_5__ESM_19P6G__SHIFT 0x6 +#define PCIE_ESM_CAP_5__ESM_19P7G__SHIFT 0x7 +#define PCIE_ESM_CAP_5__ESM_19P8G__SHIFT 0x8 +#define PCIE_ESM_CAP_5__ESM_19P9G__SHIFT 0x9 +#define PCIE_ESM_CAP_5__ESM_20P0G__SHIFT 0xa +#define PCIE_ESM_CAP_5__ESM_20P1G__SHIFT 0xb +#define PCIE_ESM_CAP_5__ESM_20P2G__SHIFT 0xc +#define PCIE_ESM_CAP_5__ESM_20P3G__SHIFT 0xd +#define PCIE_ESM_CAP_5__ESM_20P4G__SHIFT 0xe +#define PCIE_ESM_CAP_5__ESM_20P5G__SHIFT 0xf +#define PCIE_ESM_CAP_5__ESM_20P6G__SHIFT 0x10 +#define PCIE_ESM_CAP_5__ESM_20P7G__SHIFT 0x11 +#define PCIE_ESM_CAP_5__ESM_20P8G__SHIFT 0x12 +#define PCIE_ESM_CAP_5__ESM_20P9G__SHIFT 0x13 +#define PCIE_ESM_CAP_5__ESM_21P0G__SHIFT 0x14 +#define PCIE_ESM_CAP_5__ESM_21P1G__SHIFT 0x15 +#define PCIE_ESM_CAP_5__ESM_21P2G__SHIFT 0x16 +#define PCIE_ESM_CAP_5__ESM_21P3G__SHIFT 0x17 +#define PCIE_ESM_CAP_5__ESM_21P4G__SHIFT 0x18 +#define PCIE_ESM_CAP_5__ESM_21P5G__SHIFT 0x19 +#define PCIE_ESM_CAP_5__ESM_21P6G__SHIFT 0x1a +#define PCIE_ESM_CAP_5__ESM_21P7G__SHIFT 0x1b +#define PCIE_ESM_CAP_5__ESM_21P8G__SHIFT 0x1c +#define PCIE_ESM_CAP_5__ESM_21P9G__SHIFT 0x1d +#define PCIE_ESM_CAP_5__ESM_19P0G_MASK 0x00000001L +#define PCIE_ESM_CAP_5__ESM_19P1G_MASK 0x00000002L +#define PCIE_ESM_CAP_5__ESM_19P2G_MASK 0x00000004L +#define PCIE_ESM_CAP_5__ESM_19P3G_MASK 0x00000008L +#define PCIE_ESM_CAP_5__ESM_19P4G_MASK 0x00000010L +#define PCIE_ESM_CAP_5__ESM_19P5G_MASK 0x00000020L +#define PCIE_ESM_CAP_5__ESM_19P6G_MASK 0x00000040L +#define PCIE_ESM_CAP_5__ESM_19P7G_MASK 0x00000080L +#define PCIE_ESM_CAP_5__ESM_19P8G_MASK 0x00000100L +#define PCIE_ESM_CAP_5__ESM_19P9G_MASK 0x00000200L +#define PCIE_ESM_CAP_5__ESM_20P0G_MASK 0x00000400L +#define PCIE_ESM_CAP_5__ESM_20P1G_MASK 0x00000800L +#define PCIE_ESM_CAP_5__ESM_20P2G_MASK 0x00001000L +#define PCIE_ESM_CAP_5__ESM_20P3G_MASK 0x00002000L +#define PCIE_ESM_CAP_5__ESM_20P4G_MASK 0x00004000L +#define PCIE_ESM_CAP_5__ESM_20P5G_MASK 0x00008000L +#define PCIE_ESM_CAP_5__ESM_20P6G_MASK 0x00010000L +#define PCIE_ESM_CAP_5__ESM_20P7G_MASK 0x00020000L +#define PCIE_ESM_CAP_5__ESM_20P8G_MASK 0x00040000L +#define PCIE_ESM_CAP_5__ESM_20P9G_MASK 0x00080000L +#define PCIE_ESM_CAP_5__ESM_21P0G_MASK 0x00100000L +#define PCIE_ESM_CAP_5__ESM_21P1G_MASK 0x00200000L +#define PCIE_ESM_CAP_5__ESM_21P2G_MASK 0x00400000L +#define PCIE_ESM_CAP_5__ESM_21P3G_MASK 0x00800000L +#define PCIE_ESM_CAP_5__ESM_21P4G_MASK 0x01000000L +#define PCIE_ESM_CAP_5__ESM_21P5G_MASK 0x02000000L +#define PCIE_ESM_CAP_5__ESM_21P6G_MASK 0x04000000L +#define PCIE_ESM_CAP_5__ESM_21P7G_MASK 0x08000000L +#define PCIE_ESM_CAP_5__ESM_21P8G_MASK 0x10000000L +#define PCIE_ESM_CAP_5__ESM_21P9G_MASK 0x20000000L +//PCIE_ESM_CAP_6 +#define PCIE_ESM_CAP_6__ESM_22P0G__SHIFT 0x0 +#define PCIE_ESM_CAP_6__ESM_22P1G__SHIFT 0x1 +#define PCIE_ESM_CAP_6__ESM_22P2G__SHIFT 0x2 +#define PCIE_ESM_CAP_6__ESM_22P3G__SHIFT 0x3 +#define PCIE_ESM_CAP_6__ESM_22P4G__SHIFT 0x4 +#define PCIE_ESM_CAP_6__ESM_22P5G__SHIFT 0x5 +#define PCIE_ESM_CAP_6__ESM_22P6G__SHIFT 0x6 +#define PCIE_ESM_CAP_6__ESM_22P7G__SHIFT 0x7 +#define PCIE_ESM_CAP_6__ESM_22P8G__SHIFT 0x8 +#define PCIE_ESM_CAP_6__ESM_22P9G__SHIFT 0x9 +#define 
PCIE_ESM_CAP_6__ESM_23P0G__SHIFT 0xa +#define PCIE_ESM_CAP_6__ESM_23P1G__SHIFT 0xb +#define PCIE_ESM_CAP_6__ESM_23P2G__SHIFT 0xc +#define PCIE_ESM_CAP_6__ESM_23P3G__SHIFT 0xd +#define PCIE_ESM_CAP_6__ESM_23P4G__SHIFT 0xe +#define PCIE_ESM_CAP_6__ESM_23P5G__SHIFT 0xf +#define PCIE_ESM_CAP_6__ESM_23P6G__SHIFT 0x10 +#define PCIE_ESM_CAP_6__ESM_23P7G__SHIFT 0x11 +#define PCIE_ESM_CAP_6__ESM_23P8G__SHIFT 0x12 +#define PCIE_ESM_CAP_6__ESM_23P9G__SHIFT 0x13 +#define PCIE_ESM_CAP_6__ESM_24P0G__SHIFT 0x14 +#define PCIE_ESM_CAP_6__ESM_24P1G__SHIFT 0x15 +#define PCIE_ESM_CAP_6__ESM_24P2G__SHIFT 0x16 +#define PCIE_ESM_CAP_6__ESM_24P3G__SHIFT 0x17 +#define PCIE_ESM_CAP_6__ESM_24P4G__SHIFT 0x18 +#define PCIE_ESM_CAP_6__ESM_24P5G__SHIFT 0x19 +#define PCIE_ESM_CAP_6__ESM_24P6G__SHIFT 0x1a +#define PCIE_ESM_CAP_6__ESM_24P7G__SHIFT 0x1b +#define PCIE_ESM_CAP_6__ESM_24P8G__SHIFT 0x1c +#define PCIE_ESM_CAP_6__ESM_24P9G__SHIFT 0x1d +#define PCIE_ESM_CAP_6__ESM_22P0G_MASK 0x00000001L +#define PCIE_ESM_CAP_6__ESM_22P1G_MASK 0x00000002L +#define PCIE_ESM_CAP_6__ESM_22P2G_MASK 0x00000004L +#define PCIE_ESM_CAP_6__ESM_22P3G_MASK 0x00000008L +#define PCIE_ESM_CAP_6__ESM_22P4G_MASK 0x00000010L +#define PCIE_ESM_CAP_6__ESM_22P5G_MASK 0x00000020L +#define PCIE_ESM_CAP_6__ESM_22P6G_MASK 0x00000040L +#define PCIE_ESM_CAP_6__ESM_22P7G_MASK 0x00000080L +#define PCIE_ESM_CAP_6__ESM_22P8G_MASK 0x00000100L +#define PCIE_ESM_CAP_6__ESM_22P9G_MASK 0x00000200L +#define PCIE_ESM_CAP_6__ESM_23P0G_MASK 0x00000400L +#define PCIE_ESM_CAP_6__ESM_23P1G_MASK 0x00000800L +#define PCIE_ESM_CAP_6__ESM_23P2G_MASK 0x00001000L +#define PCIE_ESM_CAP_6__ESM_23P3G_MASK 0x00002000L +#define PCIE_ESM_CAP_6__ESM_23P4G_MASK 0x00004000L +#define PCIE_ESM_CAP_6__ESM_23P5G_MASK 0x00008000L +#define PCIE_ESM_CAP_6__ESM_23P6G_MASK 0x00010000L +#define PCIE_ESM_CAP_6__ESM_23P7G_MASK 0x00020000L +#define PCIE_ESM_CAP_6__ESM_23P8G_MASK 0x00040000L +#define PCIE_ESM_CAP_6__ESM_23P9G_MASK 0x00080000L +#define PCIE_ESM_CAP_6__ESM_24P0G_MASK 0x00100000L +#define PCIE_ESM_CAP_6__ESM_24P1G_MASK 0x00200000L +#define PCIE_ESM_CAP_6__ESM_24P2G_MASK 0x00400000L +#define PCIE_ESM_CAP_6__ESM_24P3G_MASK 0x00800000L +#define PCIE_ESM_CAP_6__ESM_24P4G_MASK 0x01000000L +#define PCIE_ESM_CAP_6__ESM_24P5G_MASK 0x02000000L +#define PCIE_ESM_CAP_6__ESM_24P6G_MASK 0x04000000L +#define PCIE_ESM_CAP_6__ESM_24P7G_MASK 0x08000000L +#define PCIE_ESM_CAP_6__ESM_24P8G_MASK 0x10000000L +#define PCIE_ESM_CAP_6__ESM_24P9G_MASK 0x20000000L +//PCIE_ESM_CAP_7 +#define PCIE_ESM_CAP_7__ESM_25P0G__SHIFT 0x0 +#define PCIE_ESM_CAP_7__ESM_25P1G__SHIFT 0x1 +#define PCIE_ESM_CAP_7__ESM_25P2G__SHIFT 0x2 +#define PCIE_ESM_CAP_7__ESM_25P3G__SHIFT 0x3 +#define PCIE_ESM_CAP_7__ESM_25P4G__SHIFT 0x4 +#define PCIE_ESM_CAP_7__ESM_25P5G__SHIFT 0x5 +#define PCIE_ESM_CAP_7__ESM_25P6G__SHIFT 0x6 +#define PCIE_ESM_CAP_7__ESM_25P7G__SHIFT 0x7 +#define PCIE_ESM_CAP_7__ESM_25P8G__SHIFT 0x8 +#define PCIE_ESM_CAP_7__ESM_25P9G__SHIFT 0x9 +#define PCIE_ESM_CAP_7__ESM_26P0G__SHIFT 0xa +#define PCIE_ESM_CAP_7__ESM_26P1G__SHIFT 0xb +#define PCIE_ESM_CAP_7__ESM_26P2G__SHIFT 0xc +#define PCIE_ESM_CAP_7__ESM_26P3G__SHIFT 0xd +#define PCIE_ESM_CAP_7__ESM_26P4G__SHIFT 0xe +#define PCIE_ESM_CAP_7__ESM_26P5G__SHIFT 0xf +#define PCIE_ESM_CAP_7__ESM_26P6G__SHIFT 0x10 +#define PCIE_ESM_CAP_7__ESM_26P7G__SHIFT 0x11 +#define PCIE_ESM_CAP_7__ESM_26P8G__SHIFT 0x12 +#define PCIE_ESM_CAP_7__ESM_26P9G__SHIFT 0x13 +#define PCIE_ESM_CAP_7__ESM_27P0G__SHIFT 0x14 +#define PCIE_ESM_CAP_7__ESM_27P1G__SHIFT 0x15 +#define 
PCIE_ESM_CAP_7__ESM_27P2G__SHIFT 0x16 +#define PCIE_ESM_CAP_7__ESM_27P3G__SHIFT 0x17 +#define PCIE_ESM_CAP_7__ESM_27P4G__SHIFT 0x18 +#define PCIE_ESM_CAP_7__ESM_27P5G__SHIFT 0x19 +#define PCIE_ESM_CAP_7__ESM_27P6G__SHIFT 0x1a +#define PCIE_ESM_CAP_7__ESM_27P7G__SHIFT 0x1b +#define PCIE_ESM_CAP_7__ESM_27P8G__SHIFT 0x1c +#define PCIE_ESM_CAP_7__ESM_27P9G__SHIFT 0x1d +#define PCIE_ESM_CAP_7__ESM_28P0G__SHIFT 0x1e +#define PCIE_ESM_CAP_7__ESM_25P0G_MASK 0x00000001L +#define PCIE_ESM_CAP_7__ESM_25P1G_MASK 0x00000002L +#define PCIE_ESM_CAP_7__ESM_25P2G_MASK 0x00000004L +#define PCIE_ESM_CAP_7__ESM_25P3G_MASK 0x00000008L +#define PCIE_ESM_CAP_7__ESM_25P4G_MASK 0x00000010L +#define PCIE_ESM_CAP_7__ESM_25P5G_MASK 0x00000020L +#define PCIE_ESM_CAP_7__ESM_25P6G_MASK 0x00000040L +#define PCIE_ESM_CAP_7__ESM_25P7G_MASK 0x00000080L +#define PCIE_ESM_CAP_7__ESM_25P8G_MASK 0x00000100L +#define PCIE_ESM_CAP_7__ESM_25P9G_MASK 0x00000200L +#define PCIE_ESM_CAP_7__ESM_26P0G_MASK 0x00000400L +#define PCIE_ESM_CAP_7__ESM_26P1G_MASK 0x00000800L +#define PCIE_ESM_CAP_7__ESM_26P2G_MASK 0x00001000L +#define PCIE_ESM_CAP_7__ESM_26P3G_MASK 0x00002000L +#define PCIE_ESM_CAP_7__ESM_26P4G_MASK 0x00004000L +#define PCIE_ESM_CAP_7__ESM_26P5G_MASK 0x00008000L +#define PCIE_ESM_CAP_7__ESM_26P6G_MASK 0x00010000L +#define PCIE_ESM_CAP_7__ESM_26P7G_MASK 0x00020000L +#define PCIE_ESM_CAP_7__ESM_26P8G_MASK 0x00040000L +#define PCIE_ESM_CAP_7__ESM_26P9G_MASK 0x00080000L +#define PCIE_ESM_CAP_7__ESM_27P0G_MASK 0x00100000L +#define PCIE_ESM_CAP_7__ESM_27P1G_MASK 0x00200000L +#define PCIE_ESM_CAP_7__ESM_27P2G_MASK 0x00400000L +#define PCIE_ESM_CAP_7__ESM_27P3G_MASK 0x00800000L +#define PCIE_ESM_CAP_7__ESM_27P4G_MASK 0x01000000L +#define PCIE_ESM_CAP_7__ESM_27P5G_MASK 0x02000000L +#define PCIE_ESM_CAP_7__ESM_27P6G_MASK 0x04000000L +#define PCIE_ESM_CAP_7__ESM_27P7G_MASK 0x08000000L +#define PCIE_ESM_CAP_7__ESM_27P8G_MASK 0x10000000L +#define PCIE_ESM_CAP_7__ESM_27P9G_MASK 0x20000000L +#define PCIE_ESM_CAP_7__ESM_28P0G_MASK 0x40000000L +//PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST +#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PSWUSCFG0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_DATA_LINK_FEATURE_CAP +#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SCALED_FLOW_CONTROL_SUPPORTED__SHIFT 0x0 +#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_22_1__SHIFT 0x1 +#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f +#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SCALED_FLOW_CONTROL_SUPPORTED_MASK 0x00000001L +#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_22_1_MASK 0x007FFFFEL +#define PSWUSCFG0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L +//PSWUSCFG0_DATA_LINK_FEATURE_STATUS +#define PSWUSCFG0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0 +#define PSWUSCFG0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f +#define PSWUSCFG0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL +#define PSWUSCFG0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L +//PCIE_PHY_16GT_ENH_CAP_LIST +#define PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 
0x14 +#define PCIE_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PCIE_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PCIE_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_LINK_CAP_16GT +#define PSWUSCFG0_LINK_CAP_16GT__RESERVED__SHIFT 0x0 +#define PSWUSCFG0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL +//PSWUSCFG0_LINK_CNTL_16GT +#define PSWUSCFG0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0 +#define PSWUSCFG0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL +//PSWUSCFG0_LINK_STATUS_16GT +#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0 +#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1 +#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2 +#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3 +#define PSWUSCFG0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4 +#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L +#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L +#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L +#define PSWUSCFG0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L +#define PSWUSCFG0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L +//PSWUSCFG0_LOCAL_PARITY_MISMATCH_STATUS_16GT +#define PSWUSCFG0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define PSWUSCFG0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//PSWUSCFG0_RTM1_PARITY_MISMATCH_STATUS_16GT +#define PSWUSCFG0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define PSWUSCFG0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//PSWUSCFG0_RTM2_PARITY_MISMATCH_STATUS_16GT +#define PSWUSCFG0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define PSWUSCFG0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL 
+#define PSWUSCFG0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL +#define 
PSWUSCFG0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L +//PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT +#define PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4 +#define PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL +#define PSWUSCFG0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L +//PCIE_MARGINING_ENH_CAP_LIST +#define PCIE_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define PCIE_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define PCIE_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define PCIE_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define PCIE_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//PSWUSCFG0_MARGINING_PORT_CAP +#define PSWUSCFG0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0 +#define PSWUSCFG0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L +//PSWUSCFG0_MARGINING_PORT_STATUS +#define PSWUSCFG0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0 +#define PSWUSCFG0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1 +#define PSWUSCFG0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L +#define PSWUSCFG0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L +//PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L +#define 
PSWUSCFG0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L +#define 
PSWUSCFG0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L +#define 
PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L 
+#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define 
PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 
0x8 +#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL +#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0 +#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3 +#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6 +#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8 +#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L +#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L +#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L +#define PSWUSCFG0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L +//PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS +#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define 
PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6 +#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L +#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L +#define PSWUSCFG0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp +//BIF_CFG_DEV0_EPF0_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_COMMAND +#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_0_STATUS +#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define 
BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_LATENCY +#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_HEADER +#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_0_BIST +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 
0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_MIN_GRANT +#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_MAX_LATENCY +#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L +//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PMI_CAP +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L +//BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 
0x00040000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define 
BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L +//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL +//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L 
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 
0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x1FL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL 
+//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L +//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY +#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC +#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L 
+//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 
0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0x00FFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION_MASK 0x00000600L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE_MASK 0x07FF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN_MASK 0x00000300L +//BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3 +#define 
BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L +//BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL +#define 
BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL +#define 
BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L +//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L +//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3 
+#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define 
BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L 
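Every register in this header follows the same field-encoding convention: each field gets a __SHIFT define giving its bit offset and a _MASK define giving its in-place bit mask. A minimal C sketch of how such a pair is typically consumed follows; the field_get()/field_set() helpers are illustrative stand-ins and are not part of this patch, using the LANE_0 margining control defines above as the example field.

/* Illustrative helpers only -- not part of this patch.  They show the usual
 * way a __SHIFT/_MASK pair from this header is combined to read or update a
 * register field, here the LANE_0_MARGIN_TYPE field of
 * BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL.
 */
#include <stdint.h>

static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

static inline uint32_t lane0_margin_type(uint32_t lane_cntl)
{
	return field_get(lane_cntl,
			 BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK,
			 BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT);
}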
+//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L +#define 
BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS +#define 
BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL 
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L 
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5 
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 
0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET_MASK 0x0000FF00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET_MASK 0x00FF0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp +//BIF_CFG_DEV0_EPF1_0_VENDOR_ID +#define 
BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_COMMAND +#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF1_0_STATUS +#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_REVISION_ID +#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_LATENCY +#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_HEADER +#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF1_0_BIST +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_CAP_PTR +#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_MIN_GRANT +#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_MAX_LATENCY +#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L +//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PMI_CAP +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L +//BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L +#define 
BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define 
BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF1_0_LINK_CAP +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define 
BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF1_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define 
BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define 
BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF1_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF1_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 
0x0020L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF1_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF1_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_MASK +#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L 
+//BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF1_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L +//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL +//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x1FL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc 
+#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L 
+#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 
0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L +//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L +//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY +#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC +#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 
0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0x00FFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION_MASK 0x00000600L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE_MASK 0x07FF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN_MASK 0x00000300L +//BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L +//BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT +#define 
BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT +#define 
BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L 
+//BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L +//BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L +//BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L +#define 
BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3 +#define 
BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L +#define 
BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS +#define 
BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define 
BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L +#define 
BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L 
+//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET_MASK 0x0000FF00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET_MASK 0x00FF0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5_MASK 
0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_swds_bifcfgdecp +//BIF_CFG_DEV0_SWDS0_VENDOR_ID +#define BIF_CFG_DEV0_SWDS0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_DEVICE_ID +#define BIF_CFG_DEV0_SWDS0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_COMMAND +#define BIF_CFG_DEV0_SWDS0_COMMAND__IOEN_DN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_COMMAND__MEMEN_DN__SHIFT 0x1 +#define 
BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_COMMAND__IOEN_DN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_COMMAND__MEMEN_DN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_SWDS0_STATUS +#define BIF_CFG_DEV0_SWDS0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_SWDS0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_REVISION_ID +#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_PROG_INTERFACE +#define BIF_CFG_DEV0_SWDS0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_SUB_CLASS +#define BIF_CFG_DEV0_SWDS0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_BASE_CLASS +#define 
BIF_CFG_DEV0_SWDS0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_CACHE_LINE +#define BIF_CFG_DEV0_SWDS0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_LATENCY +#define BIF_CFG_DEV0_SWDS0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_HEADER +#define BIF_CFG_DEV0_SWDS0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_SWDS0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_SWDS0_BIST +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_SWDS0_BASE_ADDR_1 +#define BIF_CFG_DEV0_SWDS0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS_MASK 0x000000FFL +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_TYPE__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_TYPE_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_TYPE_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L +//BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define 
BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER +#define BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L +//BIF_CFG_DEV0_SWDS0_CAP_PTR +#define BIF_CFG_DEV0_SWDS0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE +#define BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN +#define BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7 +#define 
BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L +//BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_PMI_CAP +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_CLOCK__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__AUX_CURRENT__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D1_SUPPORT__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D2_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_SUPPORT__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__VERSION_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_CLOCK_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D1_SUPPORT_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D2_SUPPORT_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_SUPPORT_MASK 0xF800L +//BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_PCIE_CAP +#define 
BIF_CFG_DEV0_SWDS0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_SWDS0_DEVICE_CAP +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_SWDS0_DEVICE_CNTL +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN_MASK 0x8000L 
+//BIF_CFG_DEV0_SWDS0_DEVICE_STATUS +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_SWDS0_LINK_CAP +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_LINK_CNTL +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define 
BIF_CFG_DEV0_SWDS0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_SWDS0_LINK_STATUS +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_SLOT_CAP +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_BUTTON_PRESENT__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_CONTROLLER_PRESENT__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__MRL_SENSOR_PRESENT__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_INDICATOR_PRESENT__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_INDICATOR_PRESENT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_SURPRISE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_CAPABLE__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PHYSICAL_SLOT_NUM__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_BUTTON_PRESENT_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_CONTROLLER_PRESENT_MASK 0x00000002L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__MRL_SENSOR_PRESENT_MASK 0x00000004L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_INDICATOR_PRESENT_MASK 0x00000008L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_INDICATOR_PRESENT_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_SURPRISE_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_CAPABLE_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE_MASK 0x00007F80L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE_MASK 0x00018000L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PHYSICAL_SLOT_NUM_MASK 0xFFF80000L +//BIF_CFG_DEV0_SWDS0_SLOT_CNTL +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_FAULT_DETECTED_EN__SHIFT 0x1 +#define 
BIF_CFG_DEV0_SWDS0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__HOTPLUG_INTR_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_INDICATOR_CNTL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_INDICATOR_CNTL__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_CONTROLLER_CNTL__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__DL_STATE_CHANGED_EN__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_FAULT_DETECTED_EN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__HOTPLUG_INTR_EN_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_INDICATOR_CNTL_MASK 0x00C0L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_INDICATOR_CNTL_MASK 0x0300L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_CONTROLLER_CNTL_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__DL_STATE_CHANGED_EN_MASK 0x1000L +//BIF_CFG_DEV0_SWDS0_SLOT_STATUS +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ATTN_BUTTON_PRESSED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PWR_FAULT_DETECTED__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_CHANGED__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_CHANGED__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__COMMAND_COMPLETED__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_STATE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_STATE__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__DL_STATE_CHANGED__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ATTN_BUTTON_PRESSED_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PWR_FAULT_DETECTED_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_CHANGED_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_CHANGED_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__COMMAND_COMPLETED_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_STATE_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_STATE_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__DL_STATE_CHANGED_MASK 0x0100L +//BIF_CFG_DEV0_SWDS0_DEVICE_CAP2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define 
BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define 
BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_LINK_CAP2 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_SWDS0_LINK_CNTL2 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_SWDS0_LINK_STATUS2 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define 
BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_SWDS0_SLOT_CAP2 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_SLOT_CNTL2 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_SLOT_STATUS2 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_SSID_CAP +#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_ID_MASK 0xFFFF0000L 
+//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L +//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL +//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 
0x0001L +//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define 
BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define 
BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L 
+#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L 
+//BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define 
BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3 +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define 
BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L 
+//BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define 
BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L 
+//BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L +#define 
BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L +//BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT +#define BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT +#define 
BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L +//BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT +#define 
BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT +#define 
BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L +//BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L +//BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L 
+//BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L +#define 
BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0 +#define 
BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L +#define 
BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS +#define 
BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L +#define 
BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf0_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF0_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define 
BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF0_0_BIST +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 
0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L 
+#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define 
BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO +#define 
BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L 
+//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL 
+#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf1_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF1_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF1_0_BIST +#define 
BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L 
+//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf2_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID +#define 
BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF2_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define 
BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF2_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF2_0_BIST +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L 
+#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY 
+#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf3_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF3_0_STATUS +#define 
BIF_CFG_DEV0_EPF0_VF3_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF3_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF3_0_BIST +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_STRT_MASK 0x40L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L 
+#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define 
BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L 
+#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf4_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL 
+//BIF_CFG_DEV0_EPF0_VF4_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF4_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define 
BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF4_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF4_0_BIST +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define 
BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define 
BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define 
BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L 
+#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define 
BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define 
BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define 
BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define 
BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf5_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF5_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF5_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF5_0_BIST +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__PHANTOM_FUNC_MASK 
0x00000018L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP +#define 
BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__CAP_ID_MASK 
0x00FFL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA +#define 
BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L 
+#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf6_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF6_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS +#define 
BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF6_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF6_0_BIST +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define 
BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define 
BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L 
+#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 
+#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L 
+#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2 
+#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 
0xFF00L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf7_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF7_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__CAP_LIST_MASK 
0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF7_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF7_0_BIST +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL +#define 
BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf8_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define 
BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF8_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF8_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LATENCY__LATENCY_TIMER_MASK 0xFFL 
+//BIF_CFG_DEV0_EPF0_VF8_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF8_0_BIST +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__VERSION_MASK 0x000FL +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS +#define 
BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define 
BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define 
BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 
0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L 
+#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L
+
+
+// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf9_bifcfgdecp
+//BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID
+#define BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID
+#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL
+//BIF_CFG_DEV0_EPF0_VF9_0_COMMAND
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__AD_STEPPING__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SERR_EN__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__FAST_B2B_EN__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__INT_DIS__SHIFT 0xa
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__AD_STEPPING_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SERR_EN_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__FAST_B2B_EN_MASK 0x0200L
+#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__INT_DIS_MASK 0x0400L
+//BIF_CFG_DEV0_EPF0_VF9_0_STATUS
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__INT_STATUS__SHIFT 0x3
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__CAP_LIST__SHIFT 0x4
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PCI_66_CAP__SHIFT 0x5
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__DEVSEL_TIMING__SHIFT 0x9
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__INT_STATUS_MASK 0x0008L
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__CAP_LIST_MASK 0x0010L
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PCI_66_CAP_MASK 0x0020L
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__DEVSEL_TIMING_MASK 0x0600L
+#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L
+#define 
BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF9_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF9_0_BIST +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define 
BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define 
BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL 
+#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf10_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 
0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF10_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF10_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__DEVICE_TYPE_MASK 0x80L 
+//BIF_CFG_DEV0_EPF0_VF10_0_BIST +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP +#define 
BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define 
BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define 
BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define 
BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define 
BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define 
BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf11_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF11_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf 
+#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF11_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF11_0_BIST +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define 
BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define 
BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define 
BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L 
+//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf12_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL 
+//BIF_CFG_DEV0_EPF0_VF12_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF12_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF12_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF12_0_BIST +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL 
+//BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define 
BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L 
+#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI +#define 
BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc 
+#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf13_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define 
BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF13_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF13_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__HEADER_TYPE_MASK 0x7FL 
+#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF13_0_BIST +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L 
+//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__FATAL_ERR__SHIFT 
0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define 
BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define 
BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define 
BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define 
BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define 
BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf14_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF14_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf 
+#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF14_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF14_0_BIST +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define 
BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define 
BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define 
BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L 
+//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3 +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf15_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL 
+//BIF_CFG_DEV0_EPF0_VF15_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF15_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF15_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF15_0_BIST +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL 
+//BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define 
BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L 
+#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI +#define 
BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define 
BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define 
BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc 
+#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL +#define 
BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_bx_pf_SYSPFVFDEC +//MM_INDEX +#define MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define MM_INDEX__MM_APER__SHIFT 0x1f +#define MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define MM_INDEX__MM_APER_MASK 0x80000000L +//MM_DATA +#define MM_DATA__MM_DATA__SHIFT 0x0 +#define MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//MM_INDEX_HI +#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_bif_bx_SYSDEC +//SYSHUB_INDEX_OVLP +#define SYSHUB_INDEX_OVLP__SYSHUB_OFFSET__SHIFT 0x0 +#define SYSHUB_INDEX_OVLP__SYSHUB_OFFSET_MASK 0x003FFFFFL +//SYSHUB_DATA_OVLP +#define SYSHUB_DATA_OVLP__SYSHUB_DATA__SHIFT 0x0 +#define SYSHUB_DATA_OVLP__SYSHUB_DATA_MASK 0xFFFFFFFFL +//PCIE_INDEX +#define PCIE_INDEX__PCIE_INDEX__SHIFT 0x0 +#define PCIE_INDEX__PCIE_INDEX_MASK 0xFFFFFFFFL +//PCIE_DATA +#define PCIE_DATA__PCIE_DATA__SHIFT 0x0 +#define PCIE_DATA__PCIE_DATA_MASK 0xFFFFFFFFL +//PCIE_INDEX2 +#define PCIE_INDEX2__PCIE_INDEX2__SHIFT 0x0 +#define PCIE_INDEX2__PCIE_INDEX2_MASK 0xFFFFFFFFL +//PCIE_DATA2 +#define PCIE_DATA2__PCIE_DATA2__SHIFT 0x0 +#define PCIE_DATA2__PCIE_DATA2_MASK 0xFFFFFFFFL +//SBIOS_SCRATCH_0 +#define SBIOS_SCRATCH_0__SBIOS_SCRATCH_DW__SHIFT 0x0 +#define SBIOS_SCRATCH_0__SBIOS_SCRATCH_DW_MASK 0xFFFFFFFFL +//SBIOS_SCRATCH_1 +#define SBIOS_SCRATCH_1__SBIOS_SCRATCH_DW__SHIFT 0x0 +#define SBIOS_SCRATCH_1__SBIOS_SCRATCH_DW_MASK 0xFFFFFFFFL +//SBIOS_SCRATCH_2 +#define SBIOS_SCRATCH_2__SBIOS_SCRATCH_DW__SHIFT 0x0 +#define SBIOS_SCRATCH_2__SBIOS_SCRATCH_DW_MASK 0xFFFFFFFFL +//SBIOS_SCRATCH_3 +#define SBIOS_SCRATCH_3__SBIOS_SCRATCH_DW__SHIFT 0x0 +#define SBIOS_SCRATCH_3__SBIOS_SCRATCH_DW_MASK 
0xFFFFFFFFL +//BIOS_SCRATCH_0 +#define BIOS_SCRATCH_0__BIOS_SCRATCH_0__SHIFT 0x0 +#define BIOS_SCRATCH_0__BIOS_SCRATCH_0_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_1 +#define BIOS_SCRATCH_1__BIOS_SCRATCH_1__SHIFT 0x0 +#define BIOS_SCRATCH_1__BIOS_SCRATCH_1_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_2 +#define BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0 +#define BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_3 +#define BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0 +#define BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_4 +#define BIOS_SCRATCH_4__BIOS_SCRATCH_4__SHIFT 0x0 +#define BIOS_SCRATCH_4__BIOS_SCRATCH_4_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_5 +#define BIOS_SCRATCH_5__BIOS_SCRATCH_5__SHIFT 0x0 +#define BIOS_SCRATCH_5__BIOS_SCRATCH_5_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_6 +#define BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0 +#define BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_7 +#define BIOS_SCRATCH_7__BIOS_SCRATCH_7__SHIFT 0x0 +#define BIOS_SCRATCH_7__BIOS_SCRATCH_7_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_8 +#define BIOS_SCRATCH_8__BIOS_SCRATCH_8__SHIFT 0x0 +#define BIOS_SCRATCH_8__BIOS_SCRATCH_8_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_9 +#define BIOS_SCRATCH_9__BIOS_SCRATCH_9__SHIFT 0x0 +#define BIOS_SCRATCH_9__BIOS_SCRATCH_9_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_10 +#define BIOS_SCRATCH_10__BIOS_SCRATCH_10__SHIFT 0x0 +#define BIOS_SCRATCH_10__BIOS_SCRATCH_10_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_11 +#define BIOS_SCRATCH_11__BIOS_SCRATCH_11__SHIFT 0x0 +#define BIOS_SCRATCH_11__BIOS_SCRATCH_11_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_12 +#define BIOS_SCRATCH_12__BIOS_SCRATCH_12__SHIFT 0x0 +#define BIOS_SCRATCH_12__BIOS_SCRATCH_12_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_13 +#define BIOS_SCRATCH_13__BIOS_SCRATCH_13__SHIFT 0x0 +#define BIOS_SCRATCH_13__BIOS_SCRATCH_13_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_14 +#define BIOS_SCRATCH_14__BIOS_SCRATCH_14__SHIFT 0x0 +#define BIOS_SCRATCH_14__BIOS_SCRATCH_14_MASK 0xFFFFFFFFL +//BIOS_SCRATCH_15 +#define BIOS_SCRATCH_15__BIOS_SCRATCH_15__SHIFT 0x0 +#define BIOS_SCRATCH_15__BIOS_SCRATCH_15_MASK 0xFFFFFFFFL +//BIF_RLC_INTR_CNTL +#define BIF_RLC_INTR_CNTL__RLC_CMD_COMPLETE__SHIFT 0x0 +#define BIF_RLC_INTR_CNTL__RLC_HANG_SELF_RECOVERED__SHIFT 0x1 +#define BIF_RLC_INTR_CNTL__RLC_HANG_NEED_FLR__SHIFT 0x2 +#define BIF_RLC_INTR_CNTL__RLC_VM_BUSY_TRANSITION__SHIFT 0x3 +#define BIF_RLC_INTR_CNTL__RLC_CMD_COMPLETE_MASK 0x00000001L +#define BIF_RLC_INTR_CNTL__RLC_HANG_SELF_RECOVERED_MASK 0x00000002L +#define BIF_RLC_INTR_CNTL__RLC_HANG_NEED_FLR_MASK 0x00000004L +#define BIF_RLC_INTR_CNTL__RLC_VM_BUSY_TRANSITION_MASK 0x00000008L +//BIF_VCE_INTR_CNTL +#define BIF_VCE_INTR_CNTL__VCE_CMD_COMPLETE__SHIFT 0x0 +#define BIF_VCE_INTR_CNTL__VCE_HANG_SELF_RECOVERED__SHIFT 0x1 +#define BIF_VCE_INTR_CNTL__VCE_HANG_NEED_FLR__SHIFT 0x2 +#define BIF_VCE_INTR_CNTL__VCE_VM_BUSY_TRANSITION__SHIFT 0x3 +#define BIF_VCE_INTR_CNTL__VCE_CMD_COMPLETE_MASK 0x00000001L +#define BIF_VCE_INTR_CNTL__VCE_HANG_SELF_RECOVERED_MASK 0x00000002L +#define BIF_VCE_INTR_CNTL__VCE_HANG_NEED_FLR_MASK 0x00000004L +#define BIF_VCE_INTR_CNTL__VCE_VM_BUSY_TRANSITION_MASK 0x00000008L +//BIF_UVD_INTR_CNTL +#define BIF_UVD_INTR_CNTL__UVD_CMD_COMPLETE__SHIFT 0x0 +#define BIF_UVD_INTR_CNTL__UVD_HANG_SELF_RECOVERED__SHIFT 0x1 +#define BIF_UVD_INTR_CNTL__UVD_HANG_NEED_FLR__SHIFT 0x2 +#define BIF_UVD_INTR_CNTL__UVD_VM_BUSY_TRANSITION__SHIFT 0x3 +#define BIF_UVD_INTR_CNTL__UVD_INST_SEL__SHIFT 0x1c +#define BIF_UVD_INTR_CNTL__UVD_CMD_COMPLETE_MASK 0x00000001L +#define BIF_UVD_INTR_CNTL__UVD_HANG_SELF_RECOVERED_MASK 0x00000002L +#define 
BIF_UVD_INTR_CNTL__UVD_HANG_NEED_FLR_MASK 0x00000004L +#define BIF_UVD_INTR_CNTL__UVD_VM_BUSY_TRANSITION_MASK 0x00000008L +#define BIF_UVD_INTR_CNTL__UVD_INST_SEL_MASK 0xF0000000L +//GFX_MMIOREG_CAM_ADDR0 +#define GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ADDR0__CAM_ADDR0_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_REMAP_ADDR0 +#define GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0__SHIFT 0x0 +#define GFX_MMIOREG_CAM_REMAP_ADDR0__CAM_REMAP_ADDR0_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_ADDR1 +#define GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ADDR1__CAM_ADDR1_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_REMAP_ADDR1 +#define GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1__SHIFT 0x0 +#define GFX_MMIOREG_CAM_REMAP_ADDR1__CAM_REMAP_ADDR1_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_ADDR2 +#define GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ADDR2__CAM_ADDR2_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_REMAP_ADDR2 +#define GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2__SHIFT 0x0 +#define GFX_MMIOREG_CAM_REMAP_ADDR2__CAM_REMAP_ADDR2_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_ADDR3 +#define GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ADDR3__CAM_ADDR3_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_REMAP_ADDR3 +#define GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3__SHIFT 0x0 +#define GFX_MMIOREG_CAM_REMAP_ADDR3__CAM_REMAP_ADDR3_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_ADDR4 +#define GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ADDR4__CAM_ADDR4_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_REMAP_ADDR4 +#define GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4__SHIFT 0x0 +#define GFX_MMIOREG_CAM_REMAP_ADDR4__CAM_REMAP_ADDR4_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_ADDR5 +#define GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ADDR5__CAM_ADDR5_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_REMAP_ADDR5 +#define GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5__SHIFT 0x0 +#define GFX_MMIOREG_CAM_REMAP_ADDR5__CAM_REMAP_ADDR5_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_ADDR6 +#define GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ADDR6__CAM_ADDR6_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_REMAP_ADDR6 +#define GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6__SHIFT 0x0 +#define GFX_MMIOREG_CAM_REMAP_ADDR6__CAM_REMAP_ADDR6_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_ADDR7 +#define GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ADDR7__CAM_ADDR7_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_REMAP_ADDR7 +#define GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7__SHIFT 0x0 +#define GFX_MMIOREG_CAM_REMAP_ADDR7__CAM_REMAP_ADDR7_MASK 0x000FFFFFL +//GFX_MMIOREG_CAM_CNTL +#define GFX_MMIOREG_CAM_CNTL__CAM_ENABLE__SHIFT 0x0 +#define GFX_MMIOREG_CAM_CNTL__CAM_ENABLE_MASK 0x000000FFL +//GFX_MMIOREG_CAM_ZERO_CPL +#define GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ZERO_CPL__CAM_ZERO_CPL_MASK 0xFFFFFFFFL +//GFX_MMIOREG_CAM_ONE_CPL +#define GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL__SHIFT 0x0 +#define GFX_MMIOREG_CAM_ONE_CPL__CAM_ONE_CPL_MASK 0xFFFFFFFFL +//GFX_MMIOREG_CAM_PROGRAMMABLE_CPL +#define GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL__SHIFT 0x0 +#define GFX_MMIOREG_CAM_PROGRAMMABLE_CPL__CAM_PROGRAMMABLE_CPL_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_syshub_mmreg_syshubdec +//SYSHUB_INDEX +#define SYSHUB_INDEX__INDEX__SHIFT 0x0 +#define SYSHUB_INDEX__INDEX_MASK 0xFFFFFFFFL +//SYSHUB_DATA +#define SYSHUB_DATA__DATA__SHIFT 0x0 +#define SYSHUB_DATA__DATA_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1 
+//RCC_DEV0_EPF0_STRAP0 +#define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0 +#define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10 +#define RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14 +#define RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18 +#define RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c +#define RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d +#define RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e +#define RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f +#define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL +#define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L +#define RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L +#define RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L +#define RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L +#define RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L +#define RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L +#define RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_rcc_ep_dev0_BIFDEC1 +//EP_PCIE_SCRATCH +#define EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0 +#define EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL +//EP_PCIE_CNTL +#define EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7 +#define EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8 +#define EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e +#define EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L +#define EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L +#define EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L +//EP_PCIE_INT_CNTL +#define EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0 +#define EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1 +#define EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2 +#define EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3 +#define EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4 +#define EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6 +#define EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L +#define EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L +#define EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L +#define EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L +#define EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L +#define EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L +//EP_PCIE_INT_STATUS +#define EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0 +#define EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1 +#define EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2 +#define EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3 +#define EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4 +#define EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6 +#define EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L +#define EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L +#define EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L +#define EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L +#define EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L +#define EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L +//EP_PCIE_RX_CNTL2 +#define EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0 +#define EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L +//EP_PCIE_BUS_CNTL +#define EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7 +#define EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 
0x00000080L +//EP_PCIE_CFG_CNTL +#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0 +#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1 +#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2 +#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3 +#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L +#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L +#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L +#define EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L +//EP_PCIE_TX_LTR_CNTL +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0 +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3 +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6 +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7 +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf +#define EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10 +#define EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11 +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L +#define EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L +#define EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L +#define EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L +//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//EP_PCIE_F0_DPA_CAP +#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8 +#define 
EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc +#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10 +#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18 +#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L +#define EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L +#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L +#define EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L +//EP_PCIE_F0_DPA_LATENCY_INDICATOR +#define EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0 +#define EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL +//EP_PCIE_F0_DPA_CNTL +#define EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0 +#define EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8 +#define EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL +#define EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L +//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//EP_PCIE_PME_CONTROL +#define EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0 +#define EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL +//EP_PCIEP_RESERVED +#define EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0 +#define EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL +//EP_PCIE_TX_CNTL +#define EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa +#define EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc +#define EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18 +#define EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19 +#define EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a +#define EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L +#define EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L +#define EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L +#define EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L +#define EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L +//EP_PCIE_TX_REQUESTER_ID +#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0 +#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3 +#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8 +#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L +#define EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L +#define 
EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L +//EP_PCIE_ERR_CNTL +#define EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0 +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8 +#define EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11 +#define EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12 +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18 +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19 +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f +#define EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L +#define EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L +#define EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L +#define EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L +//EP_PCIE_RX_CNTL +#define EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8 +#define EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9 +#define EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14 +#define EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15 +#define EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16 +#define EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18 +#define EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19 +#define EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a +#define EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L +#define EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L +#define EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L +#define EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L +#define EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L +#define EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L +#define EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L +#define EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L +//EP_PCIE_LC_SPEED_CNTL +#define EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0 +#define EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1 +#define EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2 +#define EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L +#define EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L +#define EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_rcc_dwn_dev0_BIFDEC1 +//DN_PCIE_RESERVED +#define DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0 +#define DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL +//DN_PCIE_SCRATCH +#define DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0 +#define DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL +//DN_PCIE_CNTL +#define DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0 +#define 
DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7 +#define DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e +#define DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L +#define DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L +#define DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L +//DN_PCIE_CONFIG_CNTL +#define DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19 +#define DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L +//DN_PCIE_RX_CNTL2 +#define DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c +#define DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L +//DN_PCIE_BUS_CNTL +#define DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7 +#define DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8 +#define DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L +#define DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L +//DN_PCIE_CFG_CNTL +#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0 +#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1 +#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2 +#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3 +#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L +#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L +#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L +#define DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L + + +// addressBlock: nbio_nbif0_rcc_dwnp_dev0_BIFDEC1 +//PCIE_ERR_CNTL +#define PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0 +#define PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11 +#define PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L +#define PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L +//PCIE_RX_CNTL +#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8 +#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9 +#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14 +#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15 +#define PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b +#define PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L +#define PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L +#define PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L +#define PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L +#define PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L +//PCIE_LC_SPEED_CNTL +#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0 +#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1 +#define PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2 +#define PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L +#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L +#define PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L +//PCIE_LC_CNTL2 +#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b +#define PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L +//PCIEP_STRAP_MISC +#define PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN__SHIFT 0xa +#define PCIEP_STRAP_MISC__STRAP_MULTI_FUNC_EN_MASK 0x00000400L +//LTR_MSG_INFO_FROM_EP +#define LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0 +#define LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_BIFPFVFDEC1[13440..14975] +//RCC_ERR_LOG +#define RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define 
RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DOORBELL_APER_EN +#define RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_CONFIG_MEMSIZE +#define RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_CONFIG_RESERVED +#define RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_IOV_FUNC_IDENTIFIER +#define RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1 +//RCC_ERR_INT_CNTL +#define RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN__SHIFT 0x0 +#define RCC_ERR_INT_CNTL__INVALID_REG_ACCESS_IN_SRIOV_INT_EN_MASK 0x00000001L +//RCC_BACO_CNTL_MISC +#define RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS__SHIFT 0x0 +#define RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS__SHIFT 0x1 +#define RCC_BACO_CNTL_MISC__BIF_ROM_REQ_DIS_MASK 0x00000001L +#define RCC_BACO_CNTL_MISC__BIF_AZ_REQ_DIS_MASK 0x00000002L +//RCC_RESET_EN +#define RCC_RESET_EN__DB_APER_RESET_EN__SHIFT 0xf +#define RCC_RESET_EN__DB_APER_RESET_EN_MASK 0x00008000L +//RCC_VDM_SUPPORT +#define RCC_VDM_SUPPORT__MCTP_SUPPORT__SHIFT 0x0 +#define RCC_VDM_SUPPORT__AMPTP_SUPPORT__SHIFT 0x1 +#define RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT__SHIFT 0x2 +#define RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE__SHIFT 0x3 +#define RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE__SHIFT 0x4 +#define RCC_VDM_SUPPORT__MCTP_SUPPORT_MASK 0x00000001L +#define RCC_VDM_SUPPORT__AMPTP_SUPPORT_MASK 0x00000002L +#define RCC_VDM_SUPPORT__OTHER_VDM_SUPPORT_MASK 0x00000004L +#define RCC_VDM_SUPPORT__ROUTE_TO_RC_CHECK_IN_RCMODE_MASK 0x00000008L +#define RCC_VDM_SUPPORT__ROUTE_BROADCAST_CHECK_IN_RCMODE_MASK 0x00000010L +//RCC_MARGIN_PARAM_CNTL0 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED__SHIFT 0x0 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING__SHIFT 0x1 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE__SHIFT 0x2 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER__SHIFT 0x3 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD__SHIFT 0x4 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS__SHIFT 0x5 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET__SHIFT 0xb +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS__SHIFT 0x12 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET__SHIFT 0x19 +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_VOLTAGE_SUPPORTED_MASK 0x00000001L +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_LEFTRIGHT_TIMING_MASK 0x00000002L +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_UPDOWN_VOLTAGE_MASK 0x00000004L +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_IND_ERROR_SAMPLER_MASK 0x00000008L +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_SAMPLE_REPORTING_METHOD_MASK 0x00000010L +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_TIMING_STEPS_MASK 0x000007E0L +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_TIMING_OFFSET_MASK 0x0003F800L +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_NUM_VOLTAGE_STEPS_MASK 0x01FC0000L +#define RCC_MARGIN_PARAM_CNTL0__MARGINING_MAX_VOLTAGE_OFFSET_MASK 0xFE000000L +//RCC_MARGIN_PARAM_CNTL1 +#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE__SHIFT 0x0 +#define 
RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING__SHIFT 0x6 +#define RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES__SHIFT 0xc +#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT__SHIFT 0x11 +#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_VOLTAGE_MASK 0x0000003FL +#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLING_RATE_TIMING_MASK 0x00000FC0L +#define RCC_MARGIN_PARAM_CNTL1__MARGINING_MAX_LANES_MASK 0x0001F000L +#define RCC_MARGIN_PARAM_CNTL1__MARGINING_SAMPLE_COUNT_MASK 0x00FE0000L +//RCC_PEER_REG_RANGE0 +#define RCC_PEER_REG_RANGE0__START_ADDR__SHIFT 0x0 +#define RCC_PEER_REG_RANGE0__END_ADDR__SHIFT 0x10 +#define RCC_PEER_REG_RANGE0__START_ADDR_MASK 0x0000FFFFL +#define RCC_PEER_REG_RANGE0__END_ADDR_MASK 0xFFFF0000L +//RCC_PEER_REG_RANGE1 +#define RCC_PEER_REG_RANGE1__START_ADDR__SHIFT 0x0 +#define RCC_PEER_REG_RANGE1__END_ADDR__SHIFT 0x10 +#define RCC_PEER_REG_RANGE1__START_ADDR_MASK 0x0000FFFFL +#define RCC_PEER_REG_RANGE1__END_ADDR_MASK 0xFFFF0000L +//RCC_BUS_CNTL +#define RCC_BUS_CNTL__PMI_IO_DIS__SHIFT 0x2 +#define RCC_BUS_CNTL__PMI_MEM_DIS__SHIFT 0x3 +#define RCC_BUS_CNTL__PMI_BM_DIS__SHIFT 0x4 +#define RCC_BUS_CNTL__PMI_IO_DIS_DN__SHIFT 0x5 +#define RCC_BUS_CNTL__PMI_MEM_DIS_DN__SHIFT 0x6 +#define RCC_BUS_CNTL__PMI_IO_DIS_UP__SHIFT 0x7 +#define RCC_BUS_CNTL__PMI_MEM_DIS_UP__SHIFT 0x8 +#define RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x10 +#define RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x11 +#define RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x12 +#define RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR__SHIFT 0x13 +#define RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR__SHIFT 0x14 +#define RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR__SHIFT 0x15 +#define RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE__SHIFT 0x18 +#define RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x19 +#define RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1c +#define RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x1d +#define RCC_BUS_CNTL__PMI_IO_DIS_MASK 0x00000004L +#define RCC_BUS_CNTL__PMI_MEM_DIS_MASK 0x00000008L +#define RCC_BUS_CNTL__PMI_BM_DIS_MASK 0x00000010L +#define RCC_BUS_CNTL__PMI_IO_DIS_DN_MASK 0x00000020L +#define RCC_BUS_CNTL__PMI_MEM_DIS_DN_MASK 0x00000040L +#define RCC_BUS_CNTL__PMI_IO_DIS_UP_MASK 0x00000080L +#define RCC_BUS_CNTL__PMI_MEM_DIS_UP_MASK 0x00000100L +#define RCC_BUS_CNTL__DN_SEC_SIG_CPLCA_WITH_EP_ERR_MASK 0x00010000L +#define RCC_BUS_CNTL__DN_SEC_RCV_CPLCA_WITH_EP_ERR_MASK 0x00020000L +#define RCC_BUS_CNTL__DN_SEC_RCV_CPLUR_WITH_EP_ERR_MASK 0x00040000L +#define RCC_BUS_CNTL__DN_PRI_SIG_CPLCA_WITH_EP_ERR_MASK 0x00080000L +#define RCC_BUS_CNTL__DN_PRI_RCV_CPLCA_WITH_EP_ERR_MASK 0x00100000L +#define RCC_BUS_CNTL__DN_PRI_RCV_CPLUR_WITH_EP_ERR_MASK 0x00200000L +#define RCC_BUS_CNTL__MAX_PAYLOAD_SIZE_MODE_MASK 0x01000000L +#define RCC_BUS_CNTL__PRIV_MAX_PAYLOAD_SIZE_MASK 0x0E000000L +#define RCC_BUS_CNTL__MAX_READ_REQUEST_SIZE_MODE_MASK 0x10000000L +#define RCC_BUS_CNTL__PRIV_MAX_READ_REQUEST_SIZE_MASK 0xE0000000L +//RCC_CONFIG_CNTL +#define RCC_CONFIG_CNTL__CFG_VGA_RAM_EN__SHIFT 0x0 +#define RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B__SHIFT 0x2 +#define RCC_CONFIG_CNTL__GRPH_ADRSEL__SHIFT 0x3 +#define RCC_CONFIG_CNTL__CFG_VGA_RAM_EN_MASK 0x00000001L +#define RCC_CONFIG_CNTL__GENMO_MONO_ADDRESS_B_MASK 0x00000004L +#define RCC_CONFIG_CNTL__GRPH_ADRSEL_MASK 0x00000018L +//RCC_CONFIG_F0_BASE +#define RCC_CONFIG_F0_BASE__F0_BASE__SHIFT 0x0 +#define RCC_CONFIG_F0_BASE__F0_BASE_MASK 0xFFFFFFFFL +//RCC_CONFIG_APER_SIZE +#define RCC_CONFIG_APER_SIZE__APER_SIZE__SHIFT 0x0 
+#define RCC_CONFIG_APER_SIZE__APER_SIZE_MASK 0xFFFFFFFFL +//RCC_CONFIG_REG_APER_SIZE +#define RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE__SHIFT 0x0 +#define RCC_CONFIG_REG_APER_SIZE__REG_APER_SIZE_MASK 0x000FFFFFL +//RCC_XDMA_LO +#define RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND__SHIFT 0x0 +#define RCC_XDMA_LO__BIF_XDMA_APER_EN__SHIFT 0x1f +#define RCC_XDMA_LO__BIF_XDMA_LOWER_BOUND_MASK 0x7FFFFFFFL +#define RCC_XDMA_LO__BIF_XDMA_APER_EN_MASK 0x80000000L +//RCC_XDMA_HI +#define RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND__SHIFT 0x0 +#define RCC_XDMA_HI__BIF_XDMA_UPPER_BOUND_MASK 0x7FFFFFFFL +//RCC_FEATURES_CONTROL_MISC +#define RCC_FEATURES_CONTROL_MISC__UR_PSN_PKT_REPORT_POISON_DIS__SHIFT 0x4 +#define RCC_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_ALL_DIS__SHIFT 0x5 +#define RCC_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_PART_DIS__SHIFT 0x6 +#define RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS__SHIFT 0x7 +#define RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN__SHIFT 0x8 +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR__SHIFT 0x9 +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR__SHIFT 0xa +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR__SHIFT 0xb +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR__SHIFT 0xc +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR__SHIFT 0xd +#define RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS__SHIFT 0xe +#define RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS__SHIFT 0xf +#define RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS__SHIFT 0x10 +#define RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS__SHIFT 0x11 +#define RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN__SHIFT 0x12 +#define RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS__SHIFT 0x13 +#define RCC_FEATURES_CONTROL_MISC__UR_PSN_PKT_REPORT_POISON_DIS_MASK 0x00000010L +#define RCC_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_ALL_DIS_MASK 0x00000020L +#define RCC_FEATURES_CONTROL_MISC__POST_PSN_ONLY_PKT_REPORT_UR_PART_DIS_MASK 0x00000040L +#define RCC_FEATURES_CONTROL_MISC__INIT_PFFLR_CRS_RET_DIS_MASK 0x00000080L +#define RCC_FEATURES_CONTROL_MISC__ATC_PRG_RESP_PASID_UR_EN_MASK 0x00000100L +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMRD_UR_MASK 0x00000200L +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_TRANSMWR_UR_MASK 0x00000400L +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_ATSTRANSREQ_UR_MASK 0x00000800L +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_PAGEREQMSG_UR_MASK 0x00001000L +#define RCC_FEATURES_CONTROL_MISC__RX_IGNORE_INVCPL_UR_MASK 0x00002000L +#define RCC_FEATURES_CONTROL_MISC__CLR_MSI_X_PENDING_WHEN_DISABLED_DIS_MASK 0x00004000L +#define RCC_FEATURES_CONTROL_MISC__CHECK_BME_ON_PENDING_PKT_GEN_DIS_MASK 0x00008000L +#define RCC_FEATURES_CONTROL_MISC__PSN_CHECK_ON_PAYLOAD_DIS_MASK 0x00010000L +#define RCC_FEATURES_CONTROL_MISC__CLR_MSI_PENDING_ON_MULTIEN_DIS_MASK 0x00020000L +#define RCC_FEATURES_CONTROL_MISC__SET_DEVICE_ERR_FOR_ECRC_EN_MASK 0x00040000L +#define RCC_FEATURES_CONTROL_MISC__HOST_POISON_FLAG_CHECK_FOR_CHAIN_DIS_MASK 0x00080000L +//RCC_BUSNUM_CNTL1 +#define RCC_BUSNUM_CNTL1__ID_MASK__SHIFT 0x0 +#define RCC_BUSNUM_CNTL1__ID_MASK_MASK 0x000000FFL +//RCC_BUSNUM_LIST0 +#define RCC_BUSNUM_LIST0__ID0__SHIFT 0x0 +#define RCC_BUSNUM_LIST0__ID1__SHIFT 0x8 +#define RCC_BUSNUM_LIST0__ID2__SHIFT 0x10 +#define RCC_BUSNUM_LIST0__ID3__SHIFT 0x18 +#define RCC_BUSNUM_LIST0__ID0_MASK 0x000000FFL +#define RCC_BUSNUM_LIST0__ID1_MASK 0x0000FF00L +#define RCC_BUSNUM_LIST0__ID2_MASK 
0x00FF0000L +#define RCC_BUSNUM_LIST0__ID3_MASK 0xFF000000L +//RCC_BUSNUM_LIST1 +#define RCC_BUSNUM_LIST1__ID4__SHIFT 0x0 +#define RCC_BUSNUM_LIST1__ID5__SHIFT 0x8 +#define RCC_BUSNUM_LIST1__ID6__SHIFT 0x10 +#define RCC_BUSNUM_LIST1__ID7__SHIFT 0x18 +#define RCC_BUSNUM_LIST1__ID4_MASK 0x000000FFL +#define RCC_BUSNUM_LIST1__ID5_MASK 0x0000FF00L +#define RCC_BUSNUM_LIST1__ID6_MASK 0x00FF0000L +#define RCC_BUSNUM_LIST1__ID7_MASK 0xFF000000L +//RCC_BUSNUM_CNTL2 +#define RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL__SHIFT 0x0 +#define RCC_BUSNUM_CNTL2__AUTOUPDATE_EN__SHIFT 0x8 +#define RCC_BUSNUM_CNTL2__HDPREG_CNTL__SHIFT 0x10 +#define RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH__SHIFT 0x11 +#define RCC_BUSNUM_CNTL2__AUTOUPDATE_SEL_MASK 0x000000FFL +#define RCC_BUSNUM_CNTL2__AUTOUPDATE_EN_MASK 0x00000100L +#define RCC_BUSNUM_CNTL2__HDPREG_CNTL_MASK 0x00010000L +#define RCC_BUSNUM_CNTL2__ERROR_MULTIPLE_ID_MATCH_MASK 0x00020000L +//RCC_CAPTURE_HOST_BUSNUM +#define RCC_CAPTURE_HOST_BUSNUM__CHECK_EN__SHIFT 0x0 +#define RCC_CAPTURE_HOST_BUSNUM__CHECK_EN_MASK 0x00000001L +//RCC_HOST_BUSNUM +#define RCC_HOST_BUSNUM__HOST_ID__SHIFT 0x0 +#define RCC_HOST_BUSNUM__HOST_ID_MASK 0x0000FFFFL +//RCC_PEER0_FB_OFFSET_HI +#define RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI__SHIFT 0x0 +#define RCC_PEER0_FB_OFFSET_HI__PEER0_FB_OFFSET_HI_MASK 0x000FFFFFL +//RCC_PEER0_FB_OFFSET_LO +#define RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO__SHIFT 0x0 +#define RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN__SHIFT 0x1f +#define RCC_PEER0_FB_OFFSET_LO__PEER0_FB_OFFSET_LO_MASK 0x000FFFFFL +#define RCC_PEER0_FB_OFFSET_LO__PEER0_FB_EN_MASK 0x80000000L +//RCC_PEER1_FB_OFFSET_HI +#define RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI__SHIFT 0x0 +#define RCC_PEER1_FB_OFFSET_HI__PEER1_FB_OFFSET_HI_MASK 0x000FFFFFL +//RCC_PEER1_FB_OFFSET_LO +#define RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO__SHIFT 0x0 +#define RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN__SHIFT 0x1f +#define RCC_PEER1_FB_OFFSET_LO__PEER1_FB_OFFSET_LO_MASK 0x000FFFFFL +#define RCC_PEER1_FB_OFFSET_LO__PEER1_FB_EN_MASK 0x80000000L +//RCC_PEER2_FB_OFFSET_HI +#define RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI__SHIFT 0x0 +#define RCC_PEER2_FB_OFFSET_HI__PEER2_FB_OFFSET_HI_MASK 0x000FFFFFL +//RCC_PEER2_FB_OFFSET_LO +#define RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO__SHIFT 0x0 +#define RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN__SHIFT 0x1f +#define RCC_PEER2_FB_OFFSET_LO__PEER2_FB_OFFSET_LO_MASK 0x000FFFFFL +#define RCC_PEER2_FB_OFFSET_LO__PEER2_FB_EN_MASK 0x80000000L +//RCC_PEER3_FB_OFFSET_HI +#define RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI__SHIFT 0x0 +#define RCC_PEER3_FB_OFFSET_HI__PEER3_FB_OFFSET_HI_MASK 0x000FFFFFL +//RCC_PEER3_FB_OFFSET_LO +#define RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO__SHIFT 0x0 +#define RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN__SHIFT 0x1f +#define RCC_PEER3_FB_OFFSET_LO__PEER3_FB_OFFSET_LO_MASK 0x000FFFFFL +#define RCC_PEER3_FB_OFFSET_LO__PEER3_FB_EN_MASK 0x80000000L +//RCC_CMN_LINK_CNTL +#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS__SHIFT 0x0 +#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS__SHIFT 0x1 +#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS__SHIFT 0x2 +#define RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN__SHIFT 0x3 +#define RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER__SHIFT 0x10 +#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L0S_DIS_MASK 0x00000001L +#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_L1_DIS_MASK 0x00000002L +#define RCC_CMN_LINK_CNTL__BLOCK_PME_ON_LDN_DIS_MASK 0x00000004L +#define RCC_CMN_LINK_CNTL__PM_L1_IDLE_CHECK_DMA_EN_MASK 0x00000008L +#define 
RCC_CMN_LINK_CNTL__VLINK_IN_L1LTR_TIMER_MASK 0xFFFF0000L +//RCC_EP_REQUESTERID_RESTORE +#define RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS__SHIFT 0x0 +#define RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV__SHIFT 0x8 +#define RCC_EP_REQUESTERID_RESTORE__EP_REQID_BUS_MASK 0x000000FFL +#define RCC_EP_REQUESTERID_RESTORE__EP_REQID_DEV_MASK 0x00001F00L +//RCC_LTR_LSWITCH_CNTL +#define RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE__SHIFT 0x0 +#define RCC_LTR_LSWITCH_CNTL__LSWITCH_LATENCY_VALUE_MASK 0x000003FFL +//RCC_MH_ARB_CNTL +#define RCC_MH_ARB_CNTL__MH_ARB_MODE__SHIFT 0x0 +#define RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY__SHIFT 0x1 +#define RCC_MH_ARB_CNTL__MH_ARB_MODE_MASK 0x00000001L +#define RCC_MH_ARB_CNTL__MH_ARB_FIX_PRIORITY_MASK 0x00007FFEL + + +// addressBlock: nbio_nbif0_bif_bx_BIFDEC1 +//BIF_MM_INDACCESS_CNTL +#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1 +#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x00000002L +//BUS_CNTL +#define BUS_CNTL__VGA_REG_COHERENCY_DIS__SHIFT 0x6 +#define BUS_CNTL__VGA_MEM_COHERENCY_DIS__SHIFT 0x7 +#define BUS_CNTL__SET_AZ_TC__SHIFT 0xa +#define BUS_CNTL__SET_MC_TC__SHIFT 0xd +#define BUS_CNTL__ZERO_BE_WR_EN__SHIFT 0x10 +#define BUS_CNTL__ZERO_BE_RD_EN__SHIFT 0x11 +#define BUS_CNTL__RD_STALL_IO_WR__SHIFT 0x12 +#define BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS__SHIFT 0x19 +#define BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS__SHIFT 0x1a +#define BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN__SHIFT 0x1d +#define BUS_CNTL__VGAFB_ZERO_BE_WR_EN__SHIFT 0x1e +#define BUS_CNTL__VGAFB_ZERO_BE_RD_EN__SHIFT 0x1f +#define BUS_CNTL__VGA_REG_COHERENCY_DIS_MASK 0x00000040L +#define BUS_CNTL__VGA_MEM_COHERENCY_DIS_MASK 0x00000080L +#define BUS_CNTL__SET_AZ_TC_MASK 0x00001C00L +#define BUS_CNTL__SET_MC_TC_MASK 0x0000E000L +#define BUS_CNTL__ZERO_BE_WR_EN_MASK 0x00010000L +#define BUS_CNTL__ZERO_BE_RD_EN_MASK 0x00020000L +#define BUS_CNTL__RD_STALL_IO_WR_MASK 0x00040000L +#define BUS_CNTL__PRECEEDINGWR_STALL_VGA_FB_FLUSH_DIS_MASK 0x02000000L +#define BUS_CNTL__PRECEEDINGWR_STALL_VGA_REG_FLUSH_DIS_MASK 0x04000000L +#define BUS_CNTL__HDP_REG_FLUSH_VF_MASK_EN_MASK 0x20000000L +#define BUS_CNTL__VGAFB_ZERO_BE_WR_EN_MASK 0x40000000L +#define BUS_CNTL__VGAFB_ZERO_BE_RD_EN_MASK 0x80000000L +//BIF_SCRATCH0 +#define BIF_SCRATCH0__BIF_SCRATCH0__SHIFT 0x0 +#define BIF_SCRATCH0__BIF_SCRATCH0_MASK 0xFFFFFFFFL +//BIF_SCRATCH1 +#define BIF_SCRATCH1__BIF_SCRATCH1__SHIFT 0x0 +#define BIF_SCRATCH1__BIF_SCRATCH1_MASK 0xFFFFFFFFL +//BX_RESET_EN +#define BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN__SHIFT 0x10 +#define BX_RESET_EN__RESET_ON_VFENABLE_LOW_EN_MASK 0x00010000L +//MM_CFGREGS_CNTL +#define MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL__SHIFT 0x0 +#define MM_CFGREGS_CNTL__MM_CFG_DEV_SEL__SHIFT 0x6 +#define MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN__SHIFT 0x1f +#define MM_CFGREGS_CNTL__MM_CFG_FUNC_SEL_MASK 0x00000007L +#define MM_CFGREGS_CNTL__MM_CFG_DEV_SEL_MASK 0x000000C0L +#define MM_CFGREGS_CNTL__MM_WR_TO_CFG_EN_MASK 0x80000000L +//BX_RESET_CNTL +#define BX_RESET_CNTL__LINK_TRAIN_EN__SHIFT 0x0 +#define BX_RESET_CNTL__LINK_TRAIN_EN_MASK 0x00000001L +//INTERRUPT_CNTL +#define INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE__SHIFT 0x0 +#define INTERRUPT_CNTL__IH_DUMMY_RD_EN__SHIFT 0x1 +#define INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN__SHIFT 0x3 +#define INTERRUPT_CNTL__IH_INTR_DLY_CNTR__SHIFT 0x4 +#define INTERRUPT_CNTL__GEN_IH_INT_EN__SHIFT 0x8 +#define INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN__SHIFT 0xf +#define INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN__SHIFT 0x10 +#define 
INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS__SHIFT 0x11 +#define INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK 0x00000001L +#define INTERRUPT_CNTL__IH_DUMMY_RD_EN_MASK 0x00000002L +#define INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK 0x00000008L +#define INTERRUPT_CNTL__IH_INTR_DLY_CNTR_MASK 0x000000F0L +#define INTERRUPT_CNTL__GEN_IH_INT_EN_MASK 0x00000100L +#define INTERRUPT_CNTL__BIF_RB_REQ_NONSNOOP_EN_MASK 0x00008000L +#define INTERRUPT_CNTL__DUMMYRD_BYPASS_IN_MSI_EN_MASK 0x00010000L +#define INTERRUPT_CNTL__ALWAYS_SEND_INTPKT_AFTER_DUMMYRD_DIS_MASK 0x00020000L +//INTERRUPT_CNTL2 +#define INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR__SHIFT 0x0 +#define INTERRUPT_CNTL2__IH_DUMMY_RD_ADDR_MASK 0xFFFFFFFFL +//CLKREQB_PAD_CNTL +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_A__SHIFT 0x0 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL__SHIFT 0x1 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE__SHIFT 0x2 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE__SHIFT 0x3 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0__SHIFT 0x5 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1__SHIFT 0x6 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2__SHIFT 0x7 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3__SHIFT 0x8 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN__SHIFT 0x9 +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE__SHIFT 0xa +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN__SHIFT 0xb +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN__SHIFT 0xc +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_Y__SHIFT 0xd +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_A_MASK 0x00000001L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SEL_MASK 0x00000002L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_MODE_MASK 0x00000004L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SPARE_MASK 0x00000018L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN0_MASK 0x00000020L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN1_MASK 0x00000040L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN2_MASK 0x00000080L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SN3_MASK 0x00000100L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SLEWN_MASK 0x00000200L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_WAKE_MASK 0x00000400L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_SCHMEN_MASK 0x00000800L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_EN_MASK 0x00001000L +#define CLKREQB_PAD_CNTL__CLKREQB_PAD_Y_MASK 0x00002000L +//BIF_FEATURES_CONTROL_MISC +#define BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS__SHIFT 0x0 +#define BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS__SHIFT 0x1 +#define BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS__SHIFT 0x2 +#define BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS__SHIFT 0x3 +#define BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN__SHIFT 0xc +#define BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS__SHIFT 0xd +#define BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN__SHIFT 0xf +#define BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR__SHIFT 0x18 +#define BIF_FEATURES_CONTROL_MISC__MST_BIF_REQ_EP_DIS_MASK 0x00000001L +#define BIF_FEATURES_CONTROL_MISC__SLV_BIF_CPL_EP_DIS_MASK 0x00000002L +#define BIF_FEATURES_CONTROL_MISC__BIF_SLV_REQ_EP_DIS_MASK 0x00000004L +#define BIF_FEATURES_CONTROL_MISC__BIF_MST_CPL_EP_DIS_MASK 0x00000008L +#define BIF_FEATURES_CONTROL_MISC__BIF_RB_SET_OVERFLOW_EN_MASK 0x00001000L +#define BIF_FEATURES_CONTROL_MISC__ATOMIC_ERR_INT_DIS_MASK 0x00002000L +#define BIF_FEATURES_CONTROL_MISC__BME_HDL_NONVIR_EN_MASK 0x00008000L +#define BIF_FEATURES_CONTROL_MISC__DOORBELL_SELFRING_GPA_APER_CHK_48BIT_ADDR_MASK 0x01000000L +//BIF_DOORBELL_CNTL +#define BIF_DOORBELL_CNTL__SELF_RING_DIS__SHIFT 0x0 +#define BIF_DOORBELL_CNTL__TRANS_CHECK_DIS__SHIFT 0x1 +#define 
BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN__SHIFT 0x2 +#define BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS__SHIFT 0x3 +#define BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT 0x4 +#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS__SHIFT 0x18 +#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0__SHIFT 0x19 +#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1__SHIFT 0x1a +#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2__SHIFT 0x1b +#define BIF_DOORBELL_CNTL__SELF_RING_DIS_MASK 0x00000001L +#define BIF_DOORBELL_CNTL__TRANS_CHECK_DIS_MASK 0x00000002L +#define BIF_DOORBELL_CNTL__UNTRANS_LBACK_EN_MASK 0x00000004L +#define BIF_DOORBELL_CNTL__NON_CONSECUTIVE_BE_ZERO_DIS_MASK 0x00000008L +#define BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK 0x00000010L +#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_DIS_MASK 0x01000000L +#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_0_MASK 0x02000000L +#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_1_MASK 0x04000000L +#define BIF_DOORBELL_CNTL__DB_MNTR_INTGEN_MODE_2_MASK 0x08000000L +//BIF_DOORBELL_INT_CNTL +#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS__SHIFT 0x0 +#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1 +#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2 +#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT 0x10 +#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11 +#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12 +#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18 +#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE__SHIFT 0x19 +#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE__SHIFT 0x1a +#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK 0x00000001L +#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L +#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L +#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR_MASK 0x00010000L +#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L +#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L +#define BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L +#define BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_DISABLE_MASK 0x02000000L +#define BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE_MASK 0x04000000L +//BIF_FB_EN +#define BIF_FB_EN__FB_READ_EN__SHIFT 0x0 +#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1 +#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L +#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L +//BIF_BUSY_DELAY_CNTR +#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0 +#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL +//BIF_MST_TRANS_PENDING_VF +#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL +//BIF_SLV_TRANS_PENDING_VF +#define BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING__SHIFT 0x0 +#define BIF_SLV_TRANS_PENDING_VF__BIF_SLV_TRANS_PENDING_MASK 0x7FFFFFFFL +//BACO_CNTL +#define BACO_CNTL__BACO_EN__SHIFT 0x0 +#define BACO_CNTL__BACO_DUMMY_EN__SHIFT 0x2 +#define BACO_CNTL__BACO_POWER_OFF__SHIFT 0x3 +#define BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT 0x5 +#define BACO_CNTL__BACO_RST_INTR_MASK__SHIFT 0x6 +#define BACO_CNTL__BACO_MODE__SHIFT 0x8 +#define BACO_CNTL__RCU_BIF_CONFIG_DONE__SHIFT 0x9 +#define BACO_CNTL__BACO_AUTO_EXIT__SHIFT 0x1f +#define BACO_CNTL__BACO_EN_MASK 0x00000001L +#define BACO_CNTL__BACO_DUMMY_EN_MASK 0x00000004L +#define 
BACO_CNTL__BACO_POWER_OFF_MASK 0x00000008L +#define BACO_CNTL__BACO_DSTATE_BYPASS_MASK 0x00000020L +#define BACO_CNTL__BACO_RST_INTR_MASK_MASK 0x00000040L +#define BACO_CNTL__BACO_MODE_MASK 0x00000100L +#define BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK 0x00000200L +#define BACO_CNTL__BACO_AUTO_EXIT_MASK 0x80000000L +//BIF_BACO_EXIT_TIME0 +#define BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER__SHIFT 0x0 +#define BIF_BACO_EXIT_TIME0__BACO_EXIT_PXEN_CLR_TIMER_MASK 0x000FFFFFL +//BIF_BACO_EXIT_TIMER1 +#define BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER__SHIFT 0x0 +#define BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN__SHIFT 0x18 +#define BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_ENDING_AUTO_BY_RSMU_INTR_CLR__SHIFT 0x19 +#define BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS__SHIFT 0x1a +#define BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH__SHIFT 0x1b +#define BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW__SHIFT 0x1c +#define BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL__SHIFT 0x1d +#define BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS__SHIFT 0x1f +#define BIF_BACO_EXIT_TIMER1__BACO_EXIT_SIDEBAND_TIMER_MASK 0x000FFFFFL +#define BIF_BACO_EXIT_TIMER1__BACO_HW_AUTO_FLUSH_EN_MASK 0x01000000L +#define BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_ENDING_AUTO_BY_RSMU_INTR_CLR_MASK 0x02000000L +#define BIF_BACO_EXIT_TIMER1__BACO_HW_EXIT_DIS_MASK 0x04000000L +#define BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_HIGH_MASK 0x08000000L +#define BIF_BACO_EXIT_TIMER1__PX_EN_OE_IN_PX_EN_LOW_MASK 0x10000000L +#define BIF_BACO_EXIT_TIMER1__BACO_MODE_SEL_MASK 0x60000000L +#define BIF_BACO_EXIT_TIMER1__AUTO_BACO_EXIT_CLR_BY_HW_DIS_MASK 0x80000000L +//BIF_BACO_EXIT_TIMER2 +#define BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER__SHIFT 0x0 +#define BIF_BACO_EXIT_TIMER2__BACO_EXIT_LCLK_BAK_TIMER_MASK 0x000FFFFFL +//BIF_BACO_EXIT_TIMER3 +#define BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER__SHIFT 0x0 +#define BIF_BACO_EXIT_TIMER3__BACO_EXIT_DUMMY_EN_CLR_TIMER_MASK 0x000FFFFFL +//BIF_BACO_EXIT_TIMER4 +#define BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER__SHIFT 0x0 +#define BIF_BACO_EXIT_TIMER4__BACO_EXIT_BACO_EN_CLR_TIMER_MASK 0x000FFFFFL +//MEM_TYPE_CNTL +#define MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3__SHIFT 0x0 +#define MEM_TYPE_CNTL__BF_MEM_PHY_G5_G3_MASK 0x00000001L +//NBIF_GFX_ADDR_LUT_CNTL +#define NBIF_GFX_ADDR_LUT_CNTL__LUT_ENABLE__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_CNTL__MSI_ADDR_MODE__SHIFT 0x1 +#define NBIF_GFX_ADDR_LUT_CNTL__LUT_ENABLE_MASK 0x00000001L +#define NBIF_GFX_ADDR_LUT_CNTL__MSI_ADDR_MODE_MASK 0x00000002L +//NBIF_GFX_ADDR_LUT_0 +#define NBIF_GFX_ADDR_LUT_0__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_0__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_1 +#define NBIF_GFX_ADDR_LUT_1__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_1__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_2 +#define NBIF_GFX_ADDR_LUT_2__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_2__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_3 +#define NBIF_GFX_ADDR_LUT_3__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_3__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_4 +#define NBIF_GFX_ADDR_LUT_4__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_4__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_5 +#define NBIF_GFX_ADDR_LUT_5__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_5__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_6 +#define NBIF_GFX_ADDR_LUT_6__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_6__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_7 +#define NBIF_GFX_ADDR_LUT_7__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_7__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_8 +#define NBIF_GFX_ADDR_LUT_8__ADDR__SHIFT 0x0 +#define 
NBIF_GFX_ADDR_LUT_8__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_9 +#define NBIF_GFX_ADDR_LUT_9__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_9__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_10 +#define NBIF_GFX_ADDR_LUT_10__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_10__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_11 +#define NBIF_GFX_ADDR_LUT_11__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_11__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_12 +#define NBIF_GFX_ADDR_LUT_12__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_12__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_13 +#define NBIF_GFX_ADDR_LUT_13__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_13__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_14 +#define NBIF_GFX_ADDR_LUT_14__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_14__ADDR_MASK 0x00FFFFFFL +//NBIF_GFX_ADDR_LUT_15 +#define NBIF_GFX_ADDR_LUT_15__ADDR__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_15__ADDR_MASK 0x00FFFFFFL +//REMAP_HDP_MEM_FLUSH_CNTL +#define REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS__SHIFT 0x2 +#define REMAP_HDP_MEM_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL +//REMAP_HDP_REG_FLUSH_CNTL +#define REMAP_HDP_REG_FLUSH_CNTL__ADDRESS__SHIFT 0x2 +#define REMAP_HDP_REG_FLUSH_CNTL__ADDRESS_MASK 0x0007FFFCL +//BIF_RB_CNTL +#define BIF_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define BIF_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE__SHIFT 0x8 +#define BIF_RB_CNTL__WPTR_WRITEBACK_TIMER__SHIFT 0x9 +#define BIF_RB_CNTL__BIF_RB_TRAN__SHIFT 0x11 +#define BIF_RB_CNTL__RB_INTR_FIX_PRIORITY__SHIFT 0x1a +#define BIF_RB_CNTL__RB_INTR_ARB_MODE__SHIFT 0x1d +#define BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE__SHIFT 0x1e +#define BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR__SHIFT 0x1f +#define BIF_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define BIF_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define BIF_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK 0x00000100L +#define BIF_RB_CNTL__WPTR_WRITEBACK_TIMER_MASK 0x00003E00L +#define BIF_RB_CNTL__BIF_RB_TRAN_MASK 0x00020000L +#define BIF_RB_CNTL__RB_INTR_FIX_PRIORITY_MASK 0x1C000000L +#define BIF_RB_CNTL__RB_INTR_ARB_MODE_MASK 0x20000000L +#define BIF_RB_CNTL__RB_RST_BY_FLR_DISABLE_MASK 0x40000000L +#define BIF_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000L +//BIF_RB_BASE +#define BIF_RB_BASE__ADDR__SHIFT 0x0 +#define BIF_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//BIF_RB_RPTR +#define BIF_RB_RPTR__OFFSET__SHIFT 0x2 +#define BIF_RB_RPTR__OFFSET_MASK 0x0003FFFCL +//BIF_RB_WPTR +#define BIF_RB_WPTR__BIF_RB_OVERFLOW__SHIFT 0x0 +#define BIF_RB_WPTR__OFFSET__SHIFT 0x2 +#define BIF_RB_WPTR__BIF_RB_OVERFLOW_MASK 0x00000001L +#define BIF_RB_WPTR__OFFSET_MASK 0x0003FFFCL +//BIF_RB_WPTR_ADDR_HI +#define BIF_RB_WPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define BIF_RB_WPTR_ADDR_HI__ADDR_MASK 0x000000FFL +//BIF_RB_WPTR_ADDR_LO +#define BIF_RB_WPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define BIF_RB_WPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//MAILBOX_INDEX +#define MAILBOX_INDEX__MAILBOX_INDEX__SHIFT 0x0 +#define MAILBOX_INDEX__MAILBOX_INDEX_MASK 0x0000001FL +//BIF_MP1_INTR_CTRL +#define BIF_MP1_INTR_CTRL__BACO_EXIT_DONE__SHIFT 0x0 +#define BIF_MP1_INTR_CTRL__BACO_EXIT_DONE_MASK 0x00000001L +//BIF_UVD_GPUIOV_CFG_SIZE +#define BIF_UVD_GPUIOV_CFG_SIZE__UVD_GPUIOV_CFG_SIZE__SHIFT 0x0 +#define BIF_UVD_GPUIOV_CFG_SIZE__UVD_GPUIOV_CFG_SIZE_MASK 0x0000000FL +//BIF_VCE_GPUIOV_CFG_SIZE +#define BIF_VCE_GPUIOV_CFG_SIZE__VCE_GPUIOV_CFG_SIZE__SHIFT 0x0 +#define BIF_VCE_GPUIOV_CFG_SIZE__VCE_GPUIOV_CFG_SIZE_MASK 0x0000000FL +//BIF_GFX_SDMA_GPUIOV_CFG_SIZE +#define BIF_GFX_SDMA_GPUIOV_CFG_SIZE__GFX_SDMA_GPUIOV_CFG_SIZE__SHIFT 0x0 +#define 
BIF_GFX_SDMA_GPUIOV_CFG_SIZE__GFX_SDMA_GPUIOV_CFG_SIZE_MASK 0x0000000FL +//BIF_PERSTB_PAD_CNTL +#define BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL__SHIFT 0x0 +#define BIF_PERSTB_PAD_CNTL__PERSTB_PAD_CNTL_MASK 0x0000FFFFL +//BIF_PX_EN_PAD_CNTL +#define BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL__SHIFT 0x0 +#define BIF_PX_EN_PAD_CNTL__PX_EN_PAD_CNTL_MASK 0x000000FFL +//BIF_REFPADKIN_PAD_CNTL +#define BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL__SHIFT 0x0 +#define BIF_REFPADKIN_PAD_CNTL__REFPADKIN_PAD_CNTL_MASK 0x000000FFL +//BIF_CLKREQB_PAD_CNTL +#define BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL__SHIFT 0x0 +#define BIF_CLKREQB_PAD_CNTL__CLKREQB_PAD_CNTL_MASK 0x00FFFFFFL +//BIF_PWRBRK_PAD_CNTL +#define BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL__SHIFT 0x0 +#define BIF_PWRBRK_PAD_CNTL__PWRBRK_PAD_CNTL_MASK 0x000000FFL +//BIF_WAKEB_PAD_CNTL +#define BIF_WAKEB_PAD_CNTL__GPIO33_ITXIMPSEL__SHIFT 0x0 +#define BIF_WAKEB_PAD_CNTL__GPIO33_ICTFEN__SHIFT 0x1 +#define BIF_WAKEB_PAD_CNTL__GPIO33_IPD__SHIFT 0x2 +#define BIF_WAKEB_PAD_CNTL__GPIO33_IPU__SHIFT 0x3 +#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXEN__SHIFT 0x4 +#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL0__SHIFT 0x5 +#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL1__SHIFT 0x6 +#define BIF_WAKEB_PAD_CNTL__GPIO33_RESERVED__SHIFT 0x7 +#define BIF_WAKEB_PAD_CNTL__GPIO33_ITXIMPSEL_MASK 0x00000001L +#define BIF_WAKEB_PAD_CNTL__GPIO33_ICTFEN_MASK 0x00000002L +#define BIF_WAKEB_PAD_CNTL__GPIO33_IPD_MASK 0x00000004L +#define BIF_WAKEB_PAD_CNTL__GPIO33_IPU_MASK 0x00000008L +#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXEN_MASK 0x00000010L +#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL0_MASK 0x00000020L +#define BIF_WAKEB_PAD_CNTL__GPIO33_IRXSEL1_MASK 0x00000040L +#define BIF_WAKEB_PAD_CNTL__GPIO33_RESERVED_MASK 0x00000080L + + +// addressBlock: nbio_nbif0_bif_bx_pf_BIFPFVFDEC1 +//BIF_BME_STATUS +#define BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_ATOMIC_ERR_LOG +//DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//DOORBELL_SELFRING_GPA_APER_CNTL +#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//HDP_REG_COHERENCY_FLUSH_CNTL +#define HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//HDP_MEM_COHERENCY_FLUSH_CNTL +#define HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//GPU_HDP_FLUSH_REQ +#define GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define 
GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//GPU_HDP_FLUSH_DONE +#define GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_TRANS_PENDING +#define BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//NBIF_GFX_ADDR_LUT_BYPASS +#define NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//MAILBOX_MSGBUF_TRN_DW0 +#define MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//MAILBOX_MSGBUF_TRN_DW1 +#define MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//MAILBOX_MSGBUF_TRN_DW2 +#define MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//MAILBOX_MSGBUF_TRN_DW3 +#define MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//MAILBOX_MSGBUF_RCV_DW0 +#define MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//MAILBOX_MSGBUF_RCV_DW1 +#define MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//MAILBOX_MSGBUF_RCV_DW2 +#define MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define 
MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//MAILBOX_MSGBUF_RCV_DW3 +#define MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//MAILBOX_CONTROL +#define MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//MAILBOX_INT_CNTL +#define MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_VMHV_MAILBOX +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_gdc_GDCDEC +//NGDC_SDP_PORT_CTRL +#define NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0 +#define NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_MASK 0x000000FFL +//SHUB_REGS_IF_CTL +#define SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0 +#define SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L +//NGDC_MGCG_CTRL +#define NGDC_MGCG_CTRL__NGDC_MGCG_EN__SHIFT 0x0 +#define NGDC_MGCG_CTRL__NGDC_MGCG_MODE__SHIFT 0x1 +#define NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS__SHIFT 0x2 +#define NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS__SHIFT 0xa +#define NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS__SHIFT 0xb +#define NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS__SHIFT 0xc +#define NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS__SHIFT 0xd +#define NGDC_MGCG_CTRL__NGDC_MGCG_EN_MASK 0x00000001L +#define NGDC_MGCG_CTRL__NGDC_MGCG_MODE_MASK 0x00000002L +#define NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS_MASK 0x000003FCL +#define NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS_MASK 0x00000400L +#define NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS_MASK 0x00000800L +#define NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS_MASK 0x00001000L +#define NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS_MASK 0x00002000L +//NGDC_RESERVED_0 +#define NGDC_RESERVED_0__RESERVED__SHIFT 0x0 +#define NGDC_RESERVED_0__RESERVED_MASK 0xFFFFFFFFL +//NGDC_RESERVED_1 +#define NGDC_RESERVED_1__RESERVED__SHIFT 0x0 +#define NGDC_RESERVED_1__RESERVED_MASK 0xFFFFFFFFL +//NGDC_SDP_PORT_CTRL_SOCCLK +#define NGDC_SDP_PORT_CTRL_SOCCLK__SDP_DISCON_HYSTERESIS_SOCCLK__SHIFT 0x0 +#define 
NGDC_SDP_PORT_CTRL_SOCCLK__SDP_DISCON_HYSTERESIS_SOCCLK_MASK 0x000000FFL +//BIF_SDMA0_DOORBELL_RANGE +#define BIF_SDMA0_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define BIF_SDMA0_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define BIF_SDMA0_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define BIF_SDMA0_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//BIF_SDMA1_DOORBELL_RANGE +#define BIF_SDMA1_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define BIF_SDMA1_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define BIF_SDMA1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define BIF_SDMA1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//BIF_IH_DOORBELL_RANGE +#define BIF_IH_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define BIF_IH_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define BIF_IH_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define BIF_IH_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//BIF_MMSCH0_DOORBELL_RANGE +#define BIF_MMSCH0_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define BIF_MMSCH0_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define BIF_MMSCH0_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define BIF_MMSCH0_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//BIF_ACV_DOORBELL_RANGE +#define BIF_ACV_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define BIF_ACV_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define BIF_ACV_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define BIF_ACV_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//BIF_DOORBELL_FENCE_CNTL +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_CP_ENABLE__SHIFT 0x0 +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA0_ENABLE__SHIFT 0x1 +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA1_ENABLE__SHIFT 0x2 +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ACV_ENABLE__SHIFT 0x3 +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ONCE_TRIGGER_DIS__SHIFT 0x10 +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_CP_ENABLE_MASK 0x00000001L +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA0_ENABLE_MASK 0x00000002L +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA1_ENABLE_MASK 0x00000004L +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ACV_ENABLE_MASK 0x00000008L +#define BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ONCE_TRIGGER_DIS_MASK 0x00010000L +//S2A_MISC_CNTL +#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA0_DIS__SHIFT 0x0 +#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA1_DIS__SHIFT 0x1 +#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_CP_DIS__SHIFT 0x2 +#define S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS__SHIFT 0x3 +#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_ACV_DIS__SHIFT 0x4 +#define S2A_MISC_CNTL__ATM_ARB_MODE__SHIFT 0x8 +#define S2A_MISC_CNTL__RB_ARB_MODE__SHIFT 0xa +#define S2A_MISC_CNTL__HSTR_ARB_MODE__SHIFT 0xc +#define S2A_MISC_CNTL__WRSP_ARB_MODE__SHIFT 0x10 +#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA0_DIS_MASK 0x00000001L +#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA1_DIS_MASK 0x00000002L +#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_CP_DIS_MASK 0x00000004L +#define S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS_MASK 0x00000008L +#define S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_ACV_DIS_MASK 0x00000010L +#define S2A_MISC_CNTL__ATM_ARB_MODE_MASK 0x00000300L +#define S2A_MISC_CNTL__RB_ARB_MODE_MASK 0x00000C00L +#define S2A_MISC_CNTL__HSTR_ARB_MODE_MASK 0x00003000L +#define S2A_MISC_CNTL__WRSP_ARB_MODE_MASK 0x000F0000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_BIFDEC2 +//GFXMSIX_VECT0_ADDR_LO +#define GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//GFXMSIX_VECT0_ADDR_HI +#define GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//GFXMSIX_VECT0_MSG_DATA 
+#define GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//GFXMSIX_VECT0_CONTROL +#define GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//GFXMSIX_VECT1_ADDR_LO +#define GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//GFXMSIX_VECT1_ADDR_HI +#define GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//GFXMSIX_VECT1_MSG_DATA +#define GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//GFXMSIX_VECT1_CONTROL +#define GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//GFXMSIX_VECT2_ADDR_LO +#define GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//GFXMSIX_VECT2_ADDR_HI +#define GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//GFXMSIX_VECT2_MSG_DATA +#define GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//GFXMSIX_VECT2_CONTROL +#define GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//GFXMSIX_PBA +#define GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_gdc_GDCDEC +//GDC0_NGDC_SDP_PORT_CTRL +#define GDC0_NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0 +#define GDC0_NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS_MASK 0x000000FFL +//GDC0_SHUB_REGS_IF_CTL +#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0 +#define GDC0_SHUB_REGS_IF_CTL__SHUB_REGS_DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L +//GDC0_NGDC_MGCG_CTRL +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_EN__SHIFT 0x0 +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_MODE__SHIFT 0x1 +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS__SHIFT 0x2 +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS__SHIFT 0xa +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS__SHIFT 0xb +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS__SHIFT 0xc +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS__SHIFT 0xd +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_EN_MASK 0x00000001L +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_MODE_MASK 0x00000002L +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HYSTERESIS_MASK 0x000003FCL +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_HST_DIS_MASK 0x00000400L +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_DMA_DIS_MASK 0x00000800L +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_REG_DIS_MASK 0x00001000L +#define GDC0_NGDC_MGCG_CTRL__NGDC_MGCG_AER_DIS_MASK 0x00002000L +//GDC0_NGDC_RESERVED_0 +#define GDC0_NGDC_RESERVED_0__RESERVED__SHIFT 0x0 +#define GDC0_NGDC_RESERVED_0__RESERVED_MASK 0xFFFFFFFFL +//GDC0_NGDC_RESERVED_1 +#define GDC0_NGDC_RESERVED_1__RESERVED__SHIFT 0x0 +#define GDC0_NGDC_RESERVED_1__RESERVED_MASK 0xFFFFFFFFL +//GDC0_NGDC_SDP_PORT_CTRL_SOCCLK +#define GDC0_NGDC_SDP_PORT_CTRL_SOCCLK__SDP_DISCON_HYSTERESIS_SOCCLK__SHIFT 0x0 +#define GDC0_NGDC_SDP_PORT_CTRL_SOCCLK__SDP_DISCON_HYSTERESIS_SOCCLK_MASK 0x000000FFL +//GDC0_BIF_SDMA0_DOORBELL_RANGE +#define GDC0_BIF_SDMA0_DOORBELL_RANGE__OFFSET__SHIFT 
0x2 +#define GDC0_BIF_SDMA0_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define GDC0_BIF_SDMA0_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define GDC0_BIF_SDMA0_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//GDC0_BIF_SDMA1_DOORBELL_RANGE +#define GDC0_BIF_SDMA1_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define GDC0_BIF_SDMA1_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define GDC0_BIF_SDMA1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define GDC0_BIF_SDMA1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//GDC0_BIF_IH_DOORBELL_RANGE +#define GDC0_BIF_IH_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define GDC0_BIF_IH_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define GDC0_BIF_IH_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define GDC0_BIF_IH_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//GDC0_BIF_MMSCH0_DOORBELL_RANGE +#define GDC0_BIF_MMSCH0_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define GDC0_BIF_MMSCH0_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define GDC0_BIF_MMSCH0_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define GDC0_BIF_MMSCH0_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//GDC0_BIF_ACV_DOORBELL_RANGE +#define GDC0_BIF_ACV_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define GDC0_BIF_ACV_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define GDC0_BIF_ACV_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define GDC0_BIF_ACV_DOORBELL_RANGE__SIZE_MASK 0x001F0000L +//GDC0_BIF_DOORBELL_FENCE_CNTL +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_CP_ENABLE__SHIFT 0x0 +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA0_ENABLE__SHIFT 0x1 +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA1_ENABLE__SHIFT 0x2 +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ACV_ENABLE__SHIFT 0x3 +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ONCE_TRIGGER_DIS__SHIFT 0x10 +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_CP_ENABLE_MASK 0x00000001L +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA0_ENABLE_MASK 0x00000002L +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_SDMA1_ENABLE_MASK 0x00000004L +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ACV_ENABLE_MASK 0x00000008L +#define GDC0_BIF_DOORBELL_FENCE_CNTL__DOORBELL_FENCE_ONCE_TRIGGER_DIS_MASK 0x00010000L +//GDC0_S2A_MISC_CNTL +#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA0_DIS__SHIFT 0x0 +#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA1_DIS__SHIFT 0x1 +#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_CP_DIS__SHIFT 0x2 +#define GDC0_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS__SHIFT 0x3 +#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_ACV_DIS__SHIFT 0x4 +#define GDC0_S2A_MISC_CNTL__ATM_ARB_MODE__SHIFT 0x8 +#define GDC0_S2A_MISC_CNTL__RB_ARB_MODE__SHIFT 0xa +#define GDC0_S2A_MISC_CNTL__HSTR_ARB_MODE__SHIFT 0xc +#define GDC0_S2A_MISC_CNTL__WRSP_ARB_MODE__SHIFT 0x10 +#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA0_DIS_MASK 0x00000001L +#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_SDMA1_DIS_MASK 0x00000002L +#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_CP_DIS_MASK 0x00000004L +#define GDC0_S2A_MISC_CNTL__AXI_HST_CPL_EP_DIS_MASK 0x00000008L +#define GDC0_S2A_MISC_CNTL__DOORBELL_64BIT_SUPPORT_ACV_DIS_MASK 0x00000010L +#define GDC0_S2A_MISC_CNTL__ATM_ARB_MODE_MASK 0x00000300L +#define GDC0_S2A_MISC_CNTL__RB_ARB_MODE_MASK 0x00000C00L +#define GDC0_S2A_MISC_CNTL__HSTR_ARB_MODE_MASK 0x00003000L +#define GDC0_S2A_MISC_CNTL__WRSP_ARB_MODE_MASK 0x000F0000L + + +// addressBlock: nbio_nbif0_syshub_mmreg_syshubdirect +//SYSHUB_DS_CTRL_SOCCLK +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL0_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x0 +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL1_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x1 +#define 
SYSHUB_DS_CTRL_SOCCLK__HST_CL2_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x2 +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL3_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x3 +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL4_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x4 +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL5_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x5 +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL6_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x6 +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL7_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x7 +#define SYSHUB_DS_CTRL_SOCCLK__DMA_CL0_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x8 +#define SYSHUB_DS_CTRL_SOCCLK__DMA_CL1_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x9 +#define SYSHUB_DS_CTRL_SOCCLK__SYSHUB_SOCCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x1c +#define SYSHUB_DS_CTRL_SOCCLK__SYSHUB_SOCCLK_DS_EN__SHIFT 0x1f +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL0_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000001L +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL1_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000002L +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL2_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000004L +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL3_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000008L +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL4_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000010L +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL5_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000020L +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL6_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000040L +#define SYSHUB_DS_CTRL_SOCCLK__HST_CL7_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000080L +#define SYSHUB_DS_CTRL_SOCCLK__DMA_CL0_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000100L +#define SYSHUB_DS_CTRL_SOCCLK__DMA_CL1_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x00000200L +#define SYSHUB_DS_CTRL_SOCCLK__SYSHUB_SOCCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x10000000L +#define SYSHUB_DS_CTRL_SOCCLK__SYSHUB_SOCCLK_DS_EN_MASK 0x80000000L +//SYSHUB_DS_CTRL2_SOCCLK +#define SYSHUB_DS_CTRL2_SOCCLK__SYSHUB_SOCCLK_DS_TIMER__SHIFT 0x0 +#define SYSHUB_DS_CTRL2_SOCCLK__SYSHUB_SOCCLK_DS_TIMER_MASK 0x0000FFFFL +//SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK +#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW0_bypass_en__SHIFT 0x0 +#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW1_bypass_en__SHIFT 0x1 +#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW2_bypass_en__SHIFT 0x2 +#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_DMA_SW0_bypass_en__SHIFT 0x10 +#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW0_bypass_en_MASK 0x00000001L +#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW1_bypass_en_MASK 0x00000002L +#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW2_bypass_en_MASK 0x00000004L +#define SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SOCCLK__SYSHUB_bgen_socclk_DMA_SW0_bypass_en_MASK 0x00010000L +//SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK +#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW0_imm_en__SHIFT 0x0 +#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW1_imm_en__SHIFT 0x1 +#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW2_imm_en__SHIFT 0x2 +#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_DMA_SW0_imm_en__SHIFT 0x10 +#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW0_imm_en_MASK 0x00000001L +#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW1_imm_en_MASK 0x00000002L +#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_HST_SW2_imm_en_MASK 0x00000004L 
+#define SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SOCCLK__SYSHUB_bgen_socclk_DMA_SW0_imm_en_MASK 0x00010000L +//SYSHUB_TRANS_IDLE_SOCCLK +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF0_SOCCLK__SHIFT 0x0 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF1_SOCCLK__SHIFT 0x1 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF2_SOCCLK__SHIFT 0x2 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF3_SOCCLK__SHIFT 0x3 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF4_SOCCLK__SHIFT 0x4 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF5_SOCCLK__SHIFT 0x5 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF6_SOCCLK__SHIFT 0x6 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF7_SOCCLK__SHIFT 0x7 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF8_SOCCLK__SHIFT 0x8 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF9_SOCCLK__SHIFT 0x9 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF10_SOCCLK__SHIFT 0xa +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF11_SOCCLK__SHIFT 0xb +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF12_SOCCLK__SHIFT 0xc +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF13_SOCCLK__SHIFT 0xd +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF14_SOCCLK__SHIFT 0xe +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF15_SOCCLK__SHIFT 0xf +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_PF_SOCCLK__SHIFT 0x10 +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF0_SOCCLK_MASK 0x00000001L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF1_SOCCLK_MASK 0x00000002L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF2_SOCCLK_MASK 0x00000004L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF3_SOCCLK_MASK 0x00000008L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF4_SOCCLK_MASK 0x00000010L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF5_SOCCLK_MASK 0x00000020L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF6_SOCCLK_MASK 0x00000040L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF7_SOCCLK_MASK 0x00000080L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF8_SOCCLK_MASK 0x00000100L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF9_SOCCLK_MASK 0x00000200L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF10_SOCCLK_MASK 0x00000400L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF11_SOCCLK_MASK 0x00000800L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF12_SOCCLK_MASK 0x00001000L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF13_SOCCLK_MASK 0x00002000L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF14_SOCCLK_MASK 0x00004000L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_VF15_SOCCLK_MASK 0x00008000L +#define SYSHUB_TRANS_IDLE_SOCCLK__SYSHUB_TRANS_IDLE_PF_SOCCLK_MASK 0x00010000L +//SYSHUB_HP_TIMER_SOCCLK +#define SYSHUB_HP_TIMER_SOCCLK__SYSHUB_HP_TIMER_SOCCLK__SHIFT 0x0 +#define SYSHUB_HP_TIMER_SOCCLK__SYSHUB_HP_TIMER_SOCCLK_MASK 0xFFFFFFFFL +//SYSHUB_MGCG_CTRL_SOCCLK +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK__SHIFT 0x0 +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_MODE_SOCCLK__SHIFT 0x1 +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_HYSTERESIS_SOCCLK__SHIFT 0x2 +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_HST_DIS_SOCCLK__SHIFT 0xa +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_DMA_DIS_SOCCLK__SHIFT 0xb +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_REG_DIS_SOCCLK__SHIFT 0xc +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_AER_DIS_SOCCLK__SHIFT 0xd +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_EN_SOCCLK_MASK 0x00000001L 
+#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_MODE_SOCCLK_MASK 0x00000002L +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_HYSTERESIS_SOCCLK_MASK 0x000003FCL +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_HST_DIS_SOCCLK_MASK 0x00000400L +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_DMA_DIS_SOCCLK_MASK 0x00000800L +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_REG_DIS_SOCCLK_MASK 0x00001000L +#define SYSHUB_MGCG_CTRL_SOCCLK__SYSHUB_MGCG_AER_DIS_SOCCLK_MASK 0x00002000L +//SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK +#define SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK__SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK__SHIFT 0x0 +#define SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK__SYSHUB_CPF_DOORBELL_RS_RESET_SOCCLK_MASK 0x00000001L +//SYSHUB_SCRATCH_SOCCLK +#define SYSHUB_SCRATCH_SOCCLK__SCRATCH_SOCCLK__SHIFT 0x0 +#define SYSHUB_SCRATCH_SOCCLK__SCRATCH_SOCCLK_MASK 0xFFFFFFFFL +//SYSHUB_CL_MASK_SOCCLK +#define SYSHUB_CL_MASK_SOCCLK__MP1DRAM_MASK_DIS_SOCCLK__SHIFT 0x1 +#define SYSHUB_CL_MASK_SOCCLK__MP1_MASK_DIS_SOCCLK__SHIFT 0x2 +#define SYSHUB_CL_MASK_SOCCLK__MP1DRAM_MASK_DIS_SOCCLK_MASK 0x00000002L +#define SYSHUB_CL_MASK_SOCCLK__MP1_MASK_DIS_SOCCLK_MASK 0x00000004L +//SYSHUB_HANG_CNTL_SOCCLK +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW0_CL0__SHIFT 0x0 +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW0_CL1__SHIFT 0x1 +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW1_CL0__SHIFT 0x2 +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL0__SHIFT 0x3 +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL1__SHIFT 0x4 +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL2__SHIFT 0x5 +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL3__SHIFT 0x6 +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL4__SHIFT 0x7 +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW0_CL0_MASK 0x00000001L +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW0_CL1_MASK 0x00000002L +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW1_CL0_MASK 0x00000004L +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL0_MASK 0x00000008L +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL1_MASK 0x00000010L +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL2_MASK 0x00000020L +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL3_MASK 0x00000040L +#define SYSHUB_HANG_CNTL_SOCCLK__DROP_UNEXPECTED_RESP_DIS_SOCCLK_SW2_CL4_MASK 0x00000080L +//HST_CLK0_SW0_CL0_CNTL +#define HST_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define HST_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define HST_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define HST_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +//HST_CLK0_SW0_CL1_CNTL +#define HST_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define HST_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define HST_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define HST_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +//HST_CLK0_SW1_CL0_CNTL +#define HST_CLK0_SW1_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define HST_CLK0_SW1_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define HST_CLK0_SW1_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define HST_CLK0_SW1_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +//HST_CLK0_SW2_CL0_CNTL +#define HST_CLK0_SW2_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define 
HST_CLK0_SW2_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define HST_CLK0_SW2_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define HST_CLK0_SW2_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +//HST_CLK0_SW2_CL1_CNTL +#define HST_CLK0_SW2_CL1_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define HST_CLK0_SW2_CL1_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define HST_CLK0_SW2_CL1_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define HST_CLK0_SW2_CL1_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +//HST_CLK0_SW2_CL2_CNTL +#define HST_CLK0_SW2_CL2_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define HST_CLK0_SW2_CL2_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define HST_CLK0_SW2_CL2_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define HST_CLK0_SW2_CL2_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +//HST_CLK0_SW2_CL3_CNTL +#define HST_CLK0_SW2_CL3_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define HST_CLK0_SW2_CL3_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define HST_CLK0_SW2_CL3_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define HST_CLK0_SW2_CL3_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +//HST_CLK0_SW2_CL4_CNTL +#define HST_CLK0_SW2_CL4_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define HST_CLK0_SW2_CL4_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define HST_CLK0_SW2_CL4_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define HST_CLK0_SW2_CL4_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +//DMA_CLK0_SW0_SYSHUB_QOS_CNTL +#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_CNTL_MODE__SHIFT 0x0 +#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_MAX_VALUE__SHIFT 0x1 +#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_MIN_VALUE__SHIFT 0x5 +#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_CNTL_MODE_MASK 0x00000001L +#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_MAX_VALUE_MASK 0x0000001EL +#define DMA_CLK0_SW0_SYSHUB_QOS_CNTL__QOS_MIN_VALUE_MASK 0x000001E0L +//DMA_CLK0_SW0_CL0_CNTL +#define DMA_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define DMA_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_EN__SHIFT 0x8 +#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_VALUE__SHIFT 0x9 +#define DMA_CLK0_SW0_CL0_CNTL__READ_WRR_WEIGHT__SHIFT 0x10 +#define DMA_CLK0_SW0_CL0_CNTL__WRITE_WRR_WEIGHT__SHIFT 0x18 +#define DMA_CLK0_SW0_CL0_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define DMA_CLK0_SW0_CL0_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_EN_MASK 0x00000100L +#define DMA_CLK0_SW0_CL0_CNTL__QOS_STATIC_OVERRIDE_VALUE_MASK 0x00001E00L +#define DMA_CLK0_SW0_CL0_CNTL__READ_WRR_WEIGHT_MASK 0x00FF0000L +#define DMA_CLK0_SW0_CL0_CNTL__WRITE_WRR_WEIGHT_MASK 0xFF000000L +//DMA_CLK0_SW0_CL1_CNTL +#define DMA_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN__SHIFT 0x0 +#define DMA_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN__SHIFT 0x1 +#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_EN__SHIFT 0x8 +#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_VALUE__SHIFT 0x9 +#define DMA_CLK0_SW0_CL1_CNTL__READ_WRR_WEIGHT__SHIFT 0x10 +#define DMA_CLK0_SW0_CL1_CNTL__WRITE_WRR_WEIGHT__SHIFT 0x18 +#define DMA_CLK0_SW0_CL1_CNTL__FLR_ON_RS_RESET_EN_MASK 0x00000001L +#define DMA_CLK0_SW0_CL1_CNTL__LKRST_ON_RS_RESET_EN_MASK 0x00000002L +#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_EN_MASK 0x00000100L +#define DMA_CLK0_SW0_CL1_CNTL__QOS_STATIC_OVERRIDE_VALUE_MASK 0x00001E00L +#define DMA_CLK0_SW0_CL1_CNTL__READ_WRR_WEIGHT_MASK 0x00FF0000L +#define DMA_CLK0_SW0_CL1_CNTL__WRITE_WRR_WEIGHT_MASK 0xFF000000L +//SYSHUB_DS_CTRL_SHUBCLK +#define SYSHUB_DS_CTRL_SHUBCLK__SYSHUB_SHUBCLK_DEEPSLEEP_ALLOW_ENABLE__SHIFT 0x1c +#define 
SYSHUB_DS_CTRL_SHUBCLK__SYSHUB_SHUBCLK_DS_EN__SHIFT 0x1f +#define SYSHUB_DS_CTRL_SHUBCLK__SYSHUB_SHUBCLK_DEEPSLEEP_ALLOW_ENABLE_MASK 0x10000000L +#define SYSHUB_DS_CTRL_SHUBCLK__SYSHUB_SHUBCLK_DS_EN_MASK 0x80000000L +//SYSHUB_DS_CTRL2_SHUBCLK +#define SYSHUB_DS_CTRL2_SHUBCLK__SYSHUB_SHUBCLK_DS_TIMER__SHIFT 0x0 +#define SYSHUB_DS_CTRL2_SHUBCLK__SYSHUB_SHUBCLK_DS_TIMER_MASK 0x0000FFFFL +//SYSHUB_BGEN_ENHANCEMENT_BYPASS_EN_SHUBCLK +//SYSHUB_BGEN_ENHANCEMENT_IMM_EN_SHUBCLK +//SYSHUB_MGCG_CTRL_SHUBCLK +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK__SHIFT 0x0 +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_MODE_SHUBCLK__SHIFT 0x1 +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_HYSTERESIS_SHUBCLK__SHIFT 0x2 +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_HST_DIS_SHUBCLK__SHIFT 0xa +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_DMA_DIS_SHUBCLK__SHIFT 0xb +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_REG_DIS_SHUBCLK__SHIFT 0xc +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_AER_DIS_SHUBCLK__SHIFT 0xd +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_EN_SHUBCLK_MASK 0x00000001L +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_MODE_SHUBCLK_MASK 0x00000002L +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_HYSTERESIS_SHUBCLK_MASK 0x000003FCL +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_HST_DIS_SHUBCLK_MASK 0x00000400L +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_DMA_DIS_SHUBCLK_MASK 0x00000800L +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_REG_DIS_SHUBCLK_MASK 0x00001000L +#define SYSHUB_MGCG_CTRL_SHUBCLK__SYSHUB_MGCG_AER_DIS_SHUBCLK_MASK 0x00002000L +//SYSHUB_SCRATCH_SHUBCLK +#define SYSHUB_SCRATCH_SHUBCLK__SCRATCH_SHUBCLK__SHIFT 0x0 +#define SYSHUB_SCRATCH_SHUBCLK__SCRATCH_SHUBCLK_MASK 0xFFFFFFFFL +//SYSHUB_SELECT_SHUBCLK +#define SYSHUB_SELECT_SHUBCLK__SELECT_USB0__SHIFT 0x0 +#define SYSHUB_SELECT_SHUBCLK__SELECT_USB1__SHIFT 0x1 +#define SYSHUB_SELECT_SHUBCLK__SELECT_USB0_MASK 0x00000001L +#define SYSHUB_SELECT_SHUBCLK__SELECT_USB1_MASK 0x00000002L +//SYSHUB_SCRATCH_LCLK +#define SYSHUB_SCRATCH_LCLK__SCRATCH_LCLK__SHIFT 0x0 +#define SYSHUB_SCRATCH_LCLK__SCRATCH_LCLK_MASK 0xFFFFFFFFL +//NIC400_0_ASIB_0_FN_MOD +#define NIC400_0_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0 +#define NIC400_0_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1 +#define NIC400_0_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L +#define NIC400_0_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L +//NIC400_0_AMIB_0_FN_MOD_BM_ISS +#define NIC400_0_AMIB_0_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0 +#define NIC400_0_AMIB_0_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1 +#define NIC400_0_AMIB_0_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L +#define NIC400_0_AMIB_0_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L +//NIC400_0_AMIB_1_FN_MOD_BM_ISS +#define NIC400_0_AMIB_1_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0 +#define NIC400_0_AMIB_1_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1 +#define NIC400_0_AMIB_1_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L +#define NIC400_0_AMIB_1_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L +//NIC400_2_ASIB_0_FN_MOD +#define NIC400_2_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0 +#define NIC400_2_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1 +#define NIC400_2_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L +#define NIC400_2_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L +//NIC400_2_AMIB_0_FN_MOD_BM_ISS +#define NIC400_2_AMIB_0_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0 +#define NIC400_2_AMIB_0_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1 +#define 
NIC400_2_AMIB_0_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L +#define NIC400_2_AMIB_0_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L +//NIC400_2_AMIB_1_FN_MOD_BM_ISS +#define NIC400_2_AMIB_1_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0 +#define NIC400_2_AMIB_1_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1 +#define NIC400_2_AMIB_1_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L +#define NIC400_2_AMIB_1_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L +//NIC400_2_AMIB_2_FN_MOD_BM_ISS +#define NIC400_2_AMIB_2_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0 +#define NIC400_2_AMIB_2_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1 +#define NIC400_2_AMIB_2_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L +#define NIC400_2_AMIB_2_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L +//NIC400_2_AMIB_3_FN_MOD_BM_ISS +#define NIC400_2_AMIB_3_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0 +#define NIC400_2_AMIB_3_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1 +#define NIC400_2_AMIB_3_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L +#define NIC400_2_AMIB_3_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L +//NIC400_2_AMIB_4_FN_MOD_BM_ISS +#define NIC400_2_AMIB_4_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0 +#define NIC400_2_AMIB_4_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1 +#define NIC400_2_AMIB_4_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L +#define NIC400_2_AMIB_4_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L +//NIC400_3_AMIB_0_FN_MOD_BM_ISS +#define NIC400_3_AMIB_0_FN_MOD_BM_ISS__read_iss_override__SHIFT 0x0 +#define NIC400_3_AMIB_0_FN_MOD_BM_ISS__write_iss_override__SHIFT 0x1 +#define NIC400_3_AMIB_0_FN_MOD_BM_ISS__read_iss_override_MASK 0x00000001L +#define NIC400_3_AMIB_0_FN_MOD_BM_ISS__write_iss_override_MASK 0x00000002L +//NIC400_3_ASIB_0_FN_MOD +#define NIC400_3_ASIB_0_FN_MOD__read_iss_override__SHIFT 0x0 +#define NIC400_3_ASIB_0_FN_MOD__write_iss_override__SHIFT 0x1 +#define NIC400_3_ASIB_0_FN_MOD__read_iss_override_MASK 0x00000001L +#define NIC400_3_ASIB_0_FN_MOD__write_iss_override_MASK 0x00000002L +//NIC400_3_ASIB_0_QOS_CNTL +#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_rate__SHIFT 0x0 +#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_rate__SHIFT 0x1 +#define NIC400_3_ASIB_0_QOS_CNTL__en_awar_rate__SHIFT 0x2 +#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_fc__SHIFT 0x3 +#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_fc__SHIFT 0x4 +#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_ot__SHIFT 0x5 +#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_ot__SHIFT 0x6 +#define NIC400_3_ASIB_0_QOS_CNTL__en_awar_ot__SHIFT 0x7 +#define NIC400_3_ASIB_0_QOS_CNTL__mode_aw_fc__SHIFT 0x10 +#define NIC400_3_ASIB_0_QOS_CNTL__mode_ar_fc__SHIFT 0x14 +#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_rate_MASK 0x00000001L +#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_rate_MASK 0x00000002L +#define NIC400_3_ASIB_0_QOS_CNTL__en_awar_rate_MASK 0x00000004L +#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_fc_MASK 0x00000008L +#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_fc_MASK 0x00000010L +#define NIC400_3_ASIB_0_QOS_CNTL__en_aw_ot_MASK 0x00000020L +#define NIC400_3_ASIB_0_QOS_CNTL__en_ar_ot_MASK 0x00000040L +#define NIC400_3_ASIB_0_QOS_CNTL__en_awar_ot_MASK 0x00000080L +#define NIC400_3_ASIB_0_QOS_CNTL__mode_aw_fc_MASK 0x00010000L +#define NIC400_3_ASIB_0_QOS_CNTL__mode_ar_fc_MASK 0x00100000L +//NIC400_3_ASIB_0_MAX_OT +#define NIC400_3_ASIB_0_MAX_OT__aw_max_otf__SHIFT 0x0 +#define NIC400_3_ASIB_0_MAX_OT__aw_max_oti__SHIFT 0x8 +#define NIC400_3_ASIB_0_MAX_OT__ar_max_otf__SHIFT 0x10 +#define NIC400_3_ASIB_0_MAX_OT__ar_max_oti__SHIFT 0x18 +#define 
NIC400_3_ASIB_0_MAX_OT__aw_max_otf_MASK 0x000000FFL +#define NIC400_3_ASIB_0_MAX_OT__aw_max_oti_MASK 0x00003F00L +#define NIC400_3_ASIB_0_MAX_OT__ar_max_otf_MASK 0x00FF0000L +#define NIC400_3_ASIB_0_MAX_OT__ar_max_oti_MASK 0x3F000000L +//NIC400_3_ASIB_0_MAX_COMB_OT +#define NIC400_3_ASIB_0_MAX_COMB_OT__awar_max_otf__SHIFT 0x0 +#define NIC400_3_ASIB_0_MAX_COMB_OT__awar_max_oti__SHIFT 0x8 +#define NIC400_3_ASIB_0_MAX_COMB_OT__awar_max_otf_MASK 0x000000FFL +#define NIC400_3_ASIB_0_MAX_COMB_OT__awar_max_oti_MASK 0x00007F00L +//NIC400_3_ASIB_0_AW_P +#define NIC400_3_ASIB_0_AW_P__aw_p__SHIFT 0x18 +#define NIC400_3_ASIB_0_AW_P__aw_p_MASK 0xFF000000L +//NIC400_3_ASIB_0_AW_B +#define NIC400_3_ASIB_0_AW_B__aw_b__SHIFT 0x0 +#define NIC400_3_ASIB_0_AW_B__aw_b_MASK 0x0000FFFFL +//NIC400_3_ASIB_0_AW_R +#define NIC400_3_ASIB_0_AW_R__aw_r__SHIFT 0x14 +#define NIC400_3_ASIB_0_AW_R__aw_r_MASK 0xFFF00000L +//NIC400_3_ASIB_0_AR_P +#define NIC400_3_ASIB_0_AR_P__ar_p__SHIFT 0x18 +#define NIC400_3_ASIB_0_AR_P__ar_p_MASK 0xFF000000L +//NIC400_3_ASIB_0_AR_B +#define NIC400_3_ASIB_0_AR_B__ar_b__SHIFT 0x0 +#define NIC400_3_ASIB_0_AR_B__ar_b_MASK 0x0000FFFFL +//NIC400_3_ASIB_0_AR_R +#define NIC400_3_ASIB_0_AR_R__ar_r__SHIFT 0x14 +#define NIC400_3_ASIB_0_AR_R__ar_r_MASK 0xFFF00000L +//NIC400_3_ASIB_0_TARGET_FC +#define NIC400_3_ASIB_0_TARGET_FC__aw_tgt_latency__SHIFT 0x0 +#define NIC400_3_ASIB_0_TARGET_FC__ar_tgt_latency__SHIFT 0x10 +#define NIC400_3_ASIB_0_TARGET_FC__aw_tgt_latency_MASK 0x00000FFFL +#define NIC400_3_ASIB_0_TARGET_FC__ar_tgt_latency_MASK 0x0FFF0000L +//NIC400_3_ASIB_0_KI_FC +#define NIC400_3_ASIB_0_KI_FC__aw_tgt_latency__SHIFT 0x0 +#define NIC400_3_ASIB_0_KI_FC__ar_tgt_latency__SHIFT 0x8 +#define NIC400_3_ASIB_0_KI_FC__aw_tgt_latency_MASK 0x00000007L +#define NIC400_3_ASIB_0_KI_FC__ar_tgt_latency_MASK 0x00000700L +//NIC400_3_ASIB_0_QOS_RANGE +#define NIC400_3_ASIB_0_QOS_RANGE__aw_min_qos__SHIFT 0x0 +#define NIC400_3_ASIB_0_QOS_RANGE__aw_max_qos__SHIFT 0x8 +#define NIC400_3_ASIB_0_QOS_RANGE__ar_min_qos__SHIFT 0x10 +#define NIC400_3_ASIB_0_QOS_RANGE__ar_max_qos__SHIFT 0x18 +#define NIC400_3_ASIB_0_QOS_RANGE__aw_min_qos_MASK 0x0000000FL +#define NIC400_3_ASIB_0_QOS_RANGE__aw_max_qos_MASK 0x00000F00L +#define NIC400_3_ASIB_0_QOS_RANGE__ar_min_qos_MASK 0x000F0000L +#define NIC400_3_ASIB_0_QOS_RANGE__ar_max_qos_MASK 0x0F000000L +//NIC400_3_ASIB_1_FN_MOD +#define NIC400_3_ASIB_1_FN_MOD__read_iss_override__SHIFT 0x0 +#define NIC400_3_ASIB_1_FN_MOD__write_iss_override__SHIFT 0x1 +#define NIC400_3_ASIB_1_FN_MOD__read_iss_override_MASK 0x00000001L +#define NIC400_3_ASIB_1_FN_MOD__write_iss_override_MASK 0x00000002L +//NIC400_3_ASIB_1_QOS_CNTL +#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_rate__SHIFT 0x0 +#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_rate__SHIFT 0x1 +#define NIC400_3_ASIB_1_QOS_CNTL__en_awar_rate__SHIFT 0x2 +#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_fc__SHIFT 0x3 +#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_fc__SHIFT 0x4 +#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_ot__SHIFT 0x5 +#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_ot__SHIFT 0x6 +#define NIC400_3_ASIB_1_QOS_CNTL__en_awar_ot__SHIFT 0x7 +#define NIC400_3_ASIB_1_QOS_CNTL__mode_aw_fc__SHIFT 0x10 +#define NIC400_3_ASIB_1_QOS_CNTL__mode_ar_fc__SHIFT 0x14 +#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_rate_MASK 0x00000001L +#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_rate_MASK 0x00000002L +#define NIC400_3_ASIB_1_QOS_CNTL__en_awar_rate_MASK 0x00000004L +#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_fc_MASK 0x00000008L +#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_fc_MASK 0x00000010L 
+#define NIC400_3_ASIB_1_QOS_CNTL__en_aw_ot_MASK 0x00000020L +#define NIC400_3_ASIB_1_QOS_CNTL__en_ar_ot_MASK 0x00000040L +#define NIC400_3_ASIB_1_QOS_CNTL__en_awar_ot_MASK 0x00000080L +#define NIC400_3_ASIB_1_QOS_CNTL__mode_aw_fc_MASK 0x00010000L +#define NIC400_3_ASIB_1_QOS_CNTL__mode_ar_fc_MASK 0x00100000L +//NIC400_3_ASIB_1_MAX_OT +#define NIC400_3_ASIB_1_MAX_OT__aw_max_otf__SHIFT 0x0 +#define NIC400_3_ASIB_1_MAX_OT__aw_max_oti__SHIFT 0x8 +#define NIC400_3_ASIB_1_MAX_OT__ar_max_otf__SHIFT 0x10 +#define NIC400_3_ASIB_1_MAX_OT__ar_max_oti__SHIFT 0x18 +#define NIC400_3_ASIB_1_MAX_OT__aw_max_otf_MASK 0x000000FFL +#define NIC400_3_ASIB_1_MAX_OT__aw_max_oti_MASK 0x00003F00L +#define NIC400_3_ASIB_1_MAX_OT__ar_max_otf_MASK 0x00FF0000L +#define NIC400_3_ASIB_1_MAX_OT__ar_max_oti_MASK 0x3F000000L +//NIC400_3_ASIB_1_MAX_COMB_OT +#define NIC400_3_ASIB_1_MAX_COMB_OT__awar_max_otf__SHIFT 0x0 +#define NIC400_3_ASIB_1_MAX_COMB_OT__awar_max_oti__SHIFT 0x8 +#define NIC400_3_ASIB_1_MAX_COMB_OT__awar_max_otf_MASK 0x000000FFL +#define NIC400_3_ASIB_1_MAX_COMB_OT__awar_max_oti_MASK 0x00007F00L +//NIC400_3_ASIB_1_AW_P +#define NIC400_3_ASIB_1_AW_P__aw_p__SHIFT 0x18 +#define NIC400_3_ASIB_1_AW_P__aw_p_MASK 0xFF000000L +//NIC400_3_ASIB_1_AW_B +#define NIC400_3_ASIB_1_AW_B__aw_b__SHIFT 0x0 +#define NIC400_3_ASIB_1_AW_B__aw_b_MASK 0x0000FFFFL +//NIC400_3_ASIB_1_AW_R +#define NIC400_3_ASIB_1_AW_R__aw_r__SHIFT 0x14 +#define NIC400_3_ASIB_1_AW_R__aw_r_MASK 0xFFF00000L +//NIC400_3_ASIB_1_AR_P +#define NIC400_3_ASIB_1_AR_P__ar_p__SHIFT 0x18 +#define NIC400_3_ASIB_1_AR_P__ar_p_MASK 0xFF000000L +//NIC400_3_ASIB_1_AR_B +#define NIC400_3_ASIB_1_AR_B__ar_b__SHIFT 0x0 +#define NIC400_3_ASIB_1_AR_B__ar_b_MASK 0x0000FFFFL +//NIC400_3_ASIB_1_AR_R +#define NIC400_3_ASIB_1_AR_R__ar_r__SHIFT 0x14 +#define NIC400_3_ASIB_1_AR_R__ar_r_MASK 0xFFF00000L +//NIC400_3_ASIB_1_TARGET_FC +#define NIC400_3_ASIB_1_TARGET_FC__aw_tgt_latency__SHIFT 0x0 +#define NIC400_3_ASIB_1_TARGET_FC__ar_tgt_latency__SHIFT 0x10 +#define NIC400_3_ASIB_1_TARGET_FC__aw_tgt_latency_MASK 0x00000FFFL +#define NIC400_3_ASIB_1_TARGET_FC__ar_tgt_latency_MASK 0x0FFF0000L +//NIC400_3_ASIB_1_KI_FC +#define NIC400_3_ASIB_1_KI_FC__aw_tgt_latency__SHIFT 0x0 +#define NIC400_3_ASIB_1_KI_FC__ar_tgt_latency__SHIFT 0x8 +#define NIC400_3_ASIB_1_KI_FC__aw_tgt_latency_MASK 0x00000007L +#define NIC400_3_ASIB_1_KI_FC__ar_tgt_latency_MASK 0x00000700L +//NIC400_3_ASIB_1_QOS_RANGE +#define NIC400_3_ASIB_1_QOS_RANGE__aw_min_qos__SHIFT 0x0 +#define NIC400_3_ASIB_1_QOS_RANGE__aw_max_qos__SHIFT 0x8 +#define NIC400_3_ASIB_1_QOS_RANGE__ar_min_qos__SHIFT 0x10 +#define NIC400_3_ASIB_1_QOS_RANGE__ar_max_qos__SHIFT 0x18 +#define NIC400_3_ASIB_1_QOS_RANGE__aw_min_qos_MASK 0x0000000FL +#define NIC400_3_ASIB_1_QOS_RANGE__aw_max_qos_MASK 0x00000F00L +#define NIC400_3_ASIB_1_QOS_RANGE__ar_min_qos_MASK 0x000F0000L +#define NIC400_3_ASIB_1_QOS_RANGE__ar_max_qos_MASK 0x0F000000L + + +// addressBlock: nbio_nbif0_nbif_sion_SIONDEC +//SION_CL0_RdRsp_BurstTarget_REG0 +#define SION_CL0_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL0_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL0_RdRsp_BurstTarget_REG1 +#define SION_CL0_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL0_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL0_RdRsp_TimeSlot_REG0 +#define SION_CL0_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL0_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL 
+//SION_CL0_RdRsp_TimeSlot_REG1 +#define SION_CL0_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL0_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL0_WrRsp_BurstTarget_REG0 +#define SION_CL0_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL0_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL0_WrRsp_BurstTarget_REG1 +#define SION_CL0_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL0_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL0_WrRsp_TimeSlot_REG0 +#define SION_CL0_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL0_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL0_WrRsp_TimeSlot_REG1 +#define SION_CL0_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL0_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL0_Req_BurstTarget_REG0 +#define SION_CL0_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL0_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL0_Req_BurstTarget_REG1 +#define SION_CL0_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL0_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL0_Req_TimeSlot_REG0 +#define SION_CL0_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL0_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL0_Req_TimeSlot_REG1 +#define SION_CL0_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL0_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL0_ReqPoolCredit_Alloc_REG0 +#define SION_CL0_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL0_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL0_ReqPoolCredit_Alloc_REG1 +#define SION_CL0_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL0_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL0_DataPoolCredit_Alloc_REG0 +#define SION_CL0_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL0_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL0_DataPoolCredit_Alloc_REG1 +#define SION_CL0_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL0_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL0_RdRspPoolCredit_Alloc_REG0 +#define SION_CL0_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL0_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL0_RdRspPoolCredit_Alloc_REG1 +#define SION_CL0_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL0_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL0_WrRspPoolCredit_Alloc_REG0 +#define SION_CL0_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL0_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL0_WrRspPoolCredit_Alloc_REG1 +#define SION_CL0_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL0_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL1_RdRsp_BurstTarget_REG0 +#define SION_CL1_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL1_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL1_RdRsp_BurstTarget_REG1 +#define 
SION_CL1_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL1_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL1_RdRsp_TimeSlot_REG0 +#define SION_CL1_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL1_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL1_RdRsp_TimeSlot_REG1 +#define SION_CL1_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL1_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL1_WrRsp_BurstTarget_REG0 +#define SION_CL1_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL1_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL1_WrRsp_BurstTarget_REG1 +#define SION_CL1_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL1_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL1_WrRsp_TimeSlot_REG0 +#define SION_CL1_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL1_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL1_WrRsp_TimeSlot_REG1 +#define SION_CL1_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL1_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL1_Req_BurstTarget_REG0 +#define SION_CL1_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL1_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL1_Req_BurstTarget_REG1 +#define SION_CL1_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL1_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL1_Req_TimeSlot_REG0 +#define SION_CL1_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL1_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL1_Req_TimeSlot_REG1 +#define SION_CL1_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL1_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL1_ReqPoolCredit_Alloc_REG0 +#define SION_CL1_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL1_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL1_ReqPoolCredit_Alloc_REG1 +#define SION_CL1_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL1_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL1_DataPoolCredit_Alloc_REG0 +#define SION_CL1_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL1_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL1_DataPoolCredit_Alloc_REG1 +#define SION_CL1_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL1_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL1_RdRspPoolCredit_Alloc_REG0 +#define SION_CL1_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL1_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL1_RdRspPoolCredit_Alloc_REG1 +#define SION_CL1_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL1_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL1_WrRspPoolCredit_Alloc_REG0 +#define SION_CL1_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL1_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL1_WrRspPoolCredit_Alloc_REG1 +#define SION_CL1_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0 
+#define SION_CL1_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL2_RdRsp_BurstTarget_REG0 +#define SION_CL2_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL2_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL2_RdRsp_BurstTarget_REG1 +#define SION_CL2_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL2_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL2_RdRsp_TimeSlot_REG0 +#define SION_CL2_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL2_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL2_RdRsp_TimeSlot_REG1 +#define SION_CL2_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL2_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL2_WrRsp_BurstTarget_REG0 +#define SION_CL2_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL2_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL2_WrRsp_BurstTarget_REG1 +#define SION_CL2_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL2_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL2_WrRsp_TimeSlot_REG0 +#define SION_CL2_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL2_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL2_WrRsp_TimeSlot_REG1 +#define SION_CL2_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL2_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL2_Req_BurstTarget_REG0 +#define SION_CL2_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL2_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL2_Req_BurstTarget_REG1 +#define SION_CL2_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL2_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL2_Req_TimeSlot_REG0 +#define SION_CL2_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL2_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL2_Req_TimeSlot_REG1 +#define SION_CL2_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL2_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL2_ReqPoolCredit_Alloc_REG0 +#define SION_CL2_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL2_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL2_ReqPoolCredit_Alloc_REG1 +#define SION_CL2_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL2_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL2_DataPoolCredit_Alloc_REG0 +#define SION_CL2_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL2_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL2_DataPoolCredit_Alloc_REG1 +#define SION_CL2_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL2_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL2_RdRspPoolCredit_Alloc_REG0 +#define SION_CL2_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL2_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL2_RdRspPoolCredit_Alloc_REG1 +#define SION_CL2_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL2_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL 
+//SION_CL2_WrRspPoolCredit_Alloc_REG0 +#define SION_CL2_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL2_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL2_WrRspPoolCredit_Alloc_REG1 +#define SION_CL2_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL2_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL3_RdRsp_BurstTarget_REG0 +#define SION_CL3_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL3_RdRsp_BurstTarget_REG0__RdRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL3_RdRsp_BurstTarget_REG1 +#define SION_CL3_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL3_RdRsp_BurstTarget_REG1__RdRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL3_RdRsp_TimeSlot_REG0 +#define SION_CL3_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL3_RdRsp_TimeSlot_REG0__RdRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL3_RdRsp_TimeSlot_REG1 +#define SION_CL3_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL3_RdRsp_TimeSlot_REG1__RdRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL3_WrRsp_BurstTarget_REG0 +#define SION_CL3_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL3_WrRsp_BurstTarget_REG0__WrRsp_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL3_WrRsp_BurstTarget_REG1 +#define SION_CL3_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL3_WrRsp_BurstTarget_REG1__WrRsp_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL3_WrRsp_TimeSlot_REG0 +#define SION_CL3_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL3_WrRsp_TimeSlot_REG0__WrRsp_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL3_WrRsp_TimeSlot_REG1 +#define SION_CL3_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL3_WrRsp_TimeSlot_REG1__WrRsp_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL3_Req_BurstTarget_REG0 +#define SION_CL3_Req_BurstTarget_REG0__Req_BurstTarget_31_0__SHIFT 0x0 +#define SION_CL3_Req_BurstTarget_REG0__Req_BurstTarget_31_0_MASK 0xFFFFFFFFL +//SION_CL3_Req_BurstTarget_REG1 +#define SION_CL3_Req_BurstTarget_REG1__Req_BurstTarget_63_32__SHIFT 0x0 +#define SION_CL3_Req_BurstTarget_REG1__Req_BurstTarget_63_32_MASK 0xFFFFFFFFL +//SION_CL3_Req_TimeSlot_REG0 +#define SION_CL3_Req_TimeSlot_REG0__Req_TimeSlot_31_0__SHIFT 0x0 +#define SION_CL3_Req_TimeSlot_REG0__Req_TimeSlot_31_0_MASK 0xFFFFFFFFL +//SION_CL3_Req_TimeSlot_REG1 +#define SION_CL3_Req_TimeSlot_REG1__Req_TimeSlot_63_32__SHIFT 0x0 +#define SION_CL3_Req_TimeSlot_REG1__Req_TimeSlot_63_32_MASK 0xFFFFFFFFL +//SION_CL3_ReqPoolCredit_Alloc_REG0 +#define SION_CL3_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL3_ReqPoolCredit_Alloc_REG0__ReqPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL3_ReqPoolCredit_Alloc_REG1 +#define SION_CL3_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL3_ReqPoolCredit_Alloc_REG1__ReqPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL3_DataPoolCredit_Alloc_REG0 +#define SION_CL3_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL3_DataPoolCredit_Alloc_REG0__DataPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL3_DataPoolCredit_Alloc_REG1 +#define SION_CL3_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL3_DataPoolCredit_Alloc_REG1__DataPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL3_RdRspPoolCredit_Alloc_REG0 +#define 
SION_CL3_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL3_RdRspPoolCredit_Alloc_REG0__RdRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL3_RdRspPoolCredit_Alloc_REG1 +#define SION_CL3_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL3_RdRspPoolCredit_Alloc_REG1__RdRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CL3_WrRspPoolCredit_Alloc_REG0 +#define SION_CL3_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0__SHIFT 0x0 +#define SION_CL3_WrRspPoolCredit_Alloc_REG0__WrRspPoolCredit_Alloc_31_0_MASK 0xFFFFFFFFL +//SION_CL3_WrRspPoolCredit_Alloc_REG1 +#define SION_CL3_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32__SHIFT 0x0 +#define SION_CL3_WrRspPoolCredit_Alloc_REG1__WrRspPoolCredit_Alloc_63_32_MASK 0xFFFFFFFFL +//SION_CNTL_REG0 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK0__SHIFT 0x0 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK1__SHIFT 0x1 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK2__SHIFT 0x2 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK3__SHIFT 0x3 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK4__SHIFT 0x4 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK5__SHIFT 0x5 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK6__SHIFT 0x6 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK7__SHIFT 0x7 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK8__SHIFT 0x8 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK9__SHIFT 0x9 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK0__SHIFT 0xa +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK1__SHIFT 0xb +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK2__SHIFT 0xc +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK3__SHIFT 0xd +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK4__SHIFT 0xe +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK5__SHIFT 0xf +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK6__SHIFT 0x10 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK7__SHIFT 0x11 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK8__SHIFT 0x12 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK9__SHIFT 0x13 +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK0_MASK 0x00000001L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK1_MASK 0x00000002L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK2_MASK 0x00000004L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK3_MASK 0x00000008L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK4_MASK 0x00000010L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK5_MASK 0x00000020L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK6_MASK 0x00000040L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK7_MASK 0x00000080L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK8_MASK 0x00000100L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_0_SOFT_OVERRIDE_CLK9_MASK 0x00000200L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK0_MASK 0x00000400L +#define 
SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK1_MASK 0x00000800L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK2_MASK 0x00001000L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK3_MASK 0x00002000L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK4_MASK 0x00004000L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK5_MASK 0x00008000L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK6_MASK 0x00010000L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK7_MASK 0x00020000L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK8_MASK 0x00040000L +#define SION_CNTL_REG0__NBIFSION_GLUE_CG_LCLK_CTRL_1_SOFT_OVERRIDE_CLK9_MASK 0x00080000L +//SION_CNTL_REG1 +#define SION_CNTL_REG1__LIVELOCK_WATCHDOG_THRESHOLD__SHIFT 0x0 +#define SION_CNTL_REG1__CG_OFF_HYSTERESIS__SHIFT 0x8 +#define SION_CNTL_REG1__LIVELOCK_WATCHDOG_THRESHOLD_MASK 0x000000FFL +#define SION_CNTL_REG1__CG_OFF_HYSTERESIS_MASK 0x0000FF00L + + +// addressBlock: nbio_nbif0_gdc_rst_GDCRST_DEC +//SHUB_PF_FLR_RST +#define SHUB_PF_FLR_RST__DEV0_PF0_FLR_RST__SHIFT 0x0 +#define SHUB_PF_FLR_RST__DEV0_PF1_FLR_RST__SHIFT 0x1 +#define SHUB_PF_FLR_RST__DEV0_PF0_FLR_RST_MASK 0x00000001L +#define SHUB_PF_FLR_RST__DEV0_PF1_FLR_RST_MASK 0x00000002L +//SHUB_GFX_DRV_VPU_RST +#define SHUB_GFX_DRV_VPU_RST__GFX_DRV_MODE1_RST__SHIFT 0x0 +#define SHUB_GFX_DRV_VPU_RST__GFX_DRV_MODE1_RST_MASK 0x00000001L +//SHUB_LINK_RESET +#define SHUB_LINK_RESET__LINK_P0_RESET__SHIFT 0x0 +#define SHUB_LINK_RESET__LINK_P1_RESET__SHIFT 0x1 +#define SHUB_LINK_RESET__LINK_P2_RESET__SHIFT 0x2 +#define SHUB_LINK_RESET__LINK_P0_RESET_MASK 0x00000001L +#define SHUB_LINK_RESET__LINK_P1_RESET_MASK 0x00000002L +#define SHUB_LINK_RESET__LINK_P2_RESET_MASK 0x00000004L +//SHUB_PF0_VF_FLR_RST +#define SHUB_PF0_VF_FLR_RST__PF0_VF0_FLR_RST__SHIFT 0x0 +#define SHUB_PF0_VF_FLR_RST__PF0_VF1_FLR_RST__SHIFT 0x1 +#define SHUB_PF0_VF_FLR_RST__PF0_VF2_FLR_RST__SHIFT 0x2 +#define SHUB_PF0_VF_FLR_RST__PF0_VF3_FLR_RST__SHIFT 0x3 +#define SHUB_PF0_VF_FLR_RST__PF0_VF4_FLR_RST__SHIFT 0x4 +#define SHUB_PF0_VF_FLR_RST__PF0_VF5_FLR_RST__SHIFT 0x5 +#define SHUB_PF0_VF_FLR_RST__PF0_VF6_FLR_RST__SHIFT 0x6 +#define SHUB_PF0_VF_FLR_RST__PF0_VF7_FLR_RST__SHIFT 0x7 +#define SHUB_PF0_VF_FLR_RST__PF0_VF8_FLR_RST__SHIFT 0x8 +#define SHUB_PF0_VF_FLR_RST__PF0_VF9_FLR_RST__SHIFT 0x9 +#define SHUB_PF0_VF_FLR_RST__PF0_VF10_FLR_RST__SHIFT 0xa +#define SHUB_PF0_VF_FLR_RST__PF0_VF11_FLR_RST__SHIFT 0xb +#define SHUB_PF0_VF_FLR_RST__PF0_VF12_FLR_RST__SHIFT 0xc +#define SHUB_PF0_VF_FLR_RST__PF0_VF13_FLR_RST__SHIFT 0xd +#define SHUB_PF0_VF_FLR_RST__PF0_VF14_FLR_RST__SHIFT 0xe +#define SHUB_PF0_VF_FLR_RST__PF0_VF15_FLR_RST__SHIFT 0xf +#define SHUB_PF0_VF_FLR_RST__PF0_SOFTPF_FLR_RST__SHIFT 0x1f +#define SHUB_PF0_VF_FLR_RST__PF0_VF0_FLR_RST_MASK 0x00000001L +#define SHUB_PF0_VF_FLR_RST__PF0_VF1_FLR_RST_MASK 0x00000002L +#define SHUB_PF0_VF_FLR_RST__PF0_VF2_FLR_RST_MASK 0x00000004L +#define SHUB_PF0_VF_FLR_RST__PF0_VF3_FLR_RST_MASK 0x00000008L +#define SHUB_PF0_VF_FLR_RST__PF0_VF4_FLR_RST_MASK 0x00000010L +#define SHUB_PF0_VF_FLR_RST__PF0_VF5_FLR_RST_MASK 0x00000020L +#define SHUB_PF0_VF_FLR_RST__PF0_VF6_FLR_RST_MASK 0x00000040L +#define SHUB_PF0_VF_FLR_RST__PF0_VF7_FLR_RST_MASK 0x00000080L +#define SHUB_PF0_VF_FLR_RST__PF0_VF8_FLR_RST_MASK 0x00000100L +#define SHUB_PF0_VF_FLR_RST__PF0_VF9_FLR_RST_MASK 0x00000200L +#define SHUB_PF0_VF_FLR_RST__PF0_VF10_FLR_RST_MASK 0x00000400L 
+#define SHUB_PF0_VF_FLR_RST__PF0_VF11_FLR_RST_MASK 0x00000800L +#define SHUB_PF0_VF_FLR_RST__PF0_VF12_FLR_RST_MASK 0x00001000L +#define SHUB_PF0_VF_FLR_RST__PF0_VF13_FLR_RST_MASK 0x00002000L +#define SHUB_PF0_VF_FLR_RST__PF0_VF14_FLR_RST_MASK 0x00004000L +#define SHUB_PF0_VF_FLR_RST__PF0_VF15_FLR_RST_MASK 0x00008000L +#define SHUB_PF0_VF_FLR_RST__PF0_SOFTPF_FLR_RST_MASK 0x80000000L +//SHUB_HARD_RST_CTRL +#define SHUB_HARD_RST_CTRL__COR_RESET_EN__SHIFT 0x0 +#define SHUB_HARD_RST_CTRL__REG_RESET_EN__SHIFT 0x1 +#define SHUB_HARD_RST_CTRL__STY_RESET_EN__SHIFT 0x2 +#define SHUB_HARD_RST_CTRL__NIC400_RESET_EN__SHIFT 0x3 +#define SHUB_HARD_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x4 +#define SHUB_HARD_RST_CTRL__SION_AON_RESET_EN__SHIFT 0x5 +#define SHUB_HARD_RST_CTRL__COR_RESET_EN_MASK 0x00000001L +#define SHUB_HARD_RST_CTRL__REG_RESET_EN_MASK 0x00000002L +#define SHUB_HARD_RST_CTRL__STY_RESET_EN_MASK 0x00000004L +#define SHUB_HARD_RST_CTRL__NIC400_RESET_EN_MASK 0x00000008L +#define SHUB_HARD_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000010L +#define SHUB_HARD_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000020L +//SHUB_SOFT_RST_CTRL +#define SHUB_SOFT_RST_CTRL__COR_RESET_EN__SHIFT 0x0 +#define SHUB_SOFT_RST_CTRL__REG_RESET_EN__SHIFT 0x1 +#define SHUB_SOFT_RST_CTRL__STY_RESET_EN__SHIFT 0x2 +#define SHUB_SOFT_RST_CTRL__NIC400_RESET_EN__SHIFT 0x3 +#define SHUB_SOFT_RST_CTRL__SDP_PORT_RESET_EN__SHIFT 0x4 +#define SHUB_SOFT_RST_CTRL__SION_AON_RESET_EN__SHIFT 0x5 +#define SHUB_SOFT_RST_CTRL__COR_RESET_EN_MASK 0x00000001L +#define SHUB_SOFT_RST_CTRL__REG_RESET_EN_MASK 0x00000002L +#define SHUB_SOFT_RST_CTRL__STY_RESET_EN_MASK 0x00000004L +#define SHUB_SOFT_RST_CTRL__NIC400_RESET_EN_MASK 0x00000008L +#define SHUB_SOFT_RST_CTRL__SDP_PORT_RESET_EN_MASK 0x00000010L +#define SHUB_SOFT_RST_CTRL__SION_AON_RESET_EN_MASK 0x00000020L +//SHUB_SDP_PORT_RST +#define SHUB_SDP_PORT_RST__A2S_SDP_PORT_RST__SHIFT 0x0 +#define SHUB_SDP_PORT_RST__NBIFSION_BIF_SDP_PORT_RST__SHIFT 0x1 +#define SHUB_SDP_PORT_RST__ATHUB_HST_SDP_PORT_RST__SHIFT 0x2 +#define SHUB_SDP_PORT_RST__ATHUB_DMA_SDP_PORT_RST__SHIFT 0x3 +#define SHUB_SDP_PORT_RST__ATDMA_NBIFSOIN_SDP_PORT_RST__SHIFT 0x4 +#define SHUB_SDP_PORT_RST__INT_NBIFSION_SDP_PORT_RST__SHIFT 0x5 +#define SHUB_SDP_PORT_RST__MP4SDP_SDP_PORT_RST__SHIFT 0x6 +#define SHUB_SDP_PORT_RST__GDC_HST_SDP_PORT_RST__SHIFT 0x7 +#define SHUB_SDP_PORT_RST__NTB_HST_SDP_PORT_RST__SHIFT 0x8 +#define SHUB_SDP_PORT_RST__NTB_DMA_SDP_PORT_RST__SHIFT 0x9 +#define SHUB_SDP_PORT_RST__SION_AON_RST__SHIFT 0x18 +#define SHUB_SDP_PORT_RST__A2S_SDP_PORT_RST_MASK 0x00000001L +#define SHUB_SDP_PORT_RST__NBIFSION_BIF_SDP_PORT_RST_MASK 0x00000002L +#define SHUB_SDP_PORT_RST__ATHUB_HST_SDP_PORT_RST_MASK 0x00000004L +#define SHUB_SDP_PORT_RST__ATHUB_DMA_SDP_PORT_RST_MASK 0x00000008L +#define SHUB_SDP_PORT_RST__ATDMA_NBIFSOIN_SDP_PORT_RST_MASK 0x00000010L +#define SHUB_SDP_PORT_RST__INT_NBIFSION_SDP_PORT_RST_MASK 0x00000020L +#define SHUB_SDP_PORT_RST__MP4SDP_SDP_PORT_RST_MASK 0x00000040L +#define SHUB_SDP_PORT_RST__GDC_HST_SDP_PORT_RST_MASK 0x00000080L +#define SHUB_SDP_PORT_RST__NTB_HST_SDP_PORT_RST_MASK 0x00000100L +#define SHUB_SDP_PORT_RST__NTB_DMA_SDP_PORT_RST_MASK 0x00000200L +#define SHUB_SDP_PORT_RST__SION_AON_RST_MASK 0x01000000L +//SHUB_RST_MISC_TRL +#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_ATOMIC__SHIFT 0x0 +#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_CYCLE__SHIFT 0x10 +#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_ATOMIC_MASK 0x00000001L +#define SHUB_RST_MISC_TRL__RSMU_SOFT_RST_CYCLE_MASK 0x00FF0000L + + +// addressBlock: 
nbio_nbif0_gdc_ras_gdc_ras_regblk +//GDCL_RAS_CENTRAL_STATUS +#define GDCL_RAS_CENTRAL_STATUS__GDCL_L2C_EgStall_det__SHIFT 0x0 +#define GDCL_RAS_CENTRAL_STATUS__GDCL_L2C_ErrEvent_det__SHIFT 0x1 +#define GDCL_RAS_CENTRAL_STATUS__GDCL_C2L_EgStall_det__SHIFT 0x2 +#define GDCL_RAS_CENTRAL_STATUS__GDCL_C2L_ErrEvent_det__SHIFT 0x3 +#define GDCL_RAS_CENTRAL_STATUS__GDCL_L2C_EgStall_det_MASK 0x00000001L +#define GDCL_RAS_CENTRAL_STATUS__GDCL_L2C_ErrEvent_det_MASK 0x00000002L +#define GDCL_RAS_CENTRAL_STATUS__GDCL_C2L_EgStall_det_MASK 0x00000004L +#define GDCL_RAS_CENTRAL_STATUS__GDCL_C2L_ErrEvent_det_MASK 0x00000008L +//GDCSOC_RAS_CENTRAL_STATUS +#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_EgStall_det__SHIFT 0x0 +#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_ErrEvent_det__SHIFT 0x1 +#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_EgStall_det__SHIFT 0x2 +#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_ErrEvent_det__SHIFT 0x3 +#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_EgStall_det_MASK 0x00000001L +#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_L2C_ErrEvent_det_MASK 0x00000002L +#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_EgStall_det_MASK 0x00000004L +#define GDCSOC_RAS_CENTRAL_STATUS__GDCSOC_C2L_ErrEvent_det_MASK 0x00000008L +//GDCSOC_RAS_LEAF0_CTRL +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0 +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_ERREVENT_EN__SHIFT 0x1 +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_STALL_EN__SHIFT 0x2 +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3 +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_STALL_EN__SHIFT 0x4 +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8 +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_POISON_STALL_EN_MASK 0x00000004L +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_PARITY_STALL_EN_MASK 0x00000010L +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L +#define GDCSOC_RAS_LEAF0_CTRL__GDCSOC_RAS_LEAF0_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L +//GDCSOC_RAS_LEAF1_CTRL +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0 +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_ERREVENT_EN__SHIFT 0x1 +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_STALL_EN__SHIFT 0x2 +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3 +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_STALL_EN__SHIFT 0x4 +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8 +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa +#define 
GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_POISON_STALL_EN_MASK 0x00000004L +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_PARITY_STALL_EN_MASK 0x00000010L +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L +#define GDCSOC_RAS_LEAF1_CTRL__GDCSOC_RAS_LEAF1_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L +//GDCSOC_RAS_LEAF2_CTRL +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_ERREVENT_EN__SHIFT 0x1 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_STALL_EN__SHIFT 0x2 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_STALL_EN__SHIFT 0x4 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_INTR_EN__SHIFT 0x10 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_STALL_EN__SHIFT 0x18 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_STALL_EN__SHIFT 0x19 +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS__SHIFT 0x1a +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS__SHIFT 0x1b +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_POISON_STALL_EN_MASK 0x00000004L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_PARITY_STALL_EN_MASK 0x00000010L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_STALL_EN_MASK 0x01000000L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_STALL_EN_MASK 0x02000000L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS_MASK 0x04000000L +#define GDCSOC_RAS_LEAF2_CTRL__GDCSOC_RAS_LEAF2_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS_MASK 0x08000000L +//GDCSOC_RAS_LEAF3_CTRL +#define 
GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0 +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_ERREVENT_EN__SHIFT 0x1 +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_STALL_EN__SHIFT 0x2 +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3 +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_STALL_EN__SHIFT 0x4 +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8 +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_POISON_STALL_EN_MASK 0x00000004L +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_PARITY_STALL_EN_MASK 0x00000010L +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L +#define GDCSOC_RAS_LEAF3_CTRL__GDCSOC_RAS_LEAF3_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L +//GDCSOC_RAS_LEAF4_CTRL +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0 +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_ERREVENT_EN__SHIFT 0x1 +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_STALL_EN__SHIFT 0x2 +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3 +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_STALL_EN__SHIFT 0x4 +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8 +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_POISON_STALL_EN_MASK 0x00000004L +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_PARITY_STALL_EN_MASK 0x00000010L +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L +#define GDCSOC_RAS_LEAF4_CTRL__GDCSOC_RAS_LEAF4_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L +//GDCSOC_RAS_LEAF5_CTRL +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0 +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_POISON_ERREVENT_EN__SHIFT 0x1 +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_POISON_STALL_EN__SHIFT 0x2 +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3 +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_PARITY_STALL_EN__SHIFT 0x4 
+#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8 +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_POISON_STALL_EN_MASK 0x00000004L +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_PARITY_STALL_EN_MASK 0x00000010L +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L +#define GDCSOC_RAS_LEAF5_CTRL__GDCSOC_RAS_LEAF5_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L +//GDCSOC_RAS_LEAF6_CTRL +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_DET_EN__SHIFT 0x0 +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_POISON_ERREVENT_EN__SHIFT 0x1 +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_POISON_STALL_EN__SHIFT 0x2 +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_PARITY_ERREVENT_EN__SHIFT 0x3 +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_PARITY_STALL_EN__SHIFT 0x4 +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_GEN_EN__SHIFT 0x8 +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_PROP_EN__SHIFT 0xa +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_EGRESS_STALL_PROP_EN__SHIFT 0xb +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_DET_EN_MASK 0x00000001L +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_POISON_ERREVENT_EN_MASK 0x00000002L +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_POISON_STALL_EN_MASK 0x00000004L +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_PARITY_ERREVENT_EN_MASK 0x00000008L +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_PARITY_STALL_EN_MASK 0x00000010L +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_GEN_EN_MASK 0x00000100L +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_ERR_EVENT_PROP_EN_MASK 0x00000400L +#define GDCSOC_RAS_LEAF6_CTRL__GDCSOC_RAS_LEAF6_CTRL_EGRESS_STALL_PROP_EN_MASK 0x00000800L +//GDCSOC_RAS_LEAF2_MISC_CTRL +#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_DROP_EN__SHIFT 0x0 +#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_DROP_EN__SHIFT 0x1 +#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS__SHIFT 0x8 +#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS__SHIFT 0x9 +#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_SHUB_DROP_EN_MASK 0x00000001L +#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_HSTRSP_CDC_DROP_EN_MASK 0x00000002L +#define GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_PORT_MASK_DIS_MASK 0x00000100L +#define 
GDCSOC_RAS_LEAF2_MISC_CTRL__GDCSOC_RAS_LEAF2_MISC_CTRL_ERR_EVENT_RAS_IHINTR_TRANS_MASK_DIS_MASK 0x00000200L +//GDCSOC_RAS_LEAF0_STATUS +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_RECV__SHIFT 0x0 +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_POISON_ERR_DET__SHIFT 0x1 +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_PARITY_ERR_DET__SHIFT 0x2 +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_RECV_MASK 0x00000001L +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_POISON_ERR_DET_MASK 0x00000002L +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_PARITY_ERR_DET_MASK 0x00000004L +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define GDCSOC_RAS_LEAF0_STATUS__GDCSOC_RAS_LEAF0_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//GDCSOC_RAS_LEAF1_STATUS +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_RECV__SHIFT 0x0 +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_POISON_ERR_DET__SHIFT 0x1 +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_PARITY_ERR_DET__SHIFT 0x2 +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_RECV_MASK 0x00000001L +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_POISON_ERR_DET_MASK 0x00000002L +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_PARITY_ERR_DET_MASK 0x00000004L +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define GDCSOC_RAS_LEAF1_STATUS__GDCSOC_RAS_LEAF1_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//GDCSOC_RAS_LEAF2_STATUS +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_RECV__SHIFT 0x0 +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_POISON_ERR_DET__SHIFT 0x1 +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_PARITY_ERR_DET__SHIFT 0x2 +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_RECV_MASK 0x00000001L +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_POISON_ERR_DET_MASK 0x00000002L +#define 
GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_PARITY_ERR_DET_MASK 0x00000004L +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define GDCSOC_RAS_LEAF2_STATUS__GDCSOC_RAS_LEAF2_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//GDCSOC_RAS_LEAF3_STATUS +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_RECV__SHIFT 0x0 +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_POISON_ERR_DET__SHIFT 0x1 +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_PARITY_ERR_DET__SHIFT 0x2 +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_RECV_MASK 0x00000001L +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_POISON_ERR_DET_MASK 0x00000002L +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_PARITY_ERR_DET_MASK 0x00000004L +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define GDCSOC_RAS_LEAF3_STATUS__GDCSOC_RAS_LEAF3_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//GDCSOC_RAS_LEAF4_STATUS +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_RECV__SHIFT 0x0 +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_POISON_ERR_DET__SHIFT 0x1 +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_PARITY_ERR_DET__SHIFT 0x2 +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_RECV_MASK 0x00000001L +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_POISON_ERR_DET_MASK 0x00000002L +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_PARITY_ERR_DET_MASK 0x00000004L +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define GDCSOC_RAS_LEAF4_STATUS__GDCSOC_RAS_LEAF4_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//GDCSOC_RAS_LEAF5_STATUS +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_RECV__SHIFT 0x0 +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_POISON_ERR_DET__SHIFT 0x1 +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_PARITY_ERR_DET__SHIFT 0x2 +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define 
GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_RECV_MASK 0x00000001L +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_POISON_ERR_DET_MASK 0x00000002L +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_PARITY_ERR_DET_MASK 0x00000004L +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define GDCSOC_RAS_LEAF5_STATUS__GDCSOC_RAS_LEAF5_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//GDCSOC_RAS_LEAF6_STATUS +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_RECV__SHIFT 0x0 +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_POISON_ERR_DET__SHIFT 0x1 +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_PARITY_ERR_DET__SHIFT 0x2 +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_PROP_STAT__SHIFT 0xa +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_RECV_MASK 0x00000001L +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_POISON_ERR_DET_MASK 0x00000002L +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_PARITY_ERR_DET_MASK 0x00000004L +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define GDCSOC_RAS_LEAF6_STATUS__GDCSOC_RAS_LEAF6_STATUS_EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//GDCSHUB_RAS_CENTRAL_STATUS +#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_L2C_EgStall_det__SHIFT 0x0 +#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_L2C_ErrEvent_det__SHIFT 0x1 +#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_C2L_EgStall_det__SHIFT 0x2 +#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_C2L_ErrEvent_det__SHIFT 0x3 +#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_L2C_EgStall_det_MASK 0x00000001L +#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_L2C_ErrEvent_det_MASK 0x00000002L +#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_C2L_EgStall_det_MASK 0x00000004L +#define GDCSHUB_RAS_CENTRAL_STATUS__GDCSHUB_C2L_ErrEvent_det_MASK 0x00000008L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_swds_bifcfgdecp +//BIF_CFG_DEV0_SWDS0_VENDOR_ID +#define BIF_CFG_DEV0_SWDS0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_DEVICE_ID +#define BIF_CFG_DEV0_SWDS0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_COMMAND +#define BIF_CFG_DEV0_SWDS0_COMMAND__IOEN_DN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_COMMAND__MEMEN_DN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define 
BIF_CFG_DEV0_SWDS0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_COMMAND__IOEN_DN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_COMMAND__MEMEN_DN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_SWDS0_STATUS +#define BIF_CFG_DEV0_SWDS0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_SWDS0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_SWDS0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_SWDS0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_REVISION_ID +#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_PROG_INTERFACE +#define BIF_CFG_DEV0_SWDS0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_SUB_CLASS +#define BIF_CFG_DEV0_SWDS0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_BASE_CLASS +#define BIF_CFG_DEV0_SWDS0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_CACHE_LINE +#define BIF_CFG_DEV0_SWDS0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL 
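These generated defines follow the usual sh_mask convention of one __SHIFT/_MASK pair per register field. Below is a minimal, illustrative C sketch (not taken from this patch) of how such a pair is consumed; the two field defines are copied from the listing above so the snippet stands alone, and the helper macros merely mirror the driver's REG_GET_FIELD/REG_SET_FIELD style rather than reproducing them.

/*
 * Illustrative sketch only -- not part of this patch.  Demonstrates the
 * generic SHIFT/MASK pattern used by all of the generated defines above.
 */
#include <stdint.h>

/* Field defines copied verbatim from the listing above. */
#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN__SHIFT 0x2
#define BIF_CFG_DEV0_SWDS0_COMMAND__BUS_MASTER_EN_MASK   0x0004L

/* Generic field helpers in the spirit of the driver's REG_GET_FIELD/REG_SET_FIELD. */
#define SH_FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SH_FIELD_SET(val, reg, field, fv) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/*
 * Returns an updated COMMAND value with bus mastering enabled; the caller
 * is assumed to have already read the dword from config space.
 */
static uint32_t swds0_command_enable_bus_mastering(uint32_t cmd)
{
	if (!SH_FIELD_GET(cmd, BIF_CFG_DEV0_SWDS0_COMMAND, BUS_MASTER_EN))
		cmd = SH_FIELD_SET(cmd, BIF_CFG_DEV0_SWDS0_COMMAND,
				   BUS_MASTER_EN, 1);
	return cmd;
}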
+//BIF_CFG_DEV0_SWDS0_LATENCY +#define BIF_CFG_DEV0_SWDS0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_HEADER +#define BIF_CFG_DEV0_SWDS0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_SWDS0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_SWDS0_BIST +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_SWDS0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_SWDS0_BASE_ADDR_1 +#define BIF_CFG_DEV0_SWDS0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__PRIMARY_BUS_MASK 0x000000FFL +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_MASK 0x0000FF00L +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_MASK 0x00FF0000L +#define BIF_CFG_DEV0_SWDS0_SUB_BUS_NUMBER_LATENCY__SECONDARY_LATENCY_TIMER_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_TYPE__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_TYPE_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_BASE_MASK 0x00F0L +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_TYPE_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT__IO_LIMIT_MASK 0xF000L +//BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__RECEIVED_SYSTEM_ERROR_MASK 0x4000L 
+#define BIF_CFG_DEV0_SWDS0_SECONDARY_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_31_20__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_31_20__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_BASE_31_20_MASK 0x0000FFF0L +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_MEM_BASE_LIMIT__MEM_LIMIT_31_20_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_MASK 0x0000FFF0L +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER__PREF_BASE_UPPER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PREF_BASE_UPPER__PREF_BASE_UPPER_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER +#define BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_BASE_31_16__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_BASE_31_16_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_MASK 0xFFFF0000L +//BIF_CFG_DEV0_SWDS0_CAP_PTR +#define BIF_CFG_DEV0_SWDS0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE +#define BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN +#define BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__ISA_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_DEC__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__FAST_B2B_EN__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__PARITY_RESPONSE_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__ISA_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__VGA_DEC_MASK 0x0010L +#define 
BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__MASTER_ABORT_MODE_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_IRQ_BRIDGE_CNTL__FAST_B2B_EN_MASK 0x0080L +//BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_SWDS0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_PMI_CAP +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_CLOCK__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__AUX_CURRENT__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D1_SUPPORT__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D2_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_SUPPORT__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__VERSION_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_CLOCK_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D1_SUPPORT_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__D2_SUPPORT_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_PMI_CAP__PME_SUPPORT_MASK 0xF800L +//BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_PCIE_CAP +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define 
BIF_CFG_DEV0_SWDS0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_SWDS0_DEVICE_CAP +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_SWDS0_DEVICE_CNTL +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL__BRIDGE_CFG_RETRY_EN_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_DEVICE_STATUS +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define 
BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_SWDS0_LINK_CAP +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_LINK_CNTL +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define 
BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_SWDS0_LINK_STATUS +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_SLOT_CAP +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_BUTTON_PRESENT__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_CONTROLLER_PRESENT__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__MRL_SENSOR_PRESENT__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_INDICATOR_PRESENT__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_INDICATOR_PRESENT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_SURPRISE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_CAPABLE__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PHYSICAL_SLOT_NUM__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_BUTTON_PRESENT_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_CONTROLLER_PRESENT_MASK 0x00000002L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__MRL_SENSOR_PRESENT_MASK 0x00000004L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ATTN_INDICATOR_PRESENT_MASK 0x00000008L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PWR_INDICATOR_PRESENT_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_SURPRISE_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__HOTPLUG_CAPABLE_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_VALUE_MASK 0x00007F80L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__SLOT_PWR_LIMIT_SCALE_MASK 0x00018000L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__ELECTROMECH_INTERLOCK_PRESENT_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__NO_COMMAND_COMPLETED_SUPPORTED_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP__PHYSICAL_SLOT_NUM_MASK 0xFFF80000L +//BIF_CFG_DEV0_SWDS0_SLOT_CNTL +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_FAULT_DETECTED_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__HOTPLUG_INTR_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_INDICATOR_CNTL__SHIFT 0x6 +#define 
BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_INDICATOR_CNTL__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_CONTROLLER_CNTL__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__DL_STATE_CHANGED_EN__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_BUTTON_PRESSED_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_FAULT_DETECTED_EN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__MRL_SENSOR_CHANGED_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PRESENCE_DETECT_CHANGED_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__COMMAND_COMPLETED_INTR_EN_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__HOTPLUG_INTR_EN_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ATTN_INDICATOR_CNTL_MASK 0x00C0L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_INDICATOR_CNTL_MASK 0x0300L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__PWR_CONTROLLER_CNTL_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__ELECTROMECH_INTERLOCK_CNTL_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL__DL_STATE_CHANGED_EN_MASK 0x1000L +//BIF_CFG_DEV0_SWDS0_SLOT_STATUS +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ATTN_BUTTON_PRESSED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PWR_FAULT_DETECTED__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_CHANGED__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_CHANGED__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__COMMAND_COMPLETED__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_STATE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_STATE__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__DL_STATE_CHANGED__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ATTN_BUTTON_PRESSED_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PWR_FAULT_DETECTED_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_CHANGED_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_CHANGED_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__COMMAND_COMPLETED_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__MRL_SENSOR_STATE_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__PRESENCE_DETECT_STATE_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__ELECTROMECH_INTERLOCK_STATUS_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS__DL_STATE_CHANGED_MASK 0x0100L +//BIF_CFG_DEV0_SWDS0_DEVICE_CAP2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define 
BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define 
BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_SWDS0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_LINK_CAP2 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_SWDS0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_SWDS0_LINK_CNTL2 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_SWDS0_LINK_STATUS2 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define 
BIF_CFG_DEV0_SWDS0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_SWDS0_SLOT_CAP2 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_SLOT_CNTL2 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_SLOT_STATUS2 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_SWDS0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_SWDS0_SSID_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_SSID_CAP +#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_SSID_CAP__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 
0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L +//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL +//BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define 
BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L 
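A similar sketch, again illustrative only, for decoding rather than writing a field: the negotiated width and current speed are extracted from the downstream-port LINK_STATUS register defined earlier in this block. The field defines are copied from the listing; the reporting function itself is hypothetical.

/*
 * Illustrative sketch only -- not part of this patch.  Decodes two fields
 * of the SWDS0 LINK_STATUS register using the generated SHIFT/MASK pairs.
 */
#include <stdint.h>
#include <stdio.h>

/* Field defines copied verbatim from the listing above. */
#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT    0x0
#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4
#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED_MASK      0x000FL
#define BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK   0x03F0L

static void swds0_report_link(uint16_t link_status)
{
	unsigned int speed = (link_status &
			      BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED_MASK) >>
			     BIF_CFG_DEV0_SWDS0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT;
	unsigned int width = (link_status &
			      BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK) >>
			     BIF_CFG_DEV0_SWDS0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT;

	/* The speed field uses the standard PCIe encoding (1 = 2.5 GT/s, 2 = 5 GT/s, ...). */
	printf("SWDS0 link: speed encoding %u, x%u\n", speed, width);
}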
+//BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 
0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 
0x00400000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_SWDS0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define 
BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_SWDS0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L 
+#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_SWDS0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3 +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_SWDS0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define 
BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 
0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define 
BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define 
BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 
+#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_SWDS0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0 +#define 
BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L +#define BIF_CFG_DEV0_SWDS0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L +//BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_SWDS0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT +#define BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3 +#define 
BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L +#define BIF_CFG_DEV0_SWDS0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L +//BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L 
+//BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L 
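The LANE_n_EQUALIZATION_CNTL_16GT entries above, like every other register in this generated header, are described as a <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair; in the amdgpu driver such pairs are normally consumed through token-pasting helpers along the lines of REG_GET_FIELD()/REG_SET_FIELD(). The sketch below is only an illustration of that convention and is not part of this patch: it re-declares equivalent helpers locally and uses a made-up register readback value.

#include <stdint.h>
#include <stdio.h>

/* Field description taken from the header above (lane 5, 16GT/s equalization control). */
#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0
#define BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK  0x0FL

/* Local stand-ins for the usual reg##__##field token-pasting helpers. */
#define REG_GET_FIELD(value, reg, field) \
	(((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define REG_SET_FIELD(orig, reg, field, v) \
	(((orig) & ~(uint32_t)reg##__##field##_MASK) | \
	 (((uint32_t)(v) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t reg = 0x73;	/* hypothetical LANE_5_EQUALIZATION_CNTL_16GT readback */

	/* Decode the downstream-port (DSP) TX preset held in bits 3:0. */
	printf("lane 5 DSP TX preset = %u\n",
	       (unsigned)REG_GET_FIELD(reg, BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT,
				       LANE_5_DSP_16GT_TX_PRESET));

	/* Program preset 7 into the same field without touching the USP preset in bits 7:4. */
	reg = REG_SET_FIELD(reg, BIF_CFG_DEV0_SWDS0_LANE_5_EQUALIZATION_CNTL_16GT,
			    LANE_5_DSP_16GT_TX_PRESET, 7);
	printf("updated value        = 0x%02x\n", (unsigned)reg);
	return 0;
}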
+//BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_SWDS0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_SWDS0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L +//BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1 +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L +#define BIF_CFG_DEV0_SWDS0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L +//BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L +#define 
BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0 +#define 
BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L +#define 
BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS +#define 
BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L +#define 
BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_SWDS0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L + + +// addressBlock: nbio_nbif0_rcc_strap_BIFDEC1 +//RCC_STRAP0_RCC_DEV0_EPF0_STRAP0 +#define 
RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0 +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10 +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14 +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18 +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L +#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_rcc_ep_dev0_BIFDEC1 +//RCC_EP_DEV0_0_EP_PCIE_SCRATCH +#define RCC_EP_DEV0_0_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL +//RCC_EP_DEV0_0_EP_PCIE_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7 +#define RCC_EP_DEV0_0_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8 +#define RCC_EP_DEV0_0_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e +#define RCC_EP_DEV0_0_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L +#define RCC_EP_DEV0_0_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L +#define RCC_EP_DEV0_0_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L +//RCC_EP_DEV0_0_EP_PCIE_INT_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1 +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2 +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3 +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4 +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6 +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L +#define RCC_EP_DEV0_0_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L +//RCC_EP_DEV0_0_EP_PCIE_INT_STATUS +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1 +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2 +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3 +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4 +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6 +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L +#define 
RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L +#define RCC_EP_DEV0_0_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L +//RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L +//RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7 +#define RCC_EP_DEV0_0_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L +//RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1 +#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2 +#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3 +#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L +#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L +#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L +#define RCC_EP_DEV0_0_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L +//RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 0x3 +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6 +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7 +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10 +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11 +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L +#define RCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L +//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define 
RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F1_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8 +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10 +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18 +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L +//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL +//RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8 +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL +#define RCC_EP_DEV0_0_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L +//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL 
+//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_0_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL +#define RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL +//RCC_EP_DEV0_0_EP_PCIEP_RESERVED +#define RCC_EP_DEV0_0_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL +//RCC_EP_DEV0_0_EP_PCIE_TX_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18 +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19 +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L +#define RCC_EP_DEV0_0_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L +//RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID +#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3 +#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8 +#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L +#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L +#define RCC_EP_DEV0_0_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L +//RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8 +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11 +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12 +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18 +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19 +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f +#define 
RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L +#define RCC_EP_DEV0_0_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L +//RCC_EP_DEV0_0_EP_PCIE_RX_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19 +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L +#define RCC_EP_DEV0_0_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L +//RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL +#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0 +#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1 +#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2 +#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L +#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L +#define RCC_EP_DEV0_0_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_rcc_dwn_dev0_BIFDEC1 +//RCC_DWN_DEV0_0_DN_PCIE_RESERVED +#define RCC_DWN_DEV0_0_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0 +#define RCC_DWN_DEV0_0_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL +//RCC_DWN_DEV0_0_DN_PCIE_SCRATCH +#define RCC_DWN_DEV0_0_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0 +#define RCC_DWN_DEV0_0_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL +//RCC_DWN_DEV0_0_DN_PCIE_CNTL +#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0 +#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7 +#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e +#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L +#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 
0x00000080L +#define RCC_DWN_DEV0_0_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L +//RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL +#define RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19 +#define RCC_DWN_DEV0_0_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L +//RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2 +#define RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c +#define RCC_DWN_DEV0_0_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L +//RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL +#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7 +#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8 +#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L +#define RCC_DWN_DEV0_0_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L +//RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL +#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0 +#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1 +#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2 +#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3 +#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L +#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L +#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L +#define RCC_DWN_DEV0_0_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L + + +// addressBlock: nbio_nbif0_rcc_dwnp_dev0_BIFDEC1 +//RCC_DWNP_DEV0_0_PCIE_ERR_CNTL +#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0 +#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11 +#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L +#define RCC_DWNP_DEV0_0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L +//RCC_DWNP_DEV0_0_PCIE_RX_CNTL +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8 +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9 +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14 +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15 +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L +#define RCC_DWNP_DEV0_0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L +//RCC_DWNP_DEV0_0_PCIE_LC_SPEED_CNTL +//RCC_DWNP_DEV0_0_PCIE_LC_CNTL2 +#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b +#define RCC_DWNP_DEV0_0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L +//RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP +#define RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0 +#define RCC_DWNP_DEV0_0_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1 + + +// addressBlock: nbio_nbif0_rcc_shadow_reg_shadowdec +//SHADOW_COMMAND +#define SHADOW_COMMAND__IOEN_UP__SHIFT 0x0 +#define SHADOW_COMMAND__MEMEN_UP__SHIFT 0x1 +#define SHADOW_COMMAND__IOEN_UP_MASK 0x0001L +#define SHADOW_COMMAND__MEMEN_UP_MASK 0x0002L +//SHADOW_BASE_ADDR_1 +#define 
SHADOW_BASE_ADDR_1__BAR1_UP__SHIFT 0x0 +#define SHADOW_BASE_ADDR_1__BAR1_UP_MASK 0xFFFFFFFFL +//SHADOW_BASE_ADDR_2 +#define SHADOW_BASE_ADDR_2__BAR2_UP__SHIFT 0x0 +#define SHADOW_BASE_ADDR_2__BAR2_UP_MASK 0xFFFFFFFFL +//SHADOW_SUB_BUS_NUMBER_LATENCY +#define SHADOW_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_UP__SHIFT 0x8 +#define SHADOW_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_UP__SHIFT 0x10 +#define SHADOW_SUB_BUS_NUMBER_LATENCY__SECONDARY_BUS_UP_MASK 0x0000FF00L +#define SHADOW_SUB_BUS_NUMBER_LATENCY__SUB_BUS_NUM_UP_MASK 0x00FF0000L +//SHADOW_IO_BASE_LIMIT +#define SHADOW_IO_BASE_LIMIT__IO_BASE_UP__SHIFT 0x4 +#define SHADOW_IO_BASE_LIMIT__IO_LIMIT_UP__SHIFT 0xc +#define SHADOW_IO_BASE_LIMIT__IO_BASE_UP_MASK 0x00F0L +#define SHADOW_IO_BASE_LIMIT__IO_LIMIT_UP_MASK 0xF000L +//SHADOW_MEM_BASE_LIMIT +#define SHADOW_MEM_BASE_LIMIT__MEM_BASE_TYPE__SHIFT 0x0 +#define SHADOW_MEM_BASE_LIMIT__MEM_BASE_31_20_UP__SHIFT 0x4 +#define SHADOW_MEM_BASE_LIMIT__MEM_LIMIT_TYPE__SHIFT 0x10 +#define SHADOW_MEM_BASE_LIMIT__MEM_LIMIT_31_20_UP__SHIFT 0x14 +#define SHADOW_MEM_BASE_LIMIT__MEM_BASE_TYPE_MASK 0x0000000FL +#define SHADOW_MEM_BASE_LIMIT__MEM_BASE_31_20_UP_MASK 0x0000FFF0L +#define SHADOW_MEM_BASE_LIMIT__MEM_LIMIT_TYPE_MASK 0x000F0000L +#define SHADOW_MEM_BASE_LIMIT__MEM_LIMIT_31_20_UP_MASK 0xFFF00000L +//SHADOW_PREF_BASE_LIMIT +#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE__SHIFT 0x0 +#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_UP__SHIFT 0x4 +#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE__SHIFT 0x10 +#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_UP__SHIFT 0x14 +#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_BASE_TYPE_MASK 0x0000000FL +#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_BASE_31_20_UP_MASK 0x0000FFF0L +#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_LIMIT_TYPE_MASK 0x000F0000L +#define SHADOW_PREF_BASE_LIMIT__PREF_MEM_LIMIT_31_20_UP_MASK 0xFFF00000L +//SHADOW_PREF_BASE_UPPER +#define SHADOW_PREF_BASE_UPPER__PREF_BASE_UPPER_UP__SHIFT 0x0 +#define SHADOW_PREF_BASE_UPPER__PREF_BASE_UPPER_UP_MASK 0xFFFFFFFFL +//SHADOW_PREF_LIMIT_UPPER +#define SHADOW_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_UP__SHIFT 0x0 +#define SHADOW_PREF_LIMIT_UPPER__PREF_LIMIT_UPPER_UP_MASK 0xFFFFFFFFL +//SHADOW_IO_BASE_LIMIT_HI +#define SHADOW_IO_BASE_LIMIT_HI__IO_BASE_31_16_UP__SHIFT 0x0 +#define SHADOW_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_UP__SHIFT 0x10 +#define SHADOW_IO_BASE_LIMIT_HI__IO_BASE_31_16_UP_MASK 0x0000FFFFL +#define SHADOW_IO_BASE_LIMIT_HI__IO_LIMIT_31_16_UP_MASK 0xFFFF0000L +//SHADOW_IRQ_BRIDGE_CNTL +#define SHADOW_IRQ_BRIDGE_CNTL__ISA_EN_UP__SHIFT 0x2 +#define SHADOW_IRQ_BRIDGE_CNTL__VGA_EN_UP__SHIFT 0x3 +#define SHADOW_IRQ_BRIDGE_CNTL__VGA_DEC_UP__SHIFT 0x4 +#define SHADOW_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_UP__SHIFT 0x6 +#define SHADOW_IRQ_BRIDGE_CNTL__ISA_EN_UP_MASK 0x0004L +#define SHADOW_IRQ_BRIDGE_CNTL__VGA_EN_UP_MASK 0x0008L +#define SHADOW_IRQ_BRIDGE_CNTL__VGA_DEC_UP_MASK 0x0010L +#define SHADOW_IRQ_BRIDGE_CNTL__SECONDARY_BUS_RESET_UP_MASK 0x0040L +//SUC_INDEX +#define SUC_INDEX__SUC_INDEX__SHIFT 0x0 +#define SUC_INDEX__SUC_INDEX_MASK 0xFFFFFFFFL +//SUC_DATA +#define SUC_DATA__SUC_DATA__SHIFT 0x0 +#define SUC_DATA__SUC_DATA_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_RCCPORTDEC + + +// addressBlock: nbio_nbif0_rcc_ep_dev0_RCCPORTDEC +//RCC_EP_DEV0_1_EP_PCIE_SCRATCH +#define RCC_EP_DEV0_1_EP_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL +//RCC_EP_DEV0_1_EP_PCIE_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7 
+#define RCC_EP_DEV0_1_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8 +#define RCC_EP_DEV0_1_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e +#define RCC_EP_DEV0_1_EP_PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L +#define RCC_EP_DEV0_1_EP_PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L +#define RCC_EP_DEV0_1_EP_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L +//RCC_EP_DEV0_1_EP_PCIE_INT_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN__SHIFT 0x1 +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN__SHIFT 0x2 +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN__SHIFT 0x3 +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN__SHIFT 0x4 +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN__SHIFT 0x6 +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__CORR_ERR_INT_EN_MASK 0x00000001L +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__NON_FATAL_ERR_INT_EN_MASK 0x00000002L +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__FATAL_ERR_INT_EN_MASK 0x00000004L +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__USR_DETECTED_INT_EN_MASK 0x00000008L +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__MISC_ERR_INT_EN_MASK 0x00000010L +#define RCC_EP_DEV0_1_EP_PCIE_INT_CNTL__POWER_STATE_CHG_INT_EN_MASK 0x00000040L +//RCC_EP_DEV0_1_EP_PCIE_INT_STATUS +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS__SHIFT 0x1 +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS__SHIFT 0x2 +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS__SHIFT 0x3 +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS__SHIFT 0x4 +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS__SHIFT 0x6 +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__CORR_ERR_INT_STATUS_MASK 0x00000001L +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__NON_FATAL_ERR_INT_STATUS_MASK 0x00000002L +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__FATAL_ERR_INT_STATUS_MASK 0x00000004L +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__USR_DETECTED_INT_STATUS_MASK 0x00000008L +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__MISC_ERR_INT_STATUS_MASK 0x00000010L +#define RCC_EP_DEV0_1_EP_PCIE_INT_STATUS__POWER_STATE_CHG_INT_STATUS_MASK 0x00000040L +//RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L +//RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7 +#define RCC_EP_DEV0_1_EP_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L +//RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1 +#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2 +#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3 +#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L +#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L +#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L +#define RCC_EP_DEV0_1_EP_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L +//RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE__SHIFT 
0x3 +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT__SHIFT 0x6 +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE__SHIFT 0x7 +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE__SHIFT 0xa +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT__SHIFT 0xd +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0__SHIFT 0xe +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN__SHIFT 0xf +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1__SHIFT 0x10 +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN__SHIFT 0x11 +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_SHORT_VALUE_MASK 0x00000007L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_LONG_VALUE_MASK 0x00000038L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_S_REQUIREMENT_MASK 0x00000040L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_SHORT_VALUE_MASK 0x00000380L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_LONG_VALUE_MASK 0x00001C00L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_NS_REQUIREMENT_MASK 0x00002000L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK 0x00004000L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK 0x00008000L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__TX_CHK_FC_FOR_L1_MASK 0x00010000L +#define RCC_EP_DEV0_1_EP_PCIE_TX_LTR_CNTL__LTR_DSTATE_USING_WDATA_EN_MASK 0x00020000L +//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8 +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10 +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18 +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L +//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL +//RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE__SHIFT 0x8 +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__SUBSTATE_STATUS_MASK 0x001FL +#define RCC_EP_DEV0_1_EP_PCIE_F0_DPA_CNTL__DPA_COMPLIANCE_MODE_MASK 0x0100L +//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define 
RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define RCC_EP_DEV0_1_PCIE_F0_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL +#define RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_PME_CONTROL__PME_SERVICE_TIMER_MASK 0x1FL +//RCC_EP_DEV0_1_EP_PCIEP_RESERVED +#define RCC_EP_DEV0_1_EP_PCIEP_RESERVED__PCIEP_RESERVED__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIEP_RESERVED__PCIEP_RESERVED_MASK 0xFFFFFFFFL +//RCC_EP_DEV0_1_EP_PCIE_TX_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18 +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19 +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L +#define RCC_EP_DEV0_1_EP_PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L +//RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID +#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3 +#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8 +#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L +#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L +#define RCC_EP_DEV0_1_EP_PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L +//RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8 +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11 +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12 +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0x18 +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0x19 +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0x1a +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED__SHIFT 0x1b +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED__SHIFT 0x1c +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED__SHIFT 0x1d +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED__SHIFT 0x1e +#define 
RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED__SHIFT 0x1f +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x01000000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x02000000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x04000000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F3_TIMER_EXPIRED_MASK 0x08000000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F4_TIMER_EXPIRED_MASK 0x10000000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F5_TIMER_EXPIRED_MASK 0x20000000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F6_TIMER_EXPIRED_MASK 0x40000000L +#define RCC_EP_DEV0_1_EP_PCIE_ERR_CNTL__AER_HDR_LOG_F7_TIMER_EXPIRED_MASK 0x80000000L +//RCC_EP_DEV0_1_EP_PCIE_RX_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19 +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L +#define RCC_EP_DEV0_1_EP_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L +//RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL +#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0 +#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1 +#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2 +#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L +#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L +#define RCC_EP_DEV0_1_EP_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_rcc_dwn_dev0_RCCPORTDEC +//RCC_DWN_DEV0_1_DN_PCIE_RESERVED +#define RCC_DWN_DEV0_1_DN_PCIE_RESERVED__PCIE_RESERVED__SHIFT 0x0 +#define RCC_DWN_DEV0_1_DN_PCIE_RESERVED__PCIE_RESERVED_MASK 0xFFFFFFFFL +//RCC_DWN_DEV0_1_DN_PCIE_SCRATCH +#define RCC_DWN_DEV0_1_DN_PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0 +#define RCC_DWN_DEV0_1_DN_PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL +//RCC_DWN_DEV0_1_DN_PCIE_CNTL +#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0 +#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN__SHIFT 0x7 +#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e +#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__HWINIT_WR_LOCK_MASK 
0x00000001L +#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__UR_ERR_REPORT_DIS_DN_MASK 0x00000080L +#define RCC_DWN_DEV0_1_DN_PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L +//RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL +#define RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19 +#define RCC_DWN_DEV0_1_DN_PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L +//RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2 +#define RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c +#define RCC_DWN_DEV0_1_DN_PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L +//RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL +#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 0x7 +#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN__SHIFT 0x8 +#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L +#define RCC_DWN_DEV0_1_DN_PCIE_BUS_CNTL__AER_CPL_TIMEOUT_RO_DIS_SWDN_MASK 0x00000100L +//RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL +#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0 +#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1 +#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2 +#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG__SHIFT 0x3 +#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L +#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L +#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L +#define RCC_DWN_DEV0_1_DN_PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN4_HIDDEN_REG_MASK 0x00000008L + + +// addressBlock: nbio_nbif0_rcc_dwnp_dev0_RCCPORTDEC +//RCC_DWNP_DEV0_1_PCIE_ERR_CNTL +#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0 +#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11 +#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L +#define RCC_DWNP_DEV0_1_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L +//RCC_DWNP_DEV0_1_PCIE_RX_CNTL +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8 +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN__SHIFT 0x9 +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14 +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN__SHIFT 0x15 +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_DN_MASK 0x00000200L +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_DN_MASK 0x00200000L +#define RCC_DWNP_DEV0_1_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L +//RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL +#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0 +#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1 +#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2 +#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L +#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L +#define RCC_DWNP_DEV0_1_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L +//RCC_DWNP_DEV0_1_PCIE_LC_CNTL2 +#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b +#define RCC_DWNP_DEV0_1_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L 
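(Aside, to make the layout of these generated pairs concrete: every register field above gets a matching __SHIFT and _MASK constant, and a driver consumes them by masking and shifting. A minimal sketch in C, using hypothetical helpers get_field()/set_field() and local copies of the LC_GEN3_EN_STRAP values from the hunk above rather than any particular amdgpu macro:

	#include <stdint.h>
	#include <stdio.h>

	/* Local copies of one generated pair from the hunk above, so the sketch stands alone. */
	#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1
	#define PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK   0x00000002L

	/* Illustrative only: pull one field out of a raw register value. */
	static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
	{
		return (reg & mask) >> shift;
	}

	/* Illustrative only: rewrite one field without disturbing the other bits. */
	static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
					 uint32_t val)
	{
		return (reg & ~mask) | ((val << shift) & mask);
	}

	int main(void)
	{
		uint32_t reg = 0;

		reg = set_field(reg, PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK,
				PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT, 1);
		printf("gen3 strap = %u\n",
		       get_field(reg, PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK,
				 PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT));
		return 0;
	}

The same pattern applies to every __SHIFT/_MASK pair in this header; the patch itself only adds the constants.)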
+//RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP +#define RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP__SHIFT 0x0 +#define RCC_DWNP_DEV0_1_LTR_MSG_INFO_FROM_EP__LTR_MSG_INFO_FROM_EP_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_strap_rcc_strap_internal +//RCC_STRAP1_RCC_DEV0_EPF0_STRAP0 +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0 +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10 +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0__SHIFT 0x14 +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT 0x18 +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0__SHIFT 0x1c +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0__SHIFT 0x1d +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0__SHIFT 0x1e +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0__SHIFT 0x1f +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0_MASK 0x0000FFFFL +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0_MASK 0x000F0000L +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_MINOR_REV_ID_DEV0_F0_MASK 0x00F00000L +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK 0x0F000000L +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_FUNC_EN_DEV0_F0_MASK 0x10000000L +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_LEGACY_DEVICE_TYPE_EN_DEV0_F0_MASK 0x20000000L +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L +#define RCC_STRAP1_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_misc_bif_misc_regblk +//MISC_SCRATCH +#define MISC_SCRATCH__MISC_SCRATCH0__SHIFT 0x0 +#define MISC_SCRATCH__MISC_SCRATCH0_MASK 0xFFFFFFFFL +//INTR_LINE_POLARITY +#define INTR_LINE_POLARITY__INTR_LINE_POLARITY_DEV0__SHIFT 0x0 +#define INTR_LINE_POLARITY__INTR_LINE_POLARITY_DEV0_MASK 0x000000FFL +//INTR_LINE_ENABLE +#define INTR_LINE_ENABLE__INTR_LINE_ENABLE_DEV0__SHIFT 0x0 +#define INTR_LINE_ENABLE__INTR_LINE_ENABLE_DEV0_MASK 0x000000FFL +//OUTSTANDING_VC_ALLOC +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC0_ALLOC__SHIFT 0x0 +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC1_ALLOC__SHIFT 0x2 +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC2_ALLOC__SHIFT 0x4 +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC3_ALLOC__SHIFT 0x6 +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC4_ALLOC__SHIFT 0x8 +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC5_ALLOC__SHIFT 0xa +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC6_ALLOC__SHIFT 0xc +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC7_ALLOC__SHIFT 0xe +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_THRD__SHIFT 0x10 +#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC0_ALLOC__SHIFT 0x18 +#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC1_ALLOC__SHIFT 0x1a +#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_THRD__SHIFT 0x1c +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC0_ALLOC_MASK 0x00000003L +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC1_ALLOC_MASK 0x0000000CL +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC2_ALLOC_MASK 0x00000030L +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC3_ALLOC_MASK 0x000000C0L +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC4_ALLOC_MASK 0x00000300L +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC5_ALLOC_MASK 0x00000C00L +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC6_ALLOC_MASK 0x00003000L +#define OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_VC7_ALLOC_MASK 0x0000C000L +#define 
OUTSTANDING_VC_ALLOC__DMA_OUTSTANDING_THRD_MASK 0x000F0000L +#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC0_ALLOC_MASK 0x03000000L +#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_VC1_ALLOC_MASK 0x0C000000L +#define OUTSTANDING_VC_ALLOC__HST_OUTSTANDING_THRD_MASK 0xF0000000L +//BIFC_MISC_CTRL0 +#define BIFC_MISC_CTRL0__VWIRE_TARG_UNITID_CHECK_EN__SHIFT 0x0 +#define BIFC_MISC_CTRL0__VWIRE_SRC_UNITID_CHECK_EN__SHIFT 0x1 +#define BIFC_MISC_CTRL0__DMA_VC4_NON_DVM_STS__SHIFT 0x4 +#define BIFC_MISC_CTRL0__DMA_CHAIN_BREAK_IN_RCMODE__SHIFT 0x8 +#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK__SHIFT 0x9 +#define BIFC_MISC_CTRL0__GSI_SST_ARB_CHAIN_LOCK__SHIFT 0xa +#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_FLUSH_EN__SHIFT 0xb +#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_NPWR_DIS__SHIFT 0xc +#define BIFC_MISC_CTRL0__GSI_SET_PRECEEDINGWR_DIS__SHIFT 0xd +#define BIFC_MISC_CTRL0__DMA_ATOMIC_LENGTH_CHK_DIS__SHIFT 0x10 +#define BIFC_MISC_CTRL0__DMA_ATOMIC_FAILED_STS_SEL__SHIFT 0x11 +#define BIFC_MISC_CTRL0__DMA_FORCE_VF_AS_PF_SRIOIVEN_LOW__SHIFT 0x12 +#define BIFC_MISC_CTRL0__DMA_ADDR_KEEP_PH__SHIFT 0x13 +#define BIFC_MISC_CTRL0__RCC_GMI_TD_FORCE_ZERO__SHIFT 0x14 +#define BIFC_MISC_CTRL0__PCIE_CAPABILITY_PROT_DIS__SHIFT 0x18 +#define BIFC_MISC_CTRL0__VC7_DMA_IOCFG_DIS__SHIFT 0x19 +#define BIFC_MISC_CTRL0__DMA_2ND_REQ_DIS__SHIFT 0x1a +#define BIFC_MISC_CTRL0__PORT_DSTATE_BYPASS_MODE__SHIFT 0x1b +#define BIFC_MISC_CTRL0__PME_TURNOFF_MODE__SHIFT 0x1c +#define BIFC_MISC_CTRL0__PCIESWUS_SELECTION__SHIFT 0x1f +#define BIFC_MISC_CTRL0__VWIRE_TARG_UNITID_CHECK_EN_MASK 0x00000001L +#define BIFC_MISC_CTRL0__VWIRE_SRC_UNITID_CHECK_EN_MASK 0x00000006L +#define BIFC_MISC_CTRL0__DMA_VC4_NON_DVM_STS_MASK 0x000000F0L +#define BIFC_MISC_CTRL0__DMA_CHAIN_BREAK_IN_RCMODE_MASK 0x00000100L +#define BIFC_MISC_CTRL0__HST_ARB_CHAIN_LOCK_MASK 0x00000200L +#define BIFC_MISC_CTRL0__GSI_SST_ARB_CHAIN_LOCK_MASK 0x00000400L +#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_FLUSH_EN_MASK 0x00000800L +#define BIFC_MISC_CTRL0__GSI_RD_SPLIT_STALL_NPWR_DIS_MASK 0x00001000L +#define BIFC_MISC_CTRL0__GSI_SET_PRECEEDINGWR_DIS_MASK 0x00002000L +#define BIFC_MISC_CTRL0__DMA_ATOMIC_LENGTH_CHK_DIS_MASK 0x00010000L +#define BIFC_MISC_CTRL0__DMA_ATOMIC_FAILED_STS_SEL_MASK 0x00020000L +#define BIFC_MISC_CTRL0__DMA_FORCE_VF_AS_PF_SRIOIVEN_LOW_MASK 0x00040000L +#define BIFC_MISC_CTRL0__DMA_ADDR_KEEP_PH_MASK 0x00080000L +#define BIFC_MISC_CTRL0__RCC_GMI_TD_FORCE_ZERO_MASK 0x00100000L +#define BIFC_MISC_CTRL0__PCIE_CAPABILITY_PROT_DIS_MASK 0x01000000L +#define BIFC_MISC_CTRL0__VC7_DMA_IOCFG_DIS_MASK 0x02000000L +#define BIFC_MISC_CTRL0__DMA_2ND_REQ_DIS_MASK 0x04000000L +#define BIFC_MISC_CTRL0__PORT_DSTATE_BYPASS_MODE_MASK 0x08000000L +#define BIFC_MISC_CTRL0__PME_TURNOFF_MODE_MASK 0x10000000L +#define BIFC_MISC_CTRL0__PCIESWUS_SELECTION_MASK 0x80000000L +//BIFC_MISC_CTRL1 +#define BIFC_MISC_CTRL1__THT_HST_CPLD_POISON_REPORT__SHIFT 0x0 +#define BIFC_MISC_CTRL1__DMA_REQ_POISON_REPORT__SHIFT 0x1 +#define BIFC_MISC_CTRL1__DMA_REQ_ACSVIO_REPORT__SHIFT 0x2 +#define BIFC_MISC_CTRL1__DMA_RSP_POISON_CPLD_REPORT__SHIFT 0x3 +#define BIFC_MISC_CTRL1__GSI_SMN_WORST_ERR_STSTUS__SHIFT 0x4 +#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE1_FOR_ERROR__SHIFT 0x5 +#define BIFC_MISC_CTRL1__GSI_RDWR_BALANCE_DIS__SHIFT 0x6 +#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_DROP__SHIFT 0x7 +#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_STS__SHIFT 0x8 +#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_DATASTS__SHIFT 0xa +#define BIFC_MISC_CTRL1__DROP_OTHER_HT_ADDR_REQ__SHIFT 0xc +#define 
BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE__SHIFT 0xd +#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_VALUE__SHIFT 0xe +#define BIFC_MISC_CTRL1__UPS_SDP_RDY_TIE1__SHIFT 0xf +#define BIFC_MISC_CTRL1__GMI_RCC_DN_BME_DROP_DIS__SHIFT 0x10 +#define BIFC_MISC_CTRL1__GMI_RCC_EP_BME_DROP_DIS__SHIFT 0x11 +#define BIFC_MISC_CTRL1__GMI_BIH_DN_BME_DROP_DIS__SHIFT 0x12 +#define BIFC_MISC_CTRL1__GMI_BIH_EP_BME_DROP_DIS__SHIFT 0x13 +#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE0_FOR_ERROR__SHIFT 0x14 +#define BIFC_MISC_CTRL1__GSI_SMN_POSTWR_MULTI_EN__SHIFT 0x15 +#define BIFC_MISC_CTRL1__GMI_RDSIZED_REQATTR_MASK__SHIFT 0x18 +#define BIFC_MISC_CTRL1__GMI_RDSIZEDDW_REQATTR_MASK__SHIFT 0x19 +#define BIFC_MISC_CTRL1__GMI_WRSIZED_REQATTR_MASK__SHIFT 0x1a +#define BIFC_MISC_CTRL1__GMI_WRSIZEDFL_REQATTR_MASK__SHIFT 0x1b +#define BIFC_MISC_CTRL1__GMI_FORCE_NOT_SEND_NON_BASEVC_RSPCREDIT__SHIFT 0x1c +#define BIFC_MISC_CTRL1__GMI_CPLBUF_EN__SHIFT 0x1d +#define BIFC_MISC_CTRL1__GMI_MSG_BLOCKLVL_SEL__SHIFT 0x1e +#define BIFC_MISC_CTRL1__THT_HST_CPLD_POISON_REPORT_MASK 0x00000001L +#define BIFC_MISC_CTRL1__DMA_REQ_POISON_REPORT_MASK 0x00000002L +#define BIFC_MISC_CTRL1__DMA_REQ_ACSVIO_REPORT_MASK 0x00000004L +#define BIFC_MISC_CTRL1__DMA_RSP_POISON_CPLD_REPORT_MASK 0x00000008L +#define BIFC_MISC_CTRL1__GSI_SMN_WORST_ERR_STSTUS_MASK 0x00000010L +#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE1_FOR_ERROR_MASK 0x00000020L +#define BIFC_MISC_CTRL1__GSI_RDWR_BALANCE_DIS_MASK 0x00000040L +#define BIFC_MISC_CTRL1__GMI_ATOMIC_POISON_DROP_MASK 0x00000080L +#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_STS_MASK 0x00000300L +#define BIFC_MISC_CTRL1__HST_UNSUPPORT_SDPCMD_DATASTS_MASK 0x00000C00L +#define BIFC_MISC_CTRL1__DROP_OTHER_HT_ADDR_REQ_MASK 0x00001000L +#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_MASK 0x00002000L +#define BIFC_MISC_CTRL1__DMAWRREQ_HSTRDRSP_ORDER_FORCE_VALUE_MASK 0x00004000L +#define BIFC_MISC_CTRL1__UPS_SDP_RDY_TIE1_MASK 0x00008000L +#define BIFC_MISC_CTRL1__GMI_RCC_DN_BME_DROP_DIS_MASK 0x00010000L +#define BIFC_MISC_CTRL1__GMI_RCC_EP_BME_DROP_DIS_MASK 0x00020000L +#define BIFC_MISC_CTRL1__GMI_BIH_DN_BME_DROP_DIS_MASK 0x00040000L +#define BIFC_MISC_CTRL1__GMI_BIH_EP_BME_DROP_DIS_MASK 0x00080000L +#define BIFC_MISC_CTRL1__GSI_SDP_RDRSP_DATA_FORCE0_FOR_ERROR_MASK 0x00100000L +#define BIFC_MISC_CTRL1__GSI_SMN_POSTWR_MULTI_EN_MASK 0x00200000L +#define BIFC_MISC_CTRL1__GMI_RDSIZED_REQATTR_MASK_MASK 0x01000000L +#define BIFC_MISC_CTRL1__GMI_RDSIZEDDW_REQATTR_MASK_MASK 0x02000000L +#define BIFC_MISC_CTRL1__GMI_WRSIZED_REQATTR_MASK_MASK 0x04000000L +#define BIFC_MISC_CTRL1__GMI_WRSIZEDFL_REQATTR_MASK_MASK 0x08000000L +#define BIFC_MISC_CTRL1__GMI_FORCE_NOT_SEND_NON_BASEVC_RSPCREDIT_MASK 0x10000000L +#define BIFC_MISC_CTRL1__GMI_CPLBUF_EN_MASK 0x20000000L +#define BIFC_MISC_CTRL1__GMI_MSG_BLOCKLVL_SEL_MASK 0xC0000000L +//BIFC_BME_ERR_LOG +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F0__SHIFT 0x0 +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F1__SHIFT 0x1 +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F2__SHIFT 0x2 +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F3__SHIFT 0x3 +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F4__SHIFT 0x4 +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F5__SHIFT 0x5 +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F6__SHIFT 0x6 +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F7__SHIFT 0x7 +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F0__SHIFT 0x10 +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F1__SHIFT 0x11 +#define 
BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F2__SHIFT 0x12 +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F3__SHIFT 0x13 +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F4__SHIFT 0x14 +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F5__SHIFT 0x15 +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F6__SHIFT 0x16 +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F7__SHIFT 0x17 +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F0_MASK 0x00000001L +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F1_MASK 0x00000002L +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F2_MASK 0x00000004L +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F3_MASK 0x00000008L +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F4_MASK 0x00000010L +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F5_MASK 0x00000020L +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F6_MASK 0x00000040L +#define BIFC_BME_ERR_LOG__DMA_ON_BME_LOW_DEV0_F7_MASK 0x00000080L +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F0_MASK 0x00010000L +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F1_MASK 0x00020000L +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F2_MASK 0x00040000L +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F3_MASK 0x00080000L +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F4_MASK 0x00100000L +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F5_MASK 0x00200000L +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F6_MASK 0x00400000L +#define BIFC_BME_ERR_LOG__CLEAR_DMA_ON_BME_LOW_DEV0_F7_MASK 0x00800000L +//BIFC_RCCBIH_BME_ERR_LOG0 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F0__SHIFT 0x0 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F1__SHIFT 0x1 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F2__SHIFT 0x2 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F3__SHIFT 0x3 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F4__SHIFT 0x4 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F5__SHIFT 0x5 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F6__SHIFT 0x6 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F7__SHIFT 0x7 +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F0__SHIFT 0x10 +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F1__SHIFT 0x11 +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F2__SHIFT 0x12 +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F3__SHIFT 0x13 +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F4__SHIFT 0x14 +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F5__SHIFT 0x15 +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F6__SHIFT 0x16 +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F7__SHIFT 0x17 +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F0_MASK 0x00000001L +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F1_MASK 0x00000002L +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F2_MASK 0x00000004L +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F3_MASK 0x00000008L +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F4_MASK 0x00000010L +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F5_MASK 0x00000020L +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F6_MASK 0x00000040L +#define BIFC_RCCBIH_BME_ERR_LOG0__RCCBIH_ON_BME_LOW_DEV0_F7_MASK 0x00000080L +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F0_MASK 0x00010000L +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F1_MASK 0x00020000L 
+#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F2_MASK 0x00040000L +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F3_MASK 0x00080000L +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F4_MASK 0x00100000L +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F5_MASK 0x00200000L +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F6_MASK 0x00400000L +#define BIFC_RCCBIH_BME_ERR_LOG0__CLEAR_RCCBIH_ON_BME_LOW_DEV0_F7_MASK 0x00800000L +//BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F0__SHIFT 0x0 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F0__SHIFT 0x2 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F0__SHIFT 0x4 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F0__SHIFT 0x6 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F0__SHIFT 0x8 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F0__SHIFT 0xa +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F0__SHIFT 0xc +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F0__SHIFT 0xe +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F1__SHIFT 0x10 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F1__SHIFT 0x12 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F1__SHIFT 0x14 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F1__SHIFT 0x16 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F1__SHIFT 0x18 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F1__SHIFT 0x1a +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F1__SHIFT 0x1c +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F1__SHIFT 0x1e +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F0_MASK 0x00000003L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F0_MASK 0x0000000CL +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F0_MASK 0x00000030L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F0_MASK 0x000000C0L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F0_MASK 0x00000300L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F0_MASK 0x00000C00L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F0_MASK 0x00003000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F0_MASK 0x0000C000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_P_DEV0_F1_MASK 0x00030000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_IDO_OVERIDE_NP_DEV0_F1_MASK 0x000C0000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_IDO_DEV0_F1_MASK 0x00300000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_P_DEV0_F1_MASK 0x00C00000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_RO_OVERIDE_NP_DEV0_F1_MASK 0x03000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_P_DEV0_F1_MASK 0x0C000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__TX_SNR_OVERIDE_NP_DEV0_F1_MASK 0x30000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F0_F1__BLKLVL_FOR_NONIDO_DEV0_F1_MASK 0xC0000000L +//BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F2__SHIFT 0x0 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F2__SHIFT 0x2 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F2__SHIFT 0x4 +#define 
BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F2__SHIFT 0x6 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F2__SHIFT 0x8 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F2__SHIFT 0xa +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F2__SHIFT 0xc +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F2__SHIFT 0xe +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F3__SHIFT 0x10 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F3__SHIFT 0x12 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F3__SHIFT 0x14 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F3__SHIFT 0x16 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F3__SHIFT 0x18 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F3__SHIFT 0x1a +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F3__SHIFT 0x1c +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F3__SHIFT 0x1e +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F2_MASK 0x00000003L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F2_MASK 0x0000000CL +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F2_MASK 0x00000030L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F2_MASK 0x000000C0L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F2_MASK 0x00000300L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F2_MASK 0x00000C00L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F2_MASK 0x00003000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F2_MASK 0x0000C000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_P_DEV0_F3_MASK 0x00030000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_IDO_OVERIDE_NP_DEV0_F3_MASK 0x000C0000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_IDO_DEV0_F3_MASK 0x00300000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_P_DEV0_F3_MASK 0x00C00000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_RO_OVERIDE_NP_DEV0_F3_MASK 0x03000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_P_DEV0_F3_MASK 0x0C000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__TX_SNR_OVERIDE_NP_DEV0_F3_MASK 0x30000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F2_F3__BLKLVL_FOR_NONIDO_DEV0_F3_MASK 0xC0000000L +//BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F4__SHIFT 0x0 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F4__SHIFT 0x2 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F4__SHIFT 0x4 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F4__SHIFT 0x6 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F4__SHIFT 0x8 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F4__SHIFT 0xa +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F4__SHIFT 0xc +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F4__SHIFT 0xe +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F5__SHIFT 0x10 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F5__SHIFT 0x12 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F5__SHIFT 0x14 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F5__SHIFT 0x16 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F5__SHIFT 0x18 +#define 
BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F5__SHIFT 0x1a +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F5__SHIFT 0x1c +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F5__SHIFT 0x1e +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F4_MASK 0x00000003L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F4_MASK 0x0000000CL +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F4_MASK 0x00000030L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F4_MASK 0x000000C0L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F4_MASK 0x00000300L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F4_MASK 0x00000C00L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F4_MASK 0x00003000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F4_MASK 0x0000C000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_P_DEV0_F5_MASK 0x00030000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_IDO_OVERIDE_NP_DEV0_F5_MASK 0x000C0000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_IDO_DEV0_F5_MASK 0x00300000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_P_DEV0_F5_MASK 0x00C00000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_RO_OVERIDE_NP_DEV0_F5_MASK 0x03000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_P_DEV0_F5_MASK 0x0C000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__TX_SNR_OVERIDE_NP_DEV0_F5_MASK 0x30000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F4_F5__BLKLVL_FOR_NONIDO_DEV0_F5_MASK 0xC0000000L +//BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F6__SHIFT 0x0 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F6__SHIFT 0x2 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F6__SHIFT 0x4 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F6__SHIFT 0x6 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F6__SHIFT 0x8 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F6__SHIFT 0xa +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F6__SHIFT 0xc +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F6__SHIFT 0xe +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F7__SHIFT 0x10 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F7__SHIFT 0x12 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F7__SHIFT 0x14 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F7__SHIFT 0x16 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F7__SHIFT 0x18 +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F7__SHIFT 0x1a +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F7__SHIFT 0x1c +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F7__SHIFT 0x1e +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F6_MASK 0x00000003L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F6_MASK 0x0000000CL +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F6_MASK 0x00000030L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F6_MASK 0x000000C0L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F6_MASK 0x00000300L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F6_MASK 0x00000C00L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F6_MASK 0x00003000L +#define 
BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F6_MASK 0x0000C000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_P_DEV0_F7_MASK 0x00030000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_IDO_OVERIDE_NP_DEV0_F7_MASK 0x000C0000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_IDO_DEV0_F7_MASK 0x00300000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_P_DEV0_F7_MASK 0x00C00000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_RO_OVERIDE_NP_DEV0_F7_MASK 0x03000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_P_DEV0_F7_MASK 0x0C000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__TX_SNR_OVERIDE_NP_DEV0_F7_MASK 0x30000000L +#define BIFC_DMA_ATTR_OVERRIDE_DEV0_F6_F7__BLKLVL_FOR_NONIDO_DEV0_F7_MASK 0xC0000000L +//BIFC_DMA_ATTR_CNTL2_DEV0 +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F0__SHIFT 0x0 +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F1__SHIFT 0x4 +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F2__SHIFT 0x8 +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F3__SHIFT 0xc +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F4__SHIFT 0x10 +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F5__SHIFT 0x14 +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F6__SHIFT 0x18 +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F7__SHIFT 0x1c +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F0_MASK 0x00000001L +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F1_MASK 0x00000010L +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F2_MASK 0x00000100L +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F3_MASK 0x00001000L +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F4_MASK 0x00010000L +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F5_MASK 0x00100000L +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F6_MASK 0x01000000L +#define BIFC_DMA_ATTR_CNTL2_DEV0__BLKLVL_BYPASS_PCIE_IDO_CONTROL_DEV0_F7_MASK 0x10000000L +//BME_DUMMY_CNTL_0 +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F0__SHIFT 0x0 +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F1__SHIFT 0x2 +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F2__SHIFT 0x4 +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F3__SHIFT 0x6 +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F4__SHIFT 0x8 +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F5__SHIFT 0xa +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F6__SHIFT 0xc +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F7__SHIFT 0xe +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F0_MASK 0x00000003L +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F1_MASK 0x0000000CL +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F2_MASK 0x00000030L +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F3_MASK 0x000000C0L +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F4_MASK 0x00000300L +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F5_MASK 0x00000C00L +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F6_MASK 0x00003000L +#define BME_DUMMY_CNTL_0__BME_DUMMY_RSPSTS_DEV0_F7_MASK 0x0000C000L +//BIFC_THT_CNTL +#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_RD_VC0__SHIFT 0x0 +#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC0__SHIFT 0x4 +#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC1__SHIFT 0x8 +#define BIFC_THT_CNTL__UR_OVRD_FOR_ECRC_EN__SHIFT 0x10 +#define 
BIFC_THT_CNTL__CREDIT_ALLOC_THT_RD_VC0_MASK 0x0000000FL +#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC0_MASK 0x000000F0L +#define BIFC_THT_CNTL__CREDIT_ALLOC_THT_WR_VC1_MASK 0x00000F00L +#define BIFC_THT_CNTL__UR_OVRD_FOR_ECRC_EN_MASK 0x00010000L +//BIFC_HSTARB_CNTL +#define BIFC_HSTARB_CNTL__SLVARB_MODE__SHIFT 0x0 +#define BIFC_HSTARB_CNTL__SLVARB_MODE_MASK 0x00000003L +//BIFC_GSI_CNTL +#define BIFC_GSI_CNTL__GSI_SDP_RSP_ARB_MODE__SHIFT 0x0 +#define BIFC_GSI_CNTL__GSI_CPL_RSP_ARB_MODE__SHIFT 0x2 +#define BIFC_GSI_CNTL__GSI_CPL_INTERLEAVING_EN__SHIFT 0x5 +#define BIFC_GSI_CNTL__GSI_CPL_PCR_EP_CAUSE_UR_EN__SHIFT 0x6 +#define BIFC_GSI_CNTL__GSI_CPL_SMN_P_EP_CAUSE_UR_EN__SHIFT 0x7 +#define BIFC_GSI_CNTL__GSI_CPL_SMN_NP_EP_CAUSE_UR_EN__SHIFT 0x8 +#define BIFC_GSI_CNTL__GSI_CPL_SST_EP_CAUSE_UR_EN__SHIFT 0x9 +#define BIFC_GSI_CNTL__GSI_SDP_REQ_ARB_MODE__SHIFT 0xa +#define BIFC_GSI_CNTL__GSI_SMN_REQ_ARB_MODE__SHIFT 0xc +#define BIFC_GSI_CNTL__GSI_SDP_RSP_ARB_MODE_MASK 0x00000003L +#define BIFC_GSI_CNTL__GSI_CPL_RSP_ARB_MODE_MASK 0x0000001CL +#define BIFC_GSI_CNTL__GSI_CPL_INTERLEAVING_EN_MASK 0x00000020L +#define BIFC_GSI_CNTL__GSI_CPL_PCR_EP_CAUSE_UR_EN_MASK 0x00000040L +#define BIFC_GSI_CNTL__GSI_CPL_SMN_P_EP_CAUSE_UR_EN_MASK 0x00000080L +#define BIFC_GSI_CNTL__GSI_CPL_SMN_NP_EP_CAUSE_UR_EN_MASK 0x00000100L +#define BIFC_GSI_CNTL__GSI_CPL_SST_EP_CAUSE_UR_EN_MASK 0x00000200L +#define BIFC_GSI_CNTL__GSI_SDP_REQ_ARB_MODE_MASK 0x00000C00L +#define BIFC_GSI_CNTL__GSI_SMN_REQ_ARB_MODE_MASK 0x00003000L +//BIFC_PCIEFUNC_CNTL +#define BIFC_PCIEFUNC_CNTL__DMA_NON_PCIEFUNC_BUSDEVFUNC__SHIFT 0x0 +#define BIFC_PCIEFUNC_CNTL__MP1SYSHUBDATA_DRAM_IS_PCIEFUNC__SHIFT 0x10 +#define BIFC_PCIEFUNC_CNTL__DMA_NON_PCIEFUNC_BUSDEVFUNC_MASK 0x0000FFFFL +#define BIFC_PCIEFUNC_CNTL__MP1SYSHUBDATA_DRAM_IS_PCIEFUNC_MASK 0x00010000L +//BIFC_PASID_CHECK_DIS +#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F0__SHIFT 0x0 +#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F1__SHIFT 0x1 +#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F0_MASK 0x00000001L +#define BIFC_PASID_CHECK_DIS__PASID_CHECK_DIS_DEV0_F1_MASK 0x00000002L +//BIFC_SDP_CNTL_0 +#define BIFC_SDP_CNTL_0__HRP_SDP_DISCON_HYSTERESIS__SHIFT 0x0 +#define BIFC_SDP_CNTL_0__GSI_SDP_DISCON_HYSTERESIS__SHIFT 0x8 +#define BIFC_SDP_CNTL_0__GMI_DNS_SDP_DISCON_HYSTERESIS__SHIFT 0x10 +#define BIFC_SDP_CNTL_0__GMI_UPS_SDP_DISCON_HYSTERESIS__SHIFT 0x18 +#define BIFC_SDP_CNTL_0__HRP_SDP_DISCON_HYSTERESIS_MASK 0x000000FFL +#define BIFC_SDP_CNTL_0__GSI_SDP_DISCON_HYSTERESIS_MASK 0x0000FF00L +#define BIFC_SDP_CNTL_0__GMI_DNS_SDP_DISCON_HYSTERESIS_MASK 0x00FF0000L +#define BIFC_SDP_CNTL_0__GMI_UPS_SDP_DISCON_HYSTERESIS_MASK 0xFF000000L +//BIFC_SDP_CNTL_1 +#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_DIS__SHIFT 0x0 +#define BIFC_SDP_CNTL_1__GSI_SDP_DISCON_DIS__SHIFT 0x1 +#define BIFC_SDP_CNTL_1__GMI_DNS_SDP_DISCON_DIS__SHIFT 0x2 +#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_DIS__SHIFT 0x3 +#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_VLINK_NONL0_ONLY__SHIFT 0x4 +#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_VLINK_NONL0_ONLY__SHIFT 0x7 +#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_DIS_MASK 0x00000001L +#define BIFC_SDP_CNTL_1__GSI_SDP_DISCON_DIS_MASK 0x00000002L +#define BIFC_SDP_CNTL_1__GMI_DNS_SDP_DISCON_DIS_MASK 0x00000004L +#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_DIS_MASK 0x00000008L +#define BIFC_SDP_CNTL_1__HRP_SDP_DISCON_VLINK_NONL0_ONLY_MASK 0x00000010L +#define BIFC_SDP_CNTL_1__GMI_UPS_SDP_DISCON_VLINK_NONL0_ONLY_MASK 0x00000080L +//BIFC_PASID_STS +#define 
BIFC_PASID_STS__PASID_STS__SHIFT 0x0 +#define BIFC_PASID_STS__PASID_STS_MASK 0x0000000FL +//BIFC_ATHUB_ACT_CNTL +#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_RSP_STS_TYPE__SHIFT 0x0 +#define BIFC_ATHUB_ACT_CNTL__ATHUB_ACT_GSI_RSP_STS_TYPE_MASK 0x00000007L +//BIFC_PERF_CNTL_0 +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN__SHIFT 0x0 +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_EN__SHIFT 0x1 +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_RESET__SHIFT 0x8 +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_RESET__SHIFT 0x9 +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL__SHIFT 0x10 +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_SEL__SHIFT 0x18 +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_EN_MASK 0x00000001L +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_EN_MASK 0x00000002L +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_RESET_MASK 0x00000100L +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_RESET_MASK 0x00000200L +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_RD_SEL_MASK 0x003F0000L +#define BIFC_PERF_CNTL_0__PERF_CNT_MMIO_WR_SEL_MASK 0x3F000000L +//BIFC_PERF_CNTL_1 +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_EN__SHIFT 0x0 +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_EN__SHIFT 0x1 +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_RESET__SHIFT 0x8 +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_RESET__SHIFT 0x9 +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_SEL__SHIFT 0x10 +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_SEL__SHIFT 0x18 +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_EN_MASK 0x00000001L +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_EN_MASK 0x00000002L +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_RESET_MASK 0x00000100L +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_RESET_MASK 0x00000200L +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_RD_SEL_MASK 0x003F0000L +#define BIFC_PERF_CNTL_1__PERF_CNT_DMA_WR_SEL_MASK 0x7F000000L +//BIFC_PERF_CNT_MMIO_RD +#define BIFC_PERF_CNT_MMIO_RD__PERF_CNT_MMIO_RD_VALUE__SHIFT 0x0 +#define BIFC_PERF_CNT_MMIO_RD__PERF_CNT_MMIO_RD_VALUE_MASK 0xFFFFFFFFL +//BIFC_PERF_CNT_MMIO_WR +#define BIFC_PERF_CNT_MMIO_WR__PERF_CNT_MMIO_WR_VALUE__SHIFT 0x0 +#define BIFC_PERF_CNT_MMIO_WR__PERF_CNT_MMIO_WR_VALUE_MASK 0xFFFFFFFFL +//BIFC_PERF_CNT_DMA_RD +#define BIFC_PERF_CNT_DMA_RD__PERF_CNT_DMA_RD_VALUE__SHIFT 0x0 +#define BIFC_PERF_CNT_DMA_RD__PERF_CNT_DMA_RD_VALUE_MASK 0xFFFFFFFFL +//BIFC_PERF_CNT_DMA_WR +#define BIFC_PERF_CNT_DMA_WR__PERF_CNT_DMA_WR_VALUE__SHIFT 0x0 +#define BIFC_PERF_CNT_DMA_WR__PERF_CNT_DMA_WR_VALUE_MASK 0xFFFFFFFFL +//NBIF_REGIF_ERRSET_CTRL +#define NBIF_REGIF_ERRSET_CTRL__DROP_NONPF_MMREGREQ_SETERR_DIS__SHIFT 0x0 +#define NBIF_REGIF_ERRSET_CTRL__DROP_NONPF_MMREGREQ_SETERR_DIS_MASK 0x00000001L +//SMN_MST_EP_CNTL3 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF0__SHIFT 0x0 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF1__SHIFT 0x1 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF2__SHIFT 0x2 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF3__SHIFT 0x3 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF4__SHIFT 0x4 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF5__SHIFT 0x5 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF6__SHIFT 0x6 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF7__SHIFT 0x7 +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF0_MASK 0x00000001L +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF1_MASK 0x00000002L +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF2_MASK 0x00000004L +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF3_MASK 0x00000008L +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF4_MASK 0x00000010L +#define 
SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF5_MASK 0x00000020L +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF6_MASK 0x00000040L +#define SMN_MST_EP_CNTL3__SMN_ZERO_BE_WR_EN_EP_DEV0_PF7_MASK 0x00000080L +//SMN_MST_EP_CNTL4 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF0__SHIFT 0x0 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF1__SHIFT 0x1 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF2__SHIFT 0x2 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF3__SHIFT 0x3 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF4__SHIFT 0x4 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF5__SHIFT 0x5 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF6__SHIFT 0x6 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF7__SHIFT 0x7 +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF0_MASK 0x00000001L +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF1_MASK 0x00000002L +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF2_MASK 0x00000004L +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF3_MASK 0x00000008L +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF4_MASK 0x00000010L +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF5_MASK 0x00000020L +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF6_MASK 0x00000040L +#define SMN_MST_EP_CNTL4__SMN_ZERO_BE_RD_EN_EP_DEV0_PF7_MASK 0x00000080L +//SMN_MST_CNTL1 +#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_UPS__SHIFT 0x0 +#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_DNS_DEV0__SHIFT 0x10 +#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_UPS_MASK 0x00000001L +#define SMN_MST_CNTL1__SMN_ERRRSP_DATA_ALLF_DIS_DNS_DEV0_MASK 0x00010000L +//SMN_MST_EP_CNTL5 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF0__SHIFT 0x0 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF1__SHIFT 0x1 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF2__SHIFT 0x2 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF3__SHIFT 0x3 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF4__SHIFT 0x4 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF5__SHIFT 0x5 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF6__SHIFT 0x6 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF7__SHIFT 0x7 +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF0_MASK 0x00000001L +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF1_MASK 0x00000002L +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF2_MASK 0x00000004L +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF3_MASK 0x00000008L +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF4_MASK 0x00000010L +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF5_MASK 0x00000020L +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF6_MASK 0x00000040L +#define SMN_MST_EP_CNTL5__SMN_ERRRSP_DATA_ALLF_DIS_EP_DEV0_PF7_MASK 0x00000080L +//BIF_SELFRING_BUFFER_VID +#define BIF_SELFRING_BUFFER_VID__DOORBELL_MONITOR_CID__SHIFT 0x0 +#define BIF_SELFRING_BUFFER_VID__RAS_CNTLR_INTR_CID__SHIFT 0x8 +#define BIF_SELFRING_BUFFER_VID__RAS_ATHUB_ERR_EVENT_INTR_CID__SHIFT 0x10 +#define BIF_SELFRING_BUFFER_VID__DOORBELL_MONITOR_CID_MASK 0x000000FFL +#define BIF_SELFRING_BUFFER_VID__RAS_CNTLR_INTR_CID_MASK 0x0000FF00L +#define BIF_SELFRING_BUFFER_VID__RAS_ATHUB_ERR_EVENT_INTR_CID_MASK 0x00FF0000L +//BIF_SELFRING_VECTOR_CNTL +#define BIF_SELFRING_VECTOR_CNTL__MISC_DB_MNTR_INTR_DIS__SHIFT 0x0 +#define BIF_SELFRING_VECTOR_CNTL__DB_MNTR_TS_FROM__SHIFT 0x1 +#define 
BIF_SELFRING_VECTOR_CNTL__MISC_DB_MNTR_INTR_DIS_MASK 0x00000001L +#define BIF_SELFRING_VECTOR_CNTL__DB_MNTR_TS_FROM_MASK 0x00000002L +//NBIF_INTX_DSTATE_MISC_CNTL +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_EP__SHIFT 0x0 +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_DN__SHIFT 0x1 +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_SWUS__SHIFT 0x2 +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_EP__SHIFT 0x3 +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_DN__SHIFT 0x4 +#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_EP__SHIFT 0x5 +#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_DN__SHIFT 0x6 +#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_SWUS__SHIFT 0x7 +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_EP_MASK 0x00000001L +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_DN_MASK 0x00000002L +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_DSTATE_CHK_DIS_SWUS_MASK 0x00000004L +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_EP_MASK 0x00000008L +#define NBIF_INTX_DSTATE_MISC_CNTL__DEASRT_INTX_IN_NOND0_EN_DN_MASK 0x00000010L +#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_EP_MASK 0x00000020L +#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_DN_MASK 0x00000040L +#define NBIF_INTX_DSTATE_MISC_CNTL__PMI_INT_DIS_SWUS_MASK 0x00000080L +//NBIF_PENDING_MISC_CNTL +#define NBIF_PENDING_MISC_CNTL__FLR_MST_PEND_CHK_DIS__SHIFT 0x0 +#define NBIF_PENDING_MISC_CNTL__FLR_SLV_PEND_CHK_DIS__SHIFT 0x1 +#define NBIF_PENDING_MISC_CNTL__FLR_MST_PEND_CHK_DIS_MASK 0x00000001L +#define NBIF_PENDING_MISC_CNTL__FLR_SLV_PEND_CHK_DIS_MASK 0x00000002L +//BIF_GMI_WRR_WEIGHT +#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_MODE__SHIFT 0x1f +#define BIF_GMI_WRR_WEIGHT__GMI_REQ_WRR_MODE_MASK 0x80000000L +//BIF_GMI_WRR_WEIGHT2 +#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY0_WEIGHT__SHIFT 0x0 +#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY1_WEIGHT__SHIFT 0x8 +#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY2_WEIGHT__SHIFT 0x10 +#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY3_WEIGHT__SHIFT 0x18 +#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY0_WEIGHT_MASK 0x000000FFL +#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY1_WEIGHT_MASK 0x0000FF00L +#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY2_WEIGHT_MASK 0x00FF0000L +#define BIF_GMI_WRR_WEIGHT2__GMI_REQ_ENTRY3_WEIGHT_MASK 0xFF000000L +//BIF_GMI_WRR_WEIGHT3 +#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY4_WEIGHT__SHIFT 0x0 +#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY5_WEIGHT__SHIFT 0x8 +#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY6_WEIGHT__SHIFT 0x10 +#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY7_WEIGHT__SHIFT 0x18 +#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY4_WEIGHT_MASK 0x000000FFL +#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY5_WEIGHT_MASK 0x0000FF00L +#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY6_WEIGHT_MASK 0x00FF0000L +#define BIF_GMI_WRR_WEIGHT3__GMI_REQ_ENTRY7_WEIGHT_MASK 0xFF000000L +//NBIF_PWRBRK_REQUEST +#define NBIF_PWRBRK_REQUEST__NBIF_PWRBRK_REQUEST__SHIFT 0x0 +#define NBIF_PWRBRK_REQUEST__NBIF_PWRBRK_REQUEST_MASK 0x00000001L +//BIF_ATOMIC_ERR_LOG_DEV0_F0 +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_OPCODE_DEV0_F0__SHIFT 0x0 +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_REQEN_LOW_DEV0_F0__SHIFT 0x1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_LENGTH_DEV0_F0__SHIFT 0x2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_NR_DEV0_F0__SHIFT 0x3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_OPCODE_DEV0_F0__SHIFT 0x10 +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F0__SHIFT 0x11 
+#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_LENGTH_DEV0_F0__SHIFT 0x12 +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_NR_DEV0_F0__SHIFT 0x13 +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_OPCODE_DEV0_F0_MASK 0x00000001L +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_REQEN_LOW_DEV0_F0_MASK 0x00000002L +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_LENGTH_DEV0_F0_MASK 0x00000004L +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__UR_ATOMIC_NR_DEV0_F0_MASK 0x00000008L +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_OPCODE_DEV0_F0_MASK 0x00010000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F0_MASK 0x00020000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_LENGTH_DEV0_F0_MASK 0x00040000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F0__CLEAR_UR_ATOMIC_NR_DEV0_F0_MASK 0x00080000L +//BIF_ATOMIC_ERR_LOG_DEV0_F1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_OPCODE_DEV0_F1__SHIFT 0x0 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_REQEN_LOW_DEV0_F1__SHIFT 0x1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_LENGTH_DEV0_F1__SHIFT 0x2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_NR_DEV0_F1__SHIFT 0x3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_OPCODE_DEV0_F1__SHIFT 0x10 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F1__SHIFT 0x11 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_LENGTH_DEV0_F1__SHIFT 0x12 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_NR_DEV0_F1__SHIFT 0x13 +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_OPCODE_DEV0_F1_MASK 0x00000001L +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_REQEN_LOW_DEV0_F1_MASK 0x00000002L +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_LENGTH_DEV0_F1_MASK 0x00000004L +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__UR_ATOMIC_NR_DEV0_F1_MASK 0x00000008L +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_OPCODE_DEV0_F1_MASK 0x00010000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F1_MASK 0x00020000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_LENGTH_DEV0_F1_MASK 0x00040000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F1__CLEAR_UR_ATOMIC_NR_DEV0_F1_MASK 0x00080000L +//BIF_ATOMIC_ERR_LOG_DEV0_F2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_OPCODE_DEV0_F2__SHIFT 0x0 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_REQEN_LOW_DEV0_F2__SHIFT 0x1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_LENGTH_DEV0_F2__SHIFT 0x2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_NR_DEV0_F2__SHIFT 0x3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_OPCODE_DEV0_F2__SHIFT 0x10 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F2__SHIFT 0x11 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_LENGTH_DEV0_F2__SHIFT 0x12 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_NR_DEV0_F2__SHIFT 0x13 +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_OPCODE_DEV0_F2_MASK 0x00000001L +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_REQEN_LOW_DEV0_F2_MASK 0x00000002L +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_LENGTH_DEV0_F2_MASK 0x00000004L +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__UR_ATOMIC_NR_DEV0_F2_MASK 0x00000008L +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_OPCODE_DEV0_F2_MASK 0x00010000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F2_MASK 0x00020000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_LENGTH_DEV0_F2_MASK 0x00040000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F2__CLEAR_UR_ATOMIC_NR_DEV0_F2_MASK 0x00080000L +//BIF_ATOMIC_ERR_LOG_DEV0_F3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_OPCODE_DEV0_F3__SHIFT 0x0 +#define 
BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_REQEN_LOW_DEV0_F3__SHIFT 0x1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_LENGTH_DEV0_F3__SHIFT 0x2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_NR_DEV0_F3__SHIFT 0x3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_OPCODE_DEV0_F3__SHIFT 0x10 +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F3__SHIFT 0x11 +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_LENGTH_DEV0_F3__SHIFT 0x12 +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_NR_DEV0_F3__SHIFT 0x13 +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_OPCODE_DEV0_F3_MASK 0x00000001L +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_REQEN_LOW_DEV0_F3_MASK 0x00000002L +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_LENGTH_DEV0_F3_MASK 0x00000004L +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__UR_ATOMIC_NR_DEV0_F3_MASK 0x00000008L +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_OPCODE_DEV0_F3_MASK 0x00010000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F3_MASK 0x00020000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_LENGTH_DEV0_F3_MASK 0x00040000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F3__CLEAR_UR_ATOMIC_NR_DEV0_F3_MASK 0x00080000L +//BIF_ATOMIC_ERR_LOG_DEV0_F4 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_OPCODE_DEV0_F4__SHIFT 0x0 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_REQEN_LOW_DEV0_F4__SHIFT 0x1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_LENGTH_DEV0_F4__SHIFT 0x2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_NR_DEV0_F4__SHIFT 0x3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_OPCODE_DEV0_F4__SHIFT 0x10 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F4__SHIFT 0x11 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_LENGTH_DEV0_F4__SHIFT 0x12 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_NR_DEV0_F4__SHIFT 0x13 +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_OPCODE_DEV0_F4_MASK 0x00000001L +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_REQEN_LOW_DEV0_F4_MASK 0x00000002L +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_LENGTH_DEV0_F4_MASK 0x00000004L +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__UR_ATOMIC_NR_DEV0_F4_MASK 0x00000008L +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_OPCODE_DEV0_F4_MASK 0x00010000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F4_MASK 0x00020000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_LENGTH_DEV0_F4_MASK 0x00040000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F4__CLEAR_UR_ATOMIC_NR_DEV0_F4_MASK 0x00080000L +//BIF_ATOMIC_ERR_LOG_DEV0_F5 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_OPCODE_DEV0_F5__SHIFT 0x0 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_REQEN_LOW_DEV0_F5__SHIFT 0x1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_LENGTH_DEV0_F5__SHIFT 0x2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_NR_DEV0_F5__SHIFT 0x3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_OPCODE_DEV0_F5__SHIFT 0x10 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F5__SHIFT 0x11 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_LENGTH_DEV0_F5__SHIFT 0x12 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_NR_DEV0_F5__SHIFT 0x13 +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_OPCODE_DEV0_F5_MASK 0x00000001L +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_REQEN_LOW_DEV0_F5_MASK 0x00000002L +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_LENGTH_DEV0_F5_MASK 0x00000004L +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__UR_ATOMIC_NR_DEV0_F5_MASK 0x00000008L +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_OPCODE_DEV0_F5_MASK 0x00010000L +#define 
BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F5_MASK 0x00020000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_LENGTH_DEV0_F5_MASK 0x00040000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F5__CLEAR_UR_ATOMIC_NR_DEV0_F5_MASK 0x00080000L +//BIF_ATOMIC_ERR_LOG_DEV0_F6 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_OPCODE_DEV0_F6__SHIFT 0x0 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_REQEN_LOW_DEV0_F6__SHIFT 0x1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_LENGTH_DEV0_F6__SHIFT 0x2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_NR_DEV0_F6__SHIFT 0x3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_OPCODE_DEV0_F6__SHIFT 0x10 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F6__SHIFT 0x11 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_LENGTH_DEV0_F6__SHIFT 0x12 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_NR_DEV0_F6__SHIFT 0x13 +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_OPCODE_DEV0_F6_MASK 0x00000001L +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_REQEN_LOW_DEV0_F6_MASK 0x00000002L +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_LENGTH_DEV0_F6_MASK 0x00000004L +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__UR_ATOMIC_NR_DEV0_F6_MASK 0x00000008L +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_OPCODE_DEV0_F6_MASK 0x00010000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F6_MASK 0x00020000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_LENGTH_DEV0_F6_MASK 0x00040000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F6__CLEAR_UR_ATOMIC_NR_DEV0_F6_MASK 0x00080000L +//BIF_ATOMIC_ERR_LOG_DEV0_F7 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_OPCODE_DEV0_F7__SHIFT 0x0 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_REQEN_LOW_DEV0_F7__SHIFT 0x1 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_LENGTH_DEV0_F7__SHIFT 0x2 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_NR_DEV0_F7__SHIFT 0x3 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_OPCODE_DEV0_F7__SHIFT 0x10 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F7__SHIFT 0x11 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_LENGTH_DEV0_F7__SHIFT 0x12 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_NR_DEV0_F7__SHIFT 0x13 +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_OPCODE_DEV0_F7_MASK 0x00000001L +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_REQEN_LOW_DEV0_F7_MASK 0x00000002L +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_LENGTH_DEV0_F7_MASK 0x00000004L +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__UR_ATOMIC_NR_DEV0_F7_MASK 0x00000008L +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_OPCODE_DEV0_F7_MASK 0x00010000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_REQEN_LOW_DEV0_F7_MASK 0x00020000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_LENGTH_DEV0_F7_MASK 0x00040000L +#define BIF_ATOMIC_ERR_LOG_DEV0_F7__CLEAR_UR_ATOMIC_NR_DEV0_F7_MASK 0x00080000L +//BIF_DMA_MP4_ERR_LOG +#define BIF_DMA_MP4_ERR_LOG__MP4SDP_VC4_NON_DVM_ERR__SHIFT 0x0 +#define BIF_DMA_MP4_ERR_LOG__MP4SDP_ATOMIC_REQEN_LOW_ERR__SHIFT 0x1 +#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_VC4_NON_DVM_ERR__SHIFT 0x10 +#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_ATOMIC_REQEN_LOW_ERR__SHIFT 0x11 +#define BIF_DMA_MP4_ERR_LOG__MP4SDP_VC4_NON_DVM_ERR_MASK 0x00000001L +#define BIF_DMA_MP4_ERR_LOG__MP4SDP_ATOMIC_REQEN_LOW_ERR_MASK 0x00000002L +#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_VC4_NON_DVM_ERR_MASK 0x00010000L +#define BIF_DMA_MP4_ERR_LOG__CLEAR_MP4SDP_ATOMIC_REQEN_LOW_ERR_MASK 0x00020000L +//BIF_PASID_ERR_LOG +#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F0__SHIFT 0x0 +#define 
BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F1__SHIFT 0x1 +#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F0_MASK 0x00000001L +#define BIF_PASID_ERR_LOG__PASID_ERR_DEV0_F1_MASK 0x00000002L +//BIF_PASID_ERR_CLR +#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F0__SHIFT 0x0 +#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F1__SHIFT 0x1 +#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F0_MASK 0x00000001L +#define BIF_PASID_ERR_CLR__PASID_ERR_CLR_DEV0_F1_MASK 0x00000002L +//NBIF_VWIRE_CTRL +#define NBIF_VWIRE_CTRL__NBIF_SMN_VWR_DIS__SHIFT 0x0 +#define NBIF_VWIRE_CTRL__SMN_VWR_RESET_DELAY_CNT__SHIFT 0x4 +#define NBIF_VWIRE_CTRL__SMN_VWR_POSTED__SHIFT 0x8 +#define NBIF_VWIRE_CTRL__NBIF_SDP_UPS_VWR_DIS__SHIFT 0x10 +#define NBIF_VWIRE_CTRL__SDP_VWR_RESET_DELAY_CNT__SHIFT 0x14 +#define NBIF_VWIRE_CTRL__SDP_VWR_BLOCKLVL__SHIFT 0x1a +#define NBIF_VWIRE_CTRL__NBIF_SMN_VWR_DIS_MASK 0x00000001L +#define NBIF_VWIRE_CTRL__SMN_VWR_RESET_DELAY_CNT_MASK 0x000000F0L +#define NBIF_VWIRE_CTRL__SMN_VWR_POSTED_MASK 0x00000100L +#define NBIF_VWIRE_CTRL__NBIF_SDP_UPS_VWR_DIS_MASK 0x00010000L +#define NBIF_VWIRE_CTRL__SDP_VWR_RESET_DELAY_CNT_MASK 0x00F00000L +#define NBIF_VWIRE_CTRL__SDP_VWR_BLOCKLVL_MASK 0x0C000000L +//NBIF_SMN_VWR_VCHG_DIS_CTRL +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET0_DIS__SHIFT 0x0 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET1_DIS__SHIFT 0x1 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET2_DIS__SHIFT 0x2 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET3_DIS__SHIFT 0x3 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET4_DIS__SHIFT 0x4 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET5_DIS__SHIFT 0x5 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET6_DIS__SHIFT 0x6 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET0_DIS_MASK 0x00000001L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET1_DIS_MASK 0x00000002L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET2_DIS_MASK 0x00000004L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET3_DIS_MASK 0x00000008L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET4_DIS_MASK 0x00000010L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET5_DIS_MASK 0x00000020L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL__SMN_VWR_VCHG_SET6_DIS_MASK 0x00000040L +//NBIF_SMN_VWR_VCHG_RST_CTRL0 +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET0_RST_DEF_REV__SHIFT 0x0 +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET1_RST_DEF_REV__SHIFT 0x1 +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET2_RST_DEF_REV__SHIFT 0x2 +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET3_RST_DEF_REV__SHIFT 0x3 +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET4_RST_DEF_REV__SHIFT 0x4 +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET5_RST_DEF_REV__SHIFT 0x5 +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET6_RST_DEF_REV__SHIFT 0x6 +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET0_RST_DEF_REV_MASK 0x00000001L +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET1_RST_DEF_REV_MASK 0x00000002L +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET2_RST_DEF_REV_MASK 0x00000004L +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET3_RST_DEF_REV_MASK 0x00000008L +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET4_RST_DEF_REV_MASK 0x00000010L +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET5_RST_DEF_REV_MASK 0x00000020L +#define NBIF_SMN_VWR_VCHG_RST_CTRL0__SMN_VWR_VCHG_SET6_RST_DEF_REV_MASK 0x00000040L +//NBIF_SMN_VWR_VCHG_TRIG +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET0_TRIG__SHIFT 0x0 +#define 
NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET1_TRIG__SHIFT 0x1 +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET2_TRIG__SHIFT 0x2 +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET3_TRIG__SHIFT 0x3 +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET4_TRIG__SHIFT 0x4 +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET5_TRIG__SHIFT 0x5 +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET6_TRIG__SHIFT 0x6 +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET0_TRIG_MASK 0x00000001L +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET1_TRIG_MASK 0x00000002L +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET2_TRIG_MASK 0x00000004L +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET3_TRIG_MASK 0x00000008L +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET4_TRIG_MASK 0x00000010L +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET5_TRIG_MASK 0x00000020L +#define NBIF_SMN_VWR_VCHG_TRIG__SMN_VWR_VCHG_SET6_TRIG_MASK 0x00000040L +//NBIF_SMN_VWR_WTRIG_CNTL +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET0_DIS__SHIFT 0x0 +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET1_DIS__SHIFT 0x1 +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET2_DIS__SHIFT 0x2 +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET3_DIS__SHIFT 0x3 +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET4_DIS__SHIFT 0x4 +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET5_DIS__SHIFT 0x5 +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET6_DIS__SHIFT 0x6 +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET0_DIS_MASK 0x00000001L +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET1_DIS_MASK 0x00000002L +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET2_DIS_MASK 0x00000004L +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET3_DIS_MASK 0x00000008L +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET4_DIS_MASK 0x00000010L +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET5_DIS_MASK 0x00000020L +#define NBIF_SMN_VWR_WTRIG_CNTL__SMN_VWR_WTRIG_SET6_DIS_MASK 0x00000040L +//NBIF_SMN_VWR_VCHG_DIS_CTRL_1 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET0_DIFFDET_DEF_REV__SHIFT 0x0 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET1_DIFFDET_DEF_REV__SHIFT 0x1 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET2_DIFFDET_DEF_REV__SHIFT 0x2 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET3_DIFFDET_DEF_REV__SHIFT 0x3 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET4_DIFFDET_DEF_REV__SHIFT 0x4 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET5_DIFFDET_DEF_REV__SHIFT 0x5 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET6_DIFFDET_DEF_REV__SHIFT 0x6 +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET0_DIFFDET_DEF_REV_MASK 0x00000001L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET1_DIFFDET_DEF_REV_MASK 0x00000002L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET2_DIFFDET_DEF_REV_MASK 0x00000004L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET3_DIFFDET_DEF_REV_MASK 0x00000008L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET4_DIFFDET_DEF_REV_MASK 0x00000010L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET5_DIFFDET_DEF_REV_MASK 0x00000020L +#define NBIF_SMN_VWR_VCHG_DIS_CTRL_1__SMN_VWR_VCHG_SET6_DIFFDET_DEF_REV_MASK 0x00000040L +//NBIF_MGCG_CTRL_LCLK +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK__SHIFT 0x0 +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_MODE_LCLK__SHIFT 0x1 +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HYSTERESIS_LCLK__SHIFT 0x2 +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HST_DIS_LCLK__SHIFT 0xa +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DMA_DIS_LCLK__SHIFT 0xb +#define 
NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK__SHIFT 0xc +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_AER_DIS_LCLK__SHIFT 0xd +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_EN_LCLK_MASK 0x00000001L +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_MODE_LCLK_MASK 0x00000002L +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HYSTERESIS_LCLK_MASK 0x000003FCL +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_HST_DIS_LCLK_MASK 0x00000400L +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_DMA_DIS_LCLK_MASK 0x00000800L +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L +#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_AER_DIS_LCLK_MASK 0x00002000L +//NBIF_DS_CTRL_LCLK +#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_EN__SHIFT 0x0 +#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_TIMER__SHIFT 0x10 +#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_EN_MASK 0x00000001L +#define NBIF_DS_CTRL_LCLK__NBIF_LCLK_DS_TIMER_MASK 0xFFFF0000L +//SMN_MST_CNTL0 +#define SMN_MST_CNTL0__SMN_ARB_MODE__SHIFT 0x0 +#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_UPS__SHIFT 0x8 +#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_UPS__SHIFT 0x9 +#define SMN_MST_CNTL0__SMN_POST_MASK_EN_UPS__SHIFT 0xa +#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_UPS__SHIFT 0xb +#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_DNS_DEV0__SHIFT 0x10 +#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_DNS_DEV0__SHIFT 0x14 +#define SMN_MST_CNTL0__SMN_POST_MASK_EN_DNS_DEV0__SHIFT 0x18 +#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_DNS_DEV0__SHIFT 0x1c +#define SMN_MST_CNTL0__SMN_ARB_MODE_MASK 0x00000003L +#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_UPS_MASK 0x00000100L +#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_UPS_MASK 0x00000200L +#define SMN_MST_CNTL0__SMN_POST_MASK_EN_UPS_MASK 0x00000400L +#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_UPS_MASK 0x00000800L +#define SMN_MST_CNTL0__SMN_ZERO_BE_WR_EN_DNS_DEV0_MASK 0x00010000L +#define SMN_MST_CNTL0__SMN_ZERO_BE_RD_EN_DNS_DEV0_MASK 0x00100000L +#define SMN_MST_CNTL0__SMN_POST_MASK_EN_DNS_DEV0_MASK 0x01000000L +#define SMN_MST_CNTL0__MULTI_SMN_TRANS_ID_DIS_DNS_DEV0_MASK 0x10000000L +//SMN_MST_EP_CNTL1 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF0__SHIFT 0x0 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF1__SHIFT 0x1 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF2__SHIFT 0x2 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF3__SHIFT 0x3 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF4__SHIFT 0x4 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF5__SHIFT 0x5 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF6__SHIFT 0x6 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF7__SHIFT 0x7 +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF0_MASK 0x00000001L +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF1_MASK 0x00000002L +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF2_MASK 0x00000004L +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF3_MASK 0x00000008L +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF4_MASK 0x00000010L +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF5_MASK 0x00000020L +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF6_MASK 0x00000040L +#define SMN_MST_EP_CNTL1__SMN_POST_MASK_EN_EP_DEV0_PF7_MASK 0x00000080L +//SMN_MST_EP_CNTL2 +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF0__SHIFT 0x0 +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF1__SHIFT 0x1 +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF2__SHIFT 0x2 +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF3__SHIFT 0x3 +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF4__SHIFT 0x4 +#define 
SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF5__SHIFT 0x5 +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF6__SHIFT 0x6 +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF7__SHIFT 0x7 +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF0_MASK 0x00000001L +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF1_MASK 0x00000002L +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF2_MASK 0x00000004L +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF3_MASK 0x00000008L +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF4_MASK 0x00000010L +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF5_MASK 0x00000020L +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF6_MASK 0x00000040L +#define SMN_MST_EP_CNTL2__MULTI_SMN_TRANS_ID_DIS_EP_DEV0_PF7_MASK 0x00000080L +//NBIF_SDP_VWR_VCHG_DIS_CTRL +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F0_DIS__SHIFT 0x0 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F1_DIS__SHIFT 0x1 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F2_DIS__SHIFT 0x2 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F3_DIS__SHIFT 0x3 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F4_DIS__SHIFT 0x4 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F5_DIS__SHIFT 0x5 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F6_DIS__SHIFT 0x6 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F7_DIS__SHIFT 0x7 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_SWDS_P0_DIS__SHIFT 0x18 +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F0_DIS_MASK 0x00000001L +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F1_DIS_MASK 0x00000002L +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F2_DIS_MASK 0x00000004L +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F3_DIS_MASK 0x00000008L +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F4_DIS_MASK 0x00000010L +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F5_DIS_MASK 0x00000020L +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F6_DIS_MASK 0x00000040L +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_ENDP_F7_DIS_MASK 0x00000080L +#define NBIF_SDP_VWR_VCHG_DIS_CTRL__SDP_VWR_VCHG_SWDS_P0_DIS_MASK 0x01000000L +//NBIF_SDP_VWR_VCHG_RST_CTRL0 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_EN__SHIFT 0x0 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_EN__SHIFT 0x1 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_EN__SHIFT 0x2 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_EN__SHIFT 0x3 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_EN__SHIFT 0x4 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_EN__SHIFT 0x5 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_EN__SHIFT 0x6 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_EN__SHIFT 0x7 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_EN__SHIFT 0x18 +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_EN_MASK 0x00000001L +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_EN_MASK 0x00000002L +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_EN_MASK 0x00000004L +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_EN_MASK 0x00000008L +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_EN_MASK 0x00000010L +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_EN_MASK 0x00000020L +#define 
NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_EN_MASK 0x00000040L +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_EN_MASK 0x00000080L +#define NBIF_SDP_VWR_VCHG_RST_CTRL0__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_EN_MASK 0x01000000L +//NBIF_SDP_VWR_VCHG_RST_CTRL1 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_VAL__SHIFT 0x0 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_VAL__SHIFT 0x1 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_VAL__SHIFT 0x2 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_VAL__SHIFT 0x3 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_VAL__SHIFT 0x4 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_VAL__SHIFT 0x5 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_VAL__SHIFT 0x6 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_VAL__SHIFT 0x7 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_VAL__SHIFT 0x18 +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F0_RST_OVRD_VAL_MASK 0x00000001L +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F1_RST_OVRD_VAL_MASK 0x00000002L +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F2_RST_OVRD_VAL_MASK 0x00000004L +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F3_RST_OVRD_VAL_MASK 0x00000008L +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F4_RST_OVRD_VAL_MASK 0x00000010L +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F5_RST_OVRD_VAL_MASK 0x00000020L +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F6_RST_OVRD_VAL_MASK 0x00000040L +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_ENDP_F7_RST_OVRD_VAL_MASK 0x00000080L +#define NBIF_SDP_VWR_VCHG_RST_CTRL1__SDP_VWR_VCHG_SWDS_P0_RST_OVRD_VAL_MASK 0x01000000L +//NBIF_SDP_VWR_VCHG_TRIG +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F0_TRIG__SHIFT 0x0 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F1_TRIG__SHIFT 0x1 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F2_TRIG__SHIFT 0x2 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F3_TRIG__SHIFT 0x3 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F4_TRIG__SHIFT 0x4 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F5_TRIG__SHIFT 0x5 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F6_TRIG__SHIFT 0x6 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F7_TRIG__SHIFT 0x7 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_SWDS_P0_TRIG__SHIFT 0x18 +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F0_TRIG_MASK 0x00000001L +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F1_TRIG_MASK 0x00000002L +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F2_TRIG_MASK 0x00000004L +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F3_TRIG_MASK 0x00000008L +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F4_TRIG_MASK 0x00000010L +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F5_TRIG_MASK 0x00000020L +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F6_TRIG_MASK 0x00000040L +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_ENDP_F7_TRIG_MASK 0x00000080L +#define NBIF_SDP_VWR_VCHG_TRIG__SDP_VWR_VCHG_SWDS_P0_TRIG_MASK 0x01000000L + + +// addressBlock: nbio_nbif0_rcc_pfc_amdgfx_RCCPFCDEC +//RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0 +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf +#define 
RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10 +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L +#define RCC_PFC_AMDGFX_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L +//RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE +#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0 +#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8 +#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L +#define RCC_PFC_AMDGFX_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L +//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L +//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5 +#define RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0 +#define 
RCC_PFC_AMDGFX_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL +#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0 +#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3 +#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L +#define RCC_PFC_AMDGFX_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L + + +// addressBlock: nbio_nbif0_rcc_pfc_amdgfxaz_RCCPFCDEC +//RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE__SHIFT 0x0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE__SHIFT 0xa +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT__SHIFT 0xf +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE__SHIFT 0x10 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE__SHIFT 0x1a +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT__SHIFT 0x1f +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_VALUE_MASK 0x000003FFL +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_LATENCY_SCALE_MASK 0x00001C00L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__SNOOP_REQUIREMENT_MASK 0x00008000L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_VALUE_MASK 0x03FF0000L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_LATENCY_SCALE_MASK 0x1C000000L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_LTR_CNTL__NONSNOOP_REQUIREMENT_MASK 0x80000000L +//RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE +#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN__SHIFT 0x0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS__SHIFT 0x8 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_EN_MASK 0x00000001L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_PME_RESTORE__PME_RESTORE_PME_STATUS_MASK 0x00000100L +//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS__SHIFT 0x0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS__SHIFT 0x1 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS__SHIFT 0x2 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS__SHIFT 0x3 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS__SHIFT 0x4 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS__SHIFT 0x5 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS__SHIFT 0x6 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0x7 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_PSN_ERR_STATUS_MASK 0x00000001L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_TIMEOUT_STATUS_MASK 0x00000002L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_CPL_ABORT_ERR_STATUS_MASK 0x00000004L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNEXP_CPL_STATUS_MASK 0x00000008L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_MAL_TLP_STATUS_MASK 0x00000010L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ECRC_ERR_STATUS_MASK 0x00000020L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_UNSUPP_REQ_ERR_STATUS_MASK 0x00000040L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_0__RESTORE_ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00000080L +//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0__SHIFT 0x0 +#define 
RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_1__RESTORE_TLP_HDR_0_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1__SHIFT 0x0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_2__RESTORE_TLP_HDR_1_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2__SHIFT 0x0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_3__RESTORE_TLP_HDR_2_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3__SHIFT 0x0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_4__RESTORE_TLP_HDR_3_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX__SHIFT 0x0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_STICKY_RESTORE_5__RESTORE_TLP_PREFIX_MASK 0xFFFFFFFFL +//RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL +#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE__SHIFT 0x0 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE__SHIFT 0x3 +#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_CURRENT_OVERRIDE_MASK 0x00000007L +#define RCC_PFC_AMDGFXAZ_RCC_PFC_AUXPWR_CNTL__AUX_POWER_DETECTED_OVERRIDE_MASK 0x00000008L + + +// addressBlock: nbio_nbif0_bif_rst_bif_rst_regblk +//HARD_RST_CTRL +#define HARD_RST_CTRL__DSPT_CFG_RST_EN__SHIFT 0x0 +#define HARD_RST_CTRL__DSPT_CFG_STICKY_RST_EN__SHIFT 0x1 +#define HARD_RST_CTRL__DSPT_PRV_RST_EN__SHIFT 0x2 +#define HARD_RST_CTRL__DSPT_PRV_STICKY_RST_EN__SHIFT 0x3 +#define HARD_RST_CTRL__EP_CFG_RST_EN__SHIFT 0x4 +#define HARD_RST_CTRL__EP_CFG_STICKY_RST_EN__SHIFT 0x5 +#define HARD_RST_CTRL__EP_PRV_RST_EN__SHIFT 0x6 +#define HARD_RST_CTRL__EP_PRV_STICKY_RST_EN__SHIFT 0x7 +#define HARD_RST_CTRL__SWUS_SHADOW_RST_EN__SHIFT 0x1c +#define HARD_RST_CTRL__CORE_STICKY_RST_EN__SHIFT 0x1d +#define HARD_RST_CTRL__RELOAD_STRAP_EN__SHIFT 0x1e +#define HARD_RST_CTRL__CORE_RST_EN__SHIFT 0x1f +#define HARD_RST_CTRL__DSPT_CFG_RST_EN_MASK 0x00000001L +#define HARD_RST_CTRL__DSPT_CFG_STICKY_RST_EN_MASK 0x00000002L +#define HARD_RST_CTRL__DSPT_PRV_RST_EN_MASK 0x00000004L +#define HARD_RST_CTRL__DSPT_PRV_STICKY_RST_EN_MASK 0x00000008L +#define HARD_RST_CTRL__EP_CFG_RST_EN_MASK 0x00000010L +#define HARD_RST_CTRL__EP_CFG_STICKY_RST_EN_MASK 0x00000020L +#define HARD_RST_CTRL__EP_PRV_RST_EN_MASK 0x00000040L +#define HARD_RST_CTRL__EP_PRV_STICKY_RST_EN_MASK 0x00000080L +#define HARD_RST_CTRL__SWUS_SHADOW_RST_EN_MASK 0x10000000L +#define HARD_RST_CTRL__CORE_STICKY_RST_EN_MASK 0x20000000L +#define HARD_RST_CTRL__RELOAD_STRAP_EN_MASK 0x40000000L +#define HARD_RST_CTRL__CORE_RST_EN_MASK 0x80000000L +//RSMU_SOFT_RST_CTRL +#define RSMU_SOFT_RST_CTRL__DSPT_CFG_RST_EN__SHIFT 0x0 +#define RSMU_SOFT_RST_CTRL__DSPT_CFG_STICKY_RST_EN__SHIFT 0x1 +#define RSMU_SOFT_RST_CTRL__DSPT_PRV_RST_EN__SHIFT 0x2 +#define RSMU_SOFT_RST_CTRL__DSPT_PRV_STICKY_RST_EN__SHIFT 0x3 +#define RSMU_SOFT_RST_CTRL__EP_CFG_RST_EN__SHIFT 0x4 +#define RSMU_SOFT_RST_CTRL__EP_CFG_STICKY_RST_EN__SHIFT 0x5 +#define RSMU_SOFT_RST_CTRL__EP_PRV_RST_EN__SHIFT 0x6 +#define RSMU_SOFT_RST_CTRL__EP_PRV_STICKY_RST_EN__SHIFT 0x7 +#define RSMU_SOFT_RST_CTRL__SWUS_SHADOW_RST_EN__SHIFT 0x1c +#define RSMU_SOFT_RST_CTRL__CORE_STICKY_RST_EN__SHIFT 0x1d +#define RSMU_SOFT_RST_CTRL__RELOAD_STRAP_EN__SHIFT 0x1e +#define RSMU_SOFT_RST_CTRL__CORE_RST_EN__SHIFT 0x1f +#define RSMU_SOFT_RST_CTRL__DSPT_CFG_RST_EN_MASK 0x00000001L +#define 
RSMU_SOFT_RST_CTRL__DSPT_CFG_STICKY_RST_EN_MASK 0x00000002L +#define RSMU_SOFT_RST_CTRL__DSPT_PRV_RST_EN_MASK 0x00000004L +#define RSMU_SOFT_RST_CTRL__DSPT_PRV_STICKY_RST_EN_MASK 0x00000008L +#define RSMU_SOFT_RST_CTRL__EP_CFG_RST_EN_MASK 0x00000010L +#define RSMU_SOFT_RST_CTRL__EP_CFG_STICKY_RST_EN_MASK 0x00000020L +#define RSMU_SOFT_RST_CTRL__EP_PRV_RST_EN_MASK 0x00000040L +#define RSMU_SOFT_RST_CTRL__EP_PRV_STICKY_RST_EN_MASK 0x00000080L +#define RSMU_SOFT_RST_CTRL__SWUS_SHADOW_RST_EN_MASK 0x10000000L +#define RSMU_SOFT_RST_CTRL__CORE_STICKY_RST_EN_MASK 0x20000000L +#define RSMU_SOFT_RST_CTRL__RELOAD_STRAP_EN_MASK 0x40000000L +#define RSMU_SOFT_RST_CTRL__CORE_RST_EN_MASK 0x80000000L +//SELF_SOFT_RST +#define SELF_SOFT_RST__DSPT0_CFG_RST__SHIFT 0x0 +#define SELF_SOFT_RST__DSPT0_CFG_STICKY_RST__SHIFT 0x1 +#define SELF_SOFT_RST__DSPT0_PRV_RST__SHIFT 0x2 +#define SELF_SOFT_RST__DSPT0_PRV_STICKY_RST__SHIFT 0x3 +#define SELF_SOFT_RST__EP0_CFG_RST__SHIFT 0x4 +#define SELF_SOFT_RST__EP0_CFG_STICKY_RST__SHIFT 0x5 +#define SELF_SOFT_RST__EP0_PRV_RST__SHIFT 0x6 +#define SELF_SOFT_RST__EP0_PRV_STICKY_RST__SHIFT 0x7 +#define SELF_SOFT_RST__HRPU_SDP_PORT_RST__SHIFT 0x18 +#define SELF_SOFT_RST__GSID_SDP_PORT_RST__SHIFT 0x19 +#define SELF_SOFT_RST__GMIU_SDP_PORT_RST__SHIFT 0x1a +#define SELF_SOFT_RST__GMID_SDP_PORT_RST__SHIFT 0x1b +#define SELF_SOFT_RST__SWUS_SHADOW_RST__SHIFT 0x1c +#define SELF_SOFT_RST__CORE_STICKY_RST__SHIFT 0x1d +#define SELF_SOFT_RST__RELOAD_STRAP__SHIFT 0x1e +#define SELF_SOFT_RST__CORE_RST__SHIFT 0x1f +#define SELF_SOFT_RST__DSPT0_CFG_RST_MASK 0x00000001L +#define SELF_SOFT_RST__DSPT0_CFG_STICKY_RST_MASK 0x00000002L +#define SELF_SOFT_RST__DSPT0_PRV_RST_MASK 0x00000004L +#define SELF_SOFT_RST__DSPT0_PRV_STICKY_RST_MASK 0x00000008L +#define SELF_SOFT_RST__EP0_CFG_RST_MASK 0x00000010L +#define SELF_SOFT_RST__EP0_CFG_STICKY_RST_MASK 0x00000020L +#define SELF_SOFT_RST__EP0_PRV_RST_MASK 0x00000040L +#define SELF_SOFT_RST__EP0_PRV_STICKY_RST_MASK 0x00000080L +#define SELF_SOFT_RST__HRPU_SDP_PORT_RST_MASK 0x01000000L +#define SELF_SOFT_RST__GSID_SDP_PORT_RST_MASK 0x02000000L +#define SELF_SOFT_RST__GMIU_SDP_PORT_RST_MASK 0x04000000L +#define SELF_SOFT_RST__GMID_SDP_PORT_RST_MASK 0x08000000L +#define SELF_SOFT_RST__SWUS_SHADOW_RST_MASK 0x10000000L +#define SELF_SOFT_RST__CORE_STICKY_RST_MASK 0x20000000L +#define SELF_SOFT_RST__RELOAD_STRAP_MASK 0x40000000L +#define SELF_SOFT_RST__CORE_RST_MASK 0x80000000L +//BIF_GFX_DRV_VPU_RST +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_RST__SHIFT 0x0 +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_FLR_EXC_RST__SHIFT 0x1 +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_STICKY_RST__SHIFT 0x2 +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_RST__SHIFT 0x3 +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_STICKY_RST__SHIFT 0x4 +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_RST__SHIFT 0x5 +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_STICKY_RST__SHIFT 0x6 +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_PRV_RST__SHIFT 0x7 +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_RST_MASK 0x00000001L +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_FLR_EXC_RST_MASK 0x00000002L +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_CFG_STICKY_RST_MASK 0x00000004L +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_RST_MASK 0x00000008L +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_PF_PRV_STICKY_RST_MASK 0x00000010L +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_RST_MASK 0x00000020L +#define BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_CFG_STICKY_RST_MASK 0x00000040L +#define 
BIF_GFX_DRV_VPU_RST__DRV_MODE1_VF_PRV_RST_MASK 0x00000080L +//BIF_RST_MISC_CTRL +#define BIF_RST_MISC_CTRL__ERRSTATUS_KEPT_IN_PERSTB__SHIFT 0x0 +#define BIF_RST_MISC_CTRL__DRV_RST_MODE__SHIFT 0x2 +#define BIF_RST_MISC_CTRL__DRV_RST_CFG_MASK__SHIFT 0x4 +#define BIF_RST_MISC_CTRL__DRV_RST_BITS_AUTO_CLEAR__SHIFT 0x5 +#define BIF_RST_MISC_CTRL__FLR_RST_BIT_AUTO_CLEAR__SHIFT 0x6 +#define BIF_RST_MISC_CTRL__STRAP_EP_LNK_RST_IOV_EN__SHIFT 0x8 +#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_MODE__SHIFT 0x9 +#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_TIMEOUT__SHIFT 0xa +#define BIF_RST_MISC_CTRL__LNK_RST_TIMER_SEL__SHIFT 0xd +#define BIF_RST_MISC_CTRL__LNK_RST_TIMER2_SEL__SHIFT 0xf +#define BIF_RST_MISC_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR__SHIFT 0x11 +#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_DIS__SHIFT 0x17 +#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_RSPSTS__SHIFT 0x18 +#define BIF_RST_MISC_CTRL__ERRSTATUS_KEPT_IN_PERSTB_MASK 0x00000001L +#define BIF_RST_MISC_CTRL__DRV_RST_MODE_MASK 0x0000000CL +#define BIF_RST_MISC_CTRL__DRV_RST_CFG_MASK_MASK 0x00000010L +#define BIF_RST_MISC_CTRL__DRV_RST_BITS_AUTO_CLEAR_MASK 0x00000020L +#define BIF_RST_MISC_CTRL__FLR_RST_BIT_AUTO_CLEAR_MASK 0x00000040L +#define BIF_RST_MISC_CTRL__STRAP_EP_LNK_RST_IOV_EN_MASK 0x00000100L +#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_MODE_MASK 0x00000200L +#define BIF_RST_MISC_CTRL__LNK_RST_GRACE_TIMEOUT_MASK 0x00001C00L +#define BIF_RST_MISC_CTRL__LNK_RST_TIMER_SEL_MASK 0x00006000L +#define BIF_RST_MISC_CTRL__LNK_RST_TIMER2_SEL_MASK 0x00018000L +#define BIF_RST_MISC_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR_MASK 0x000E0000L +#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_DIS_MASK 0x00800000L +#define BIF_RST_MISC_CTRL__LNK_RST_DMA_DUMMY_RSPSTS_MASK 0x03000000L +//BIF_RST_MISC_CTRL2 +#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_TRANS_IDLE__SHIFT 0x10 +#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_TRANS_IDLE__SHIFT 0x11 +#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_TRANS_IDLE__SHIFT 0x12 +#define BIF_RST_MISC_CTRL2__ALL_RST_TRANS_IDLE__SHIFT 0x1f +#define BIF_RST_MISC_CTRL2__SWUS_LNK_RST_TRANS_IDLE_MASK 0x00010000L +#define BIF_RST_MISC_CTRL2__SWDS_LNK_RST_TRANS_IDLE_MASK 0x00020000L +#define BIF_RST_MISC_CTRL2__ENDP0_LNK_RST_TRANS_IDLE_MASK 0x00040000L +#define BIF_RST_MISC_CTRL2__ALL_RST_TRANS_IDLE_MASK 0x80000000L +//BIF_RST_MISC_CTRL3 +#define BIF_RST_MISC_CTRL3__TIMER_SCALE__SHIFT 0x0 +#define BIF_RST_MISC_CTRL3__PME_TURNOFF_TIMEOUT__SHIFT 0x4 +#define BIF_RST_MISC_CTRL3__PME_TURNOFF_MODE__SHIFT 0x6 +#define BIF_RST_MISC_CTRL3__RSMU_SOFT_RST_CYCLE__SHIFT 0x10 +#define BIF_RST_MISC_CTRL3__TIMER_SCALE_MASK 0x0000000FL +#define BIF_RST_MISC_CTRL3__PME_TURNOFF_TIMEOUT_MASK 0x00000030L +#define BIF_RST_MISC_CTRL3__PME_TURNOFF_MODE_MASK 0x00000040L +#define BIF_RST_MISC_CTRL3__RSMU_SOFT_RST_CYCLE_MASK 0x00FF0000L +//BIF_RST_GFXVF_FLR_IDLE +#define BIF_RST_GFXVF_FLR_IDLE__VF0_TRANS_IDLE__SHIFT 0x0 +#define BIF_RST_GFXVF_FLR_IDLE__VF1_TRANS_IDLE__SHIFT 0x1 +#define BIF_RST_GFXVF_FLR_IDLE__VF2_TRANS_IDLE__SHIFT 0x2 +#define BIF_RST_GFXVF_FLR_IDLE__VF3_TRANS_IDLE__SHIFT 0x3 +#define BIF_RST_GFXVF_FLR_IDLE__VF4_TRANS_IDLE__SHIFT 0x4 +#define BIF_RST_GFXVF_FLR_IDLE__VF5_TRANS_IDLE__SHIFT 0x5 +#define BIF_RST_GFXVF_FLR_IDLE__VF6_TRANS_IDLE__SHIFT 0x6 +#define BIF_RST_GFXVF_FLR_IDLE__VF7_TRANS_IDLE__SHIFT 0x7 +#define BIF_RST_GFXVF_FLR_IDLE__VF8_TRANS_IDLE__SHIFT 0x8 +#define BIF_RST_GFXVF_FLR_IDLE__VF9_TRANS_IDLE__SHIFT 0x9 +#define BIF_RST_GFXVF_FLR_IDLE__VF10_TRANS_IDLE__SHIFT 0xa +#define BIF_RST_GFXVF_FLR_IDLE__VF11_TRANS_IDLE__SHIFT 0xb +#define 
BIF_RST_GFXVF_FLR_IDLE__VF12_TRANS_IDLE__SHIFT 0xc +#define BIF_RST_GFXVF_FLR_IDLE__VF13_TRANS_IDLE__SHIFT 0xd +#define BIF_RST_GFXVF_FLR_IDLE__VF14_TRANS_IDLE__SHIFT 0xe +#define BIF_RST_GFXVF_FLR_IDLE__VF15_TRANS_IDLE__SHIFT 0xf +#define BIF_RST_GFXVF_FLR_IDLE__SOFTPF_TRANS_IDLE__SHIFT 0x1f +#define BIF_RST_GFXVF_FLR_IDLE__VF0_TRANS_IDLE_MASK 0x00000001L +#define BIF_RST_GFXVF_FLR_IDLE__VF1_TRANS_IDLE_MASK 0x00000002L +#define BIF_RST_GFXVF_FLR_IDLE__VF2_TRANS_IDLE_MASK 0x00000004L +#define BIF_RST_GFXVF_FLR_IDLE__VF3_TRANS_IDLE_MASK 0x00000008L +#define BIF_RST_GFXVF_FLR_IDLE__VF4_TRANS_IDLE_MASK 0x00000010L +#define BIF_RST_GFXVF_FLR_IDLE__VF5_TRANS_IDLE_MASK 0x00000020L +#define BIF_RST_GFXVF_FLR_IDLE__VF6_TRANS_IDLE_MASK 0x00000040L +#define BIF_RST_GFXVF_FLR_IDLE__VF7_TRANS_IDLE_MASK 0x00000080L +#define BIF_RST_GFXVF_FLR_IDLE__VF8_TRANS_IDLE_MASK 0x00000100L +#define BIF_RST_GFXVF_FLR_IDLE__VF9_TRANS_IDLE_MASK 0x00000200L +#define BIF_RST_GFXVF_FLR_IDLE__VF10_TRANS_IDLE_MASK 0x00000400L +#define BIF_RST_GFXVF_FLR_IDLE__VF11_TRANS_IDLE_MASK 0x00000800L +#define BIF_RST_GFXVF_FLR_IDLE__VF12_TRANS_IDLE_MASK 0x00001000L +#define BIF_RST_GFXVF_FLR_IDLE__VF13_TRANS_IDLE_MASK 0x00002000L +#define BIF_RST_GFXVF_FLR_IDLE__VF14_TRANS_IDLE_MASK 0x00004000L +#define BIF_RST_GFXVF_FLR_IDLE__VF15_TRANS_IDLE_MASK 0x00008000L +#define BIF_RST_GFXVF_FLR_IDLE__SOFTPF_TRANS_IDLE_MASK 0x80000000L +//DEV0_PF0_FLR_RST_CTRL +#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_EN__SHIFT 0x5 +#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_STICKY_EN__SHIFT 0x6 +#define DEV0_PF0_FLR_RST_CTRL__VF_PRV_EN__SHIFT 0x7 +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_EN__SHIFT 0x8 +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_FLR_EXC_EN__SHIFT 0x9 +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_STICKY_EN__SHIFT 0xa +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_EN__SHIFT 0xb +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_STICKY_EN__SHIFT 0xc +#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_EN__SHIFT 0xd +#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_STICKY_EN__SHIFT 0xe +#define DEV0_PF0_FLR_RST_CTRL__VF_VF_PRV_EN__SHIFT 0xf +#define DEV0_PF0_FLR_RST_CTRL__FLR_TWICE_EN__SHIFT 0x10 +#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11 +#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12 +#define DEV0_PF0_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17 +#define DEV0_PF0_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19 +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PFCOPY_PRV_EN__SHIFT 0x1f +#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF0_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF0_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_EN_MASK 0x00000020L +#define DEV0_PF0_FLR_RST_CTRL__VF_CFG_STICKY_EN_MASK 0x00000040L +#define DEV0_PF0_FLR_RST_CTRL__VF_PRV_EN_MASK 0x00000080L +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_EN_MASK 0x00000100L +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_FLR_EXC_EN_MASK 0x00000200L +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_CFG_STICKY_EN_MASK 0x00000400L +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_EN_MASK 0x00000800L +#define 
DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PRV_STICKY_EN_MASK 0x00001000L +#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_EN_MASK 0x00002000L +#define DEV0_PF0_FLR_RST_CTRL__VF_VF_CFG_STICKY_EN_MASK 0x00004000L +#define DEV0_PF0_FLR_RST_CTRL__VF_VF_PRV_EN_MASK 0x00008000L +#define DEV0_PF0_FLR_RST_CTRL__FLR_TWICE_EN_MASK 0x00010000L +#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L +#define DEV0_PF0_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L +#define DEV0_PF0_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L +#define DEV0_PF0_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L +#define DEV0_PF0_FLR_RST_CTRL__SOFT_PF_PFCOPY_PRV_EN_MASK 0x80000000L +//DEV0_PF1_FLR_RST_CTRL +#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11 +#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12 +#define DEV0_PF1_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17 +#define DEV0_PF1_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19 +#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF1_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF1_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L +#define DEV0_PF1_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L +#define DEV0_PF1_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L +#define DEV0_PF1_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L +//DEV0_PF2_FLR_RST_CTRL +#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11 +#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12 +#define DEV0_PF2_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17 +#define DEV0_PF2_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19 +#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF2_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF2_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L +#define DEV0_PF2_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L +#define DEV0_PF2_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L +#define DEV0_PF2_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L +//DEV0_PF3_FLR_RST_CTRL +#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11 +#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12 +#define DEV0_PF3_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17 +#define DEV0_PF3_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19 +#define 
DEV0_PF3_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF3_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF3_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L +#define DEV0_PF3_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L +#define DEV0_PF3_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L +#define DEV0_PF3_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L +//DEV0_PF4_FLR_RST_CTRL +#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11 +#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12 +#define DEV0_PF4_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17 +#define DEV0_PF4_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19 +#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF4_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF4_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L +#define DEV0_PF4_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L +#define DEV0_PF4_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L +#define DEV0_PF4_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L +//DEV0_PF5_FLR_RST_CTRL +#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11 +#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12 +#define DEV0_PF5_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17 +#define DEV0_PF5_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19 +#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF5_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF5_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L +#define DEV0_PF5_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L +#define DEV0_PF5_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L +#define DEV0_PF5_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L +//DEV0_PF6_FLR_RST_CTRL +#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11 +#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12 +#define DEV0_PF6_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17 +#define DEV0_PF6_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19 +#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define 
DEV0_PF6_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF6_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF6_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L +#define DEV0_PF6_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L +#define DEV0_PF6_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L +#define DEV0_PF6_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L +//DEV0_PF7_FLR_RST_CTRL +#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF7_FLR_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF7_FLR_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF7_FLR_RST_CTRL__FLR_GRACE_MODE__SHIFT 0x11 +#define DEV0_PF7_FLR_RST_CTRL__FLR_GRACE_TIMEOUT__SHIFT 0x12 +#define DEV0_PF7_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS__SHIFT 0x17 +#define DEV0_PF7_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS__SHIFT 0x19 +#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF7_FLR_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF7_FLR_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF7_FLR_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +#define DEV0_PF7_FLR_RST_CTRL__FLR_GRACE_MODE_MASK 0x00020000L +#define DEV0_PF7_FLR_RST_CTRL__FLR_GRACE_TIMEOUT_MASK 0x001C0000L +#define DEV0_PF7_FLR_RST_CTRL__FLR_DMA_DUMMY_RSPSTS_MASK 0x01800000L +#define DEV0_PF7_FLR_RST_CTRL__FLR_HST_DUMMY_RSPSTS_MASK 0x06000000L +//BIF_INST_RESET_INTR_STS +#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_INTR_STS__SHIFT 0x0 +#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_CFG_ONLY_INTR_STS__SHIFT 0x1 +#define BIF_INST_RESET_INTR_STS__DRV_RESET_M0_INTR_STS__SHIFT 0x2 +#define BIF_INST_RESET_INTR_STS__DRV_RESET_M1_INTR_STS__SHIFT 0x3 +#define BIF_INST_RESET_INTR_STS__DRV_RESET_M2_INTR_STS__SHIFT 0x4 +#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_INTR_STS_MASK 0x00000001L +#define BIF_INST_RESET_INTR_STS__EP0_LINK_RESET_CFG_ONLY_INTR_STS_MASK 0x00000002L +#define BIF_INST_RESET_INTR_STS__DRV_RESET_M0_INTR_STS_MASK 0x00000004L +#define BIF_INST_RESET_INTR_STS__DRV_RESET_M1_INTR_STS_MASK 0x00000008L +#define BIF_INST_RESET_INTR_STS__DRV_RESET_M2_INTR_STS_MASK 0x00000010L +//BIF_PF_FLR_INTR_STS +#define BIF_PF_FLR_INTR_STS__DEV0_PF0_FLR_INTR_STS__SHIFT 0x0 +#define BIF_PF_FLR_INTR_STS__DEV0_PF1_FLR_INTR_STS__SHIFT 0x1 +#define BIF_PF_FLR_INTR_STS__DEV0_PF2_FLR_INTR_STS__SHIFT 0x2 +#define BIF_PF_FLR_INTR_STS__DEV0_PF3_FLR_INTR_STS__SHIFT 0x3 +#define BIF_PF_FLR_INTR_STS__DEV0_PF4_FLR_INTR_STS__SHIFT 0x4 +#define BIF_PF_FLR_INTR_STS__DEV0_PF5_FLR_INTR_STS__SHIFT 0x5 +#define BIF_PF_FLR_INTR_STS__DEV0_PF6_FLR_INTR_STS__SHIFT 0x6 +#define BIF_PF_FLR_INTR_STS__DEV0_PF7_FLR_INTR_STS__SHIFT 0x7 +#define BIF_PF_FLR_INTR_STS__DEV0_PF0_FLR_INTR_STS_MASK 0x00000001L +#define BIF_PF_FLR_INTR_STS__DEV0_PF1_FLR_INTR_STS_MASK 0x00000002L +#define BIF_PF_FLR_INTR_STS__DEV0_PF2_FLR_INTR_STS_MASK 0x00000004L +#define BIF_PF_FLR_INTR_STS__DEV0_PF3_FLR_INTR_STS_MASK 0x00000008L +#define BIF_PF_FLR_INTR_STS__DEV0_PF4_FLR_INTR_STS_MASK 0x00000010L +#define BIF_PF_FLR_INTR_STS__DEV0_PF5_FLR_INTR_STS_MASK 0x00000020L +#define BIF_PF_FLR_INTR_STS__DEV0_PF6_FLR_INTR_STS_MASK 0x00000040L +#define BIF_PF_FLR_INTR_STS__DEV0_PF7_FLR_INTR_STS_MASK 0x00000080L +//BIF_D3HOTD0_INTR_STS +#define 
BIF_D3HOTD0_INTR_STS__DEV0_PF0_D3HOTD0_INTR_STS__SHIFT 0x0 +#define BIF_D3HOTD0_INTR_STS__DEV0_PF1_D3HOTD0_INTR_STS__SHIFT 0x1 +#define BIF_D3HOTD0_INTR_STS__DEV0_PF2_D3HOTD0_INTR_STS__SHIFT 0x2 +#define BIF_D3HOTD0_INTR_STS__DEV0_PF3_D3HOTD0_INTR_STS__SHIFT 0x3 +#define BIF_D3HOTD0_INTR_STS__DEV0_PF4_D3HOTD0_INTR_STS__SHIFT 0x4 +#define BIF_D3HOTD0_INTR_STS__DEV0_PF5_D3HOTD0_INTR_STS__SHIFT 0x5 +#define BIF_D3HOTD0_INTR_STS__DEV0_PF6_D3HOTD0_INTR_STS__SHIFT 0x6 +#define BIF_D3HOTD0_INTR_STS__DEV0_PF7_D3HOTD0_INTR_STS__SHIFT 0x7 +#define BIF_D3HOTD0_INTR_STS__DEV0_PF0_D3HOTD0_INTR_STS_MASK 0x00000001L +#define BIF_D3HOTD0_INTR_STS__DEV0_PF1_D3HOTD0_INTR_STS_MASK 0x00000002L +#define BIF_D3HOTD0_INTR_STS__DEV0_PF2_D3HOTD0_INTR_STS_MASK 0x00000004L +#define BIF_D3HOTD0_INTR_STS__DEV0_PF3_D3HOTD0_INTR_STS_MASK 0x00000008L +#define BIF_D3HOTD0_INTR_STS__DEV0_PF4_D3HOTD0_INTR_STS_MASK 0x00000010L +#define BIF_D3HOTD0_INTR_STS__DEV0_PF5_D3HOTD0_INTR_STS_MASK 0x00000020L +#define BIF_D3HOTD0_INTR_STS__DEV0_PF6_D3HOTD0_INTR_STS_MASK 0x00000040L +#define BIF_D3HOTD0_INTR_STS__DEV0_PF7_D3HOTD0_INTR_STS_MASK 0x00000080L +//BIF_POWER_INTR_STS +#define BIF_POWER_INTR_STS__DEV0_PME_TURN_OFF_INTR_STS__SHIFT 0x0 +#define BIF_POWER_INTR_STS__PORT0_DSTATE_INTR_STS__SHIFT 0x10 +#define BIF_POWER_INTR_STS__DEV0_PME_TURN_OFF_INTR_STS_MASK 0x00000001L +#define BIF_POWER_INTR_STS__PORT0_DSTATE_INTR_STS_MASK 0x00010000L +//BIF_PF_DSTATE_INTR_STS +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF0_DSTATE_INTR_STS__SHIFT 0x0 +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF1_DSTATE_INTR_STS__SHIFT 0x1 +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF2_DSTATE_INTR_STS__SHIFT 0x2 +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF3_DSTATE_INTR_STS__SHIFT 0x3 +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF4_DSTATE_INTR_STS__SHIFT 0x4 +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF5_DSTATE_INTR_STS__SHIFT 0x5 +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF6_DSTATE_INTR_STS__SHIFT 0x6 +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF7_DSTATE_INTR_STS__SHIFT 0x7 +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF0_DSTATE_INTR_STS_MASK 0x00000001L +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF1_DSTATE_INTR_STS_MASK 0x00000002L +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF2_DSTATE_INTR_STS_MASK 0x00000004L +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF3_DSTATE_INTR_STS_MASK 0x00000008L +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF4_DSTATE_INTR_STS_MASK 0x00000010L +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF5_DSTATE_INTR_STS_MASK 0x00000020L +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF6_DSTATE_INTR_STS_MASK 0x00000040L +#define BIF_PF_DSTATE_INTR_STS__DEV0_PF7_DSTATE_INTR_STS_MASK 0x00000080L +//BIF_PF0_VF_FLR_INTR_STS +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF0_FLR_INTR_STS__SHIFT 0x0 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF1_FLR_INTR_STS__SHIFT 0x1 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF2_FLR_INTR_STS__SHIFT 0x2 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF3_FLR_INTR_STS__SHIFT 0x3 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF4_FLR_INTR_STS__SHIFT 0x4 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF5_FLR_INTR_STS__SHIFT 0x5 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF6_FLR_INTR_STS__SHIFT 0x6 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF7_FLR_INTR_STS__SHIFT 0x7 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF8_FLR_INTR_STS__SHIFT 0x8 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF9_FLR_INTR_STS__SHIFT 0x9 +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF10_FLR_INTR_STS__SHIFT 0xa +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF11_FLR_INTR_STS__SHIFT 0xb +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF12_FLR_INTR_STS__SHIFT 0xc +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF13_FLR_INTR_STS__SHIFT 0xd 
+#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF14_FLR_INTR_STS__SHIFT 0xe +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF15_FLR_INTR_STS__SHIFT 0xf +#define BIF_PF0_VF_FLR_INTR_STS__PF0_SOFTPF_FLR_INTR_STS__SHIFT 0x1f +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF0_FLR_INTR_STS_MASK 0x00000001L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF1_FLR_INTR_STS_MASK 0x00000002L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF2_FLR_INTR_STS_MASK 0x00000004L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF3_FLR_INTR_STS_MASK 0x00000008L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF4_FLR_INTR_STS_MASK 0x00000010L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF5_FLR_INTR_STS_MASK 0x00000020L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF6_FLR_INTR_STS_MASK 0x00000040L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF7_FLR_INTR_STS_MASK 0x00000080L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF8_FLR_INTR_STS_MASK 0x00000100L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF9_FLR_INTR_STS_MASK 0x00000200L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF10_FLR_INTR_STS_MASK 0x00000400L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF11_FLR_INTR_STS_MASK 0x00000800L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF12_FLR_INTR_STS_MASK 0x00001000L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF13_FLR_INTR_STS_MASK 0x00002000L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF14_FLR_INTR_STS_MASK 0x00004000L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_VF15_FLR_INTR_STS_MASK 0x00008000L +#define BIF_PF0_VF_FLR_INTR_STS__PF0_SOFTPF_FLR_INTR_STS_MASK 0x80000000L +//BIF_INST_RESET_INTR_MASK +#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_INTR_MASK__SHIFT 0x0 +#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_CFG_ONLY_INTR_MASK__SHIFT 0x1 +#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M0_INTR_MASK__SHIFT 0x2 +#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M1_INTR_MASK__SHIFT 0x3 +#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M2_INTR_MASK__SHIFT 0x4 +#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_INTR_MASK_MASK 0x00000001L +#define BIF_INST_RESET_INTR_MASK__EP0_LINK_RESET_CFG_ONLY_INTR_MASK_MASK 0x00000002L +#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M0_INTR_MASK_MASK 0x00000004L +#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M1_INTR_MASK_MASK 0x00000008L +#define BIF_INST_RESET_INTR_MASK__DRV_RESET_M2_INTR_MASK_MASK 0x00000010L +//BIF_PF_FLR_INTR_MASK +#define BIF_PF_FLR_INTR_MASK__DEV0_PF0_FLR_INTR_MASK__SHIFT 0x0 +#define BIF_PF_FLR_INTR_MASK__DEV0_PF1_FLR_INTR_MASK__SHIFT 0x1 +#define BIF_PF_FLR_INTR_MASK__DEV0_PF2_FLR_INTR_MASK__SHIFT 0x2 +#define BIF_PF_FLR_INTR_MASK__DEV0_PF3_FLR_INTR_MASK__SHIFT 0x3 +#define BIF_PF_FLR_INTR_MASK__DEV0_PF4_FLR_INTR_MASK__SHIFT 0x4 +#define BIF_PF_FLR_INTR_MASK__DEV0_PF5_FLR_INTR_MASK__SHIFT 0x5 +#define BIF_PF_FLR_INTR_MASK__DEV0_PF6_FLR_INTR_MASK__SHIFT 0x6 +#define BIF_PF_FLR_INTR_MASK__DEV0_PF7_FLR_INTR_MASK__SHIFT 0x7 +#define BIF_PF_FLR_INTR_MASK__DEV0_PF0_FLR_INTR_MASK_MASK 0x00000001L +#define BIF_PF_FLR_INTR_MASK__DEV0_PF1_FLR_INTR_MASK_MASK 0x00000002L +#define BIF_PF_FLR_INTR_MASK__DEV0_PF2_FLR_INTR_MASK_MASK 0x00000004L +#define BIF_PF_FLR_INTR_MASK__DEV0_PF3_FLR_INTR_MASK_MASK 0x00000008L +#define BIF_PF_FLR_INTR_MASK__DEV0_PF4_FLR_INTR_MASK_MASK 0x00000010L +#define BIF_PF_FLR_INTR_MASK__DEV0_PF5_FLR_INTR_MASK_MASK 0x00000020L +#define BIF_PF_FLR_INTR_MASK__DEV0_PF6_FLR_INTR_MASK_MASK 0x00000040L +#define BIF_PF_FLR_INTR_MASK__DEV0_PF7_FLR_INTR_MASK_MASK 0x00000080L +//BIF_D3HOTD0_INTR_MASK +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF0_D3HOTD0_INTR_MASK__SHIFT 0x0 +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF1_D3HOTD0_INTR_MASK__SHIFT 0x1 +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF2_D3HOTD0_INTR_MASK__SHIFT 
0x2 +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF3_D3HOTD0_INTR_MASK__SHIFT 0x3 +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF4_D3HOTD0_INTR_MASK__SHIFT 0x4 +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF5_D3HOTD0_INTR_MASK__SHIFT 0x5 +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF6_D3HOTD0_INTR_MASK__SHIFT 0x6 +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF7_D3HOTD0_INTR_MASK__SHIFT 0x7 +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF0_D3HOTD0_INTR_MASK_MASK 0x00000001L +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF1_D3HOTD0_INTR_MASK_MASK 0x00000002L +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF2_D3HOTD0_INTR_MASK_MASK 0x00000004L +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF3_D3HOTD0_INTR_MASK_MASK 0x00000008L +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF4_D3HOTD0_INTR_MASK_MASK 0x00000010L +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF5_D3HOTD0_INTR_MASK_MASK 0x00000020L +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF6_D3HOTD0_INTR_MASK_MASK 0x00000040L +#define BIF_D3HOTD0_INTR_MASK__DEV0_PF7_D3HOTD0_INTR_MASK_MASK 0x00000080L +//BIF_POWER_INTR_MASK +#define BIF_POWER_INTR_MASK__DEV0_PME_TURN_OFF_INTR_MASK__SHIFT 0x0 +#define BIF_POWER_INTR_MASK__PORT0_DSTATE_INTR_MASK__SHIFT 0x10 +#define BIF_POWER_INTR_MASK__DEV0_PME_TURN_OFF_INTR_MASK_MASK 0x00000001L +#define BIF_POWER_INTR_MASK__PORT0_DSTATE_INTR_MASK_MASK 0x00010000L +//BIF_PF_DSTATE_INTR_MASK +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF0_DSTATE_INTR_MASK__SHIFT 0x0 +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF1_DSTATE_INTR_MASK__SHIFT 0x1 +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF2_DSTATE_INTR_MASK__SHIFT 0x2 +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF3_DSTATE_INTR_MASK__SHIFT 0x3 +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF4_DSTATE_INTR_MASK__SHIFT 0x4 +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF5_DSTATE_INTR_MASK__SHIFT 0x5 +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF6_DSTATE_INTR_MASK__SHIFT 0x6 +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF7_DSTATE_INTR_MASK__SHIFT 0x7 +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF0_DSTATE_INTR_MASK_MASK 0x00000001L +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF1_DSTATE_INTR_MASK_MASK 0x00000002L +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF2_DSTATE_INTR_MASK_MASK 0x00000004L +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF3_DSTATE_INTR_MASK_MASK 0x00000008L +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF4_DSTATE_INTR_MASK_MASK 0x00000010L +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF5_DSTATE_INTR_MASK_MASK 0x00000020L +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF6_DSTATE_INTR_MASK_MASK 0x00000040L +#define BIF_PF_DSTATE_INTR_MASK__DEV0_PF7_DSTATE_INTR_MASK_MASK 0x00000080L +//BIF_PF0_VF_FLR_INTR_MASK +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF0_FLR_INTR_MASK__SHIFT 0x0 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF1_FLR_INTR_MASK__SHIFT 0x1 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF2_FLR_INTR_MASK__SHIFT 0x2 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF3_FLR_INTR_MASK__SHIFT 0x3 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF4_FLR_INTR_MASK__SHIFT 0x4 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF5_FLR_INTR_MASK__SHIFT 0x5 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF6_FLR_INTR_MASK__SHIFT 0x6 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF7_FLR_INTR_MASK__SHIFT 0x7 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF8_FLR_INTR_MASK__SHIFT 0x8 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF9_FLR_INTR_MASK__SHIFT 0x9 +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF10_FLR_INTR_MASK__SHIFT 0xa +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF11_FLR_INTR_MASK__SHIFT 0xb +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF12_FLR_INTR_MASK__SHIFT 0xc +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF13_FLR_INTR_MASK__SHIFT 0xd +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF14_FLR_INTR_MASK__SHIFT 0xe +#define 
BIF_PF0_VF_FLR_INTR_MASK__PF0_VF15_FLR_INTR_MASK__SHIFT 0xf +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_SOFTPF_FLR_INTR_MASK__SHIFT 0x1f +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF0_FLR_INTR_MASK_MASK 0x00000001L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF1_FLR_INTR_MASK_MASK 0x00000002L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF2_FLR_INTR_MASK_MASK 0x00000004L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF3_FLR_INTR_MASK_MASK 0x00000008L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF4_FLR_INTR_MASK_MASK 0x00000010L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF5_FLR_INTR_MASK_MASK 0x00000020L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF6_FLR_INTR_MASK_MASK 0x00000040L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF7_FLR_INTR_MASK_MASK 0x00000080L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF8_FLR_INTR_MASK_MASK 0x00000100L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF9_FLR_INTR_MASK_MASK 0x00000200L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF10_FLR_INTR_MASK_MASK 0x00000400L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF11_FLR_INTR_MASK_MASK 0x00000800L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF12_FLR_INTR_MASK_MASK 0x00001000L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF13_FLR_INTR_MASK_MASK 0x00002000L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF14_FLR_INTR_MASK_MASK 0x00004000L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_VF15_FLR_INTR_MASK_MASK 0x00008000L +#define BIF_PF0_VF_FLR_INTR_MASK__PF0_SOFTPF_FLR_INTR_MASK_MASK 0x80000000L +//BIF_PF_FLR_RST +#define BIF_PF_FLR_RST__DEV0_PF0_FLR_RST__SHIFT 0x0 +#define BIF_PF_FLR_RST__DEV0_PF1_FLR_RST__SHIFT 0x1 +#define BIF_PF_FLR_RST__DEV0_PF2_FLR_RST__SHIFT 0x2 +#define BIF_PF_FLR_RST__DEV0_PF3_FLR_RST__SHIFT 0x3 +#define BIF_PF_FLR_RST__DEV0_PF4_FLR_RST__SHIFT 0x4 +#define BIF_PF_FLR_RST__DEV0_PF5_FLR_RST__SHIFT 0x5 +#define BIF_PF_FLR_RST__DEV0_PF6_FLR_RST__SHIFT 0x6 +#define BIF_PF_FLR_RST__DEV0_PF7_FLR_RST__SHIFT 0x7 +#define BIF_PF_FLR_RST__DEV0_PF0_FLR_RST_MASK 0x00000001L +#define BIF_PF_FLR_RST__DEV0_PF1_FLR_RST_MASK 0x00000002L +#define BIF_PF_FLR_RST__DEV0_PF2_FLR_RST_MASK 0x00000004L +#define BIF_PF_FLR_RST__DEV0_PF3_FLR_RST_MASK 0x00000008L +#define BIF_PF_FLR_RST__DEV0_PF4_FLR_RST_MASK 0x00000010L +#define BIF_PF_FLR_RST__DEV0_PF5_FLR_RST_MASK 0x00000020L +#define BIF_PF_FLR_RST__DEV0_PF6_FLR_RST_MASK 0x00000040L +#define BIF_PF_FLR_RST__DEV0_PF7_FLR_RST_MASK 0x00000080L +//BIF_PF0_VF_FLR_RST +#define BIF_PF0_VF_FLR_RST__PF0_VF0_FLR_RST__SHIFT 0x0 +#define BIF_PF0_VF_FLR_RST__PF0_VF1_FLR_RST__SHIFT 0x1 +#define BIF_PF0_VF_FLR_RST__PF0_VF2_FLR_RST__SHIFT 0x2 +#define BIF_PF0_VF_FLR_RST__PF0_VF3_FLR_RST__SHIFT 0x3 +#define BIF_PF0_VF_FLR_RST__PF0_VF4_FLR_RST__SHIFT 0x4 +#define BIF_PF0_VF_FLR_RST__PF0_VF5_FLR_RST__SHIFT 0x5 +#define BIF_PF0_VF_FLR_RST__PF0_VF6_FLR_RST__SHIFT 0x6 +#define BIF_PF0_VF_FLR_RST__PF0_VF7_FLR_RST__SHIFT 0x7 +#define BIF_PF0_VF_FLR_RST__PF0_VF8_FLR_RST__SHIFT 0x8 +#define BIF_PF0_VF_FLR_RST__PF0_VF9_FLR_RST__SHIFT 0x9 +#define BIF_PF0_VF_FLR_RST__PF0_VF10_FLR_RST__SHIFT 0xa +#define BIF_PF0_VF_FLR_RST__PF0_VF11_FLR_RST__SHIFT 0xb +#define BIF_PF0_VF_FLR_RST__PF0_VF12_FLR_RST__SHIFT 0xc +#define BIF_PF0_VF_FLR_RST__PF0_VF13_FLR_RST__SHIFT 0xd +#define BIF_PF0_VF_FLR_RST__PF0_VF14_FLR_RST__SHIFT 0xe +#define BIF_PF0_VF_FLR_RST__PF0_VF15_FLR_RST__SHIFT 0xf +#define BIF_PF0_VF_FLR_RST__PF0_SOFTPF_FLR_RST__SHIFT 0x1f +#define BIF_PF0_VF_FLR_RST__PF0_VF0_FLR_RST_MASK 0x00000001L +#define BIF_PF0_VF_FLR_RST__PF0_VF1_FLR_RST_MASK 0x00000002L +#define BIF_PF0_VF_FLR_RST__PF0_VF2_FLR_RST_MASK 0x00000004L +#define BIF_PF0_VF_FLR_RST__PF0_VF3_FLR_RST_MASK 0x00000008L +#define 
BIF_PF0_VF_FLR_RST__PF0_VF4_FLR_RST_MASK 0x00000010L +#define BIF_PF0_VF_FLR_RST__PF0_VF5_FLR_RST_MASK 0x00000020L +#define BIF_PF0_VF_FLR_RST__PF0_VF6_FLR_RST_MASK 0x00000040L +#define BIF_PF0_VF_FLR_RST__PF0_VF7_FLR_RST_MASK 0x00000080L +#define BIF_PF0_VF_FLR_RST__PF0_VF8_FLR_RST_MASK 0x00000100L +#define BIF_PF0_VF_FLR_RST__PF0_VF9_FLR_RST_MASK 0x00000200L +#define BIF_PF0_VF_FLR_RST__PF0_VF10_FLR_RST_MASK 0x00000400L +#define BIF_PF0_VF_FLR_RST__PF0_VF11_FLR_RST_MASK 0x00000800L +#define BIF_PF0_VF_FLR_RST__PF0_VF12_FLR_RST_MASK 0x00001000L +#define BIF_PF0_VF_FLR_RST__PF0_VF13_FLR_RST_MASK 0x00002000L +#define BIF_PF0_VF_FLR_RST__PF0_VF14_FLR_RST_MASK 0x00004000L +#define BIF_PF0_VF_FLR_RST__PF0_VF15_FLR_RST_MASK 0x00008000L +#define BIF_PF0_VF_FLR_RST__PF0_SOFTPF_FLR_RST_MASK 0x80000000L +//BIF_DEV0_PF0_DSTATE_VALUE +#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2 +#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L +#define BIF_DEV0_PF0_DSTATE_VALUE__DEV0_PF0_DSTATE_ACK_VALUE_MASK 0x00030000L +//BIF_DEV0_PF1_DSTATE_VALUE +#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2 +#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L +#define BIF_DEV0_PF1_DSTATE_VALUE__DEV0_PF1_DSTATE_ACK_VALUE_MASK 0x00030000L +//BIF_DEV0_PF2_DSTATE_VALUE +#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2 +#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L +#define BIF_DEV0_PF2_DSTATE_VALUE__DEV0_PF2_DSTATE_ACK_VALUE_MASK 0x00030000L +//BIF_DEV0_PF3_DSTATE_VALUE +#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2 +#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L +#define BIF_DEV0_PF3_DSTATE_VALUE__DEV0_PF3_DSTATE_ACK_VALUE_MASK 0x00030000L +//BIF_DEV0_PF4_DSTATE_VALUE +#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2 +#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L +#define BIF_DEV0_PF4_DSTATE_VALUE__DEV0_PF4_DSTATE_ACK_VALUE_MASK 0x00030000L +//BIF_DEV0_PF5_DSTATE_VALUE +#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2 +#define 
BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L +#define BIF_DEV0_PF5_DSTATE_VALUE__DEV0_PF5_DSTATE_ACK_VALUE_MASK 0x00030000L +//BIF_DEV0_PF6_DSTATE_VALUE +#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2 +#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L +#define BIF_DEV0_PF6_DSTATE_VALUE__DEV0_PF6_DSTATE_ACK_VALUE_MASK 0x00030000L +//BIF_DEV0_PF7_DSTATE_VALUE +#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_NEED_D3TOD0_RESET__SHIFT 0x2 +#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_NEED_D3TOD0_RESET_MASK 0x00000004L +#define BIF_DEV0_PF7_DSTATE_VALUE__DEV0_PF7_DSTATE_ACK_VALUE_MASK 0x00030000L +//DEV0_PF0_D3HOTD0_RST_CTRL +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF0_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +//DEV0_PF1_D3HOTD0_RST_CTRL +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF1_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +//DEV0_PF2_D3HOTD0_RST_CTRL +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF2_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +//DEV0_PF3_D3HOTD0_RST_CTRL +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 
+#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF3_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +//DEV0_PF4_D3HOTD0_RST_CTRL +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF4_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +//DEV0_PF5_D3HOTD0_RST_CTRL +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF5_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +//DEV0_PF6_D3HOTD0_RST_CTRL +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF6_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +//DEV0_PF7_D3HOTD0_RST_CTRL +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_EN__SHIFT 0x0 +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN__SHIFT 0x1 +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN__SHIFT 0x2 +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_PRV_EN__SHIFT 0x3 +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN__SHIFT 0x4 +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_EN_MASK 0x00000001L +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_FLR_EXC_EN_MASK 0x00000002L +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_CFG_STICKY_EN_MASK 0x00000004L +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_PRV_EN_MASK 0x00000008L +#define DEV0_PF7_D3HOTD0_RST_CTRL__PF_PRV_STICKY_EN_MASK 0x00000010L +//BIF_PORT0_DSTATE_VALUE +#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_TGT_VALUE__SHIFT 0x0 +#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_ACK_VALUE__SHIFT 0x10 +#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_TGT_VALUE_MASK 0x00000003L +#define BIF_PORT0_DSTATE_VALUE__PORT0_DSTATE_ACK_VALUE_MASK 0x00030000L + + +// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk +//BIFL_RAS_CENTRAL_CNTL +#define 
BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_DIS__SHIFT 0x1d +#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_INTR_DIS__SHIFT 0x1e +#define BIFL_RAS_CENTRAL_CNTL__BIFL_LINKDIS_TRIG_EGRESS_STALL_DIS__SHIFT 0x1f +#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_ERREVENT_DIS_MASK 0x20000000L +#define BIFL_RAS_CENTRAL_CNTL__BIFL_RAS_CONTL_INTR_DIS_MASK 0x40000000L +#define BIFL_RAS_CENTRAL_CNTL__BIFL_LINKDIS_TRIG_EGRESS_STALL_DIS_MASK 0x80000000L +//BIFL_RAS_CENTRAL_STATUS +#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_EgStall_det__SHIFT 0x0 +#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_ErrEvent_det__SHIFT 0x1 +#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_EgStall_det__SHIFT 0x2 +#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_ErrEvent_det__SHIFT 0x3 +#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_ErrEvent_Recv__SHIFT 0x1d +#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_Intr_Recv__SHIFT 0x1e +#define BIFL_RAS_CENTRAL_STATUS__BIFL_LinkDis_Recv__SHIFT 0x1f +#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_EgStall_det_MASK 0x00000001L +#define BIFL_RAS_CENTRAL_STATUS__BIFL_L2C_ErrEvent_det_MASK 0x00000002L +#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_EgStall_det_MASK 0x00000004L +#define BIFL_RAS_CENTRAL_STATUS__BIFL_C2L_ErrEvent_det_MASK 0x00000008L +#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_ErrEvent_Recv_MASK 0x20000000L +#define BIFL_RAS_CENTRAL_STATUS__BIFL_RasContller_Intr_Recv_MASK 0x40000000L +#define BIFL_RAS_CENTRAL_STATUS__BIFL_LinkDis_Recv_MASK 0x80000000L +//BIFL_RAS_LEAF0_CTRL +#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0 +#define BIFL_RAS_LEAF0_CTRL__POISON_ERREVENT_EN__SHIFT 0x1 +#define BIFL_RAS_LEAF0_CTRL__POISON_STALL_EN__SHIFT 0x2 +#define BIFL_RAS_LEAF0_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3 +#define BIFL_RAS_LEAF0_CTRL__PARITY_STALL_EN__SHIFT 0x4 +#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8 +#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa +#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb +#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10 +#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L +#define BIFL_RAS_LEAF0_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L +#define BIFL_RAS_LEAF0_CTRL__POISON_STALL_EN_MASK 0x00000004L +#define BIFL_RAS_LEAF0_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L +#define BIFL_RAS_LEAF0_CTRL__PARITY_STALL_EN_MASK 0x00000010L +#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L +#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L +#define BIFL_RAS_LEAF0_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L +#define BIFL_RAS_LEAF0_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L +//BIFL_RAS_LEAF1_CTRL +#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0 +#define BIFL_RAS_LEAF1_CTRL__POISON_ERREVENT_EN__SHIFT 0x1 +#define BIFL_RAS_LEAF1_CTRL__POISON_STALL_EN__SHIFT 0x2 +#define BIFL_RAS_LEAF1_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3 +#define BIFL_RAS_LEAF1_CTRL__PARITY_STALL_EN__SHIFT 0x4 +#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8 +#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa +#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb +#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10 +#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L +#define BIFL_RAS_LEAF1_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L +#define BIFL_RAS_LEAF1_CTRL__POISON_STALL_EN_MASK 
0x00000004L +#define BIFL_RAS_LEAF1_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L +#define BIFL_RAS_LEAF1_CTRL__PARITY_STALL_EN_MASK 0x00000010L +#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L +#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L +#define BIFL_RAS_LEAF1_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L +#define BIFL_RAS_LEAF1_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L +//BIFL_RAS_LEAF2_CTRL +#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_DET_EN__SHIFT 0x0 +#define BIFL_RAS_LEAF2_CTRL__POISON_ERREVENT_EN__SHIFT 0x1 +#define BIFL_RAS_LEAF2_CTRL__POISON_STALL_EN__SHIFT 0x2 +#define BIFL_RAS_LEAF2_CTRL__PARITY_ERREVENT_EN__SHIFT 0x3 +#define BIFL_RAS_LEAF2_CTRL__PARITY_STALL_EN__SHIFT 0x4 +#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_GEN_EN__SHIFT 0x8 +#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_GEN_EN__SHIFT 0x9 +#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_PROP_EN__SHIFT 0xa +#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_PROP_EN__SHIFT 0xb +#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_RAS_INTR_EN__SHIFT 0x10 +#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_DET_EN_MASK 0x00000001L +#define BIFL_RAS_LEAF2_CTRL__POISON_ERREVENT_EN_MASK 0x00000002L +#define BIFL_RAS_LEAF2_CTRL__POISON_STALL_EN_MASK 0x00000004L +#define BIFL_RAS_LEAF2_CTRL__PARITY_ERREVENT_EN_MASK 0x00000008L +#define BIFL_RAS_LEAF2_CTRL__PARITY_STALL_EN_MASK 0x00000010L +#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_GEN_EN_MASK 0x00000100L +#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_GEN_EN_MASK 0x00000200L +#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_PROP_EN_MASK 0x00000400L +#define BIFL_RAS_LEAF2_CTRL__EGRESS_STALL_PROP_EN_MASK 0x00000800L +#define BIFL_RAS_LEAF2_CTRL__ERR_EVENT_RAS_INTR_EN_MASK 0x00010000L +//BIFL_RAS_LEAF0_STATUS +#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_RECV__SHIFT 0x0 +#define BIFL_RAS_LEAF0_STATUS__POISON_ERR_DET__SHIFT 0x1 +#define BIFL_RAS_LEAF0_STATUS__PARITY_ERR_DET__SHIFT 0x2 +#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa +#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_RECV_MASK 0x00000001L +#define BIFL_RAS_LEAF0_STATUS__POISON_ERR_DET_MASK 0x00000002L +#define BIFL_RAS_LEAF0_STATUS__PARITY_ERR_DET_MASK 0x00000004L +#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define BIFL_RAS_LEAF0_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define BIFL_RAS_LEAF0_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//BIFL_RAS_LEAF1_STATUS +#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_RECV__SHIFT 0x0 +#define BIFL_RAS_LEAF1_STATUS__POISON_ERR_DET__SHIFT 0x1 +#define BIFL_RAS_LEAF1_STATUS__PARITY_ERR_DET__SHIFT 0x2 +#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa +#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_RECV_MASK 0x00000001L +#define BIFL_RAS_LEAF1_STATUS__POISON_ERR_DET_MASK 0x00000002L +#define BIFL_RAS_LEAF1_STATUS__PARITY_ERR_DET_MASK 0x00000004L +#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define BIFL_RAS_LEAF1_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L 
+#define BIFL_RAS_LEAF1_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//BIFL_RAS_LEAF2_STATUS +#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_RECV__SHIFT 0x0 +#define BIFL_RAS_LEAF2_STATUS__POISON_ERR_DET__SHIFT 0x1 +#define BIFL_RAS_LEAF2_STATUS__PARITY_ERR_DET__SHIFT 0x2 +#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_GENN_STAT__SHIFT 0x8 +#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_GENN_STAT__SHIFT 0x9 +#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_PROP_STAT__SHIFT 0xa +#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_PROP_STAT__SHIFT 0xb +#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_RECV_MASK 0x00000001L +#define BIFL_RAS_LEAF2_STATUS__POISON_ERR_DET_MASK 0x00000002L +#define BIFL_RAS_LEAF2_STATUS__PARITY_ERR_DET_MASK 0x00000004L +#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_GENN_STAT_MASK 0x00000100L +#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_GENN_STAT_MASK 0x00000200L +#define BIFL_RAS_LEAF2_STATUS__ERR_EVENT_PROP_STAT_MASK 0x00000400L +#define BIFL_RAS_LEAF2_STATUS__EGRESS_STALLED_PROP_STAT_MASK 0x00000800L +//BIFL_IOHUB_RAS_IH_CNTL +#define BIFL_IOHUB_RAS_IH_CNTL__BIFL_RAS_IH_INTR_EN__SHIFT 0x0 +#define BIFL_IOHUB_RAS_IH_CNTL__BIFL_RAS_IH_INTR_EN_MASK 0x00000001L +//BIFL_RAS_VWR_FROM_IOHUB +#define BIFL_RAS_VWR_FROM_IOHUB__BIFL_RAS_IH_INTR_TRIG__SHIFT 0x0 +#define BIFL_RAS_VWR_FROM_IOHUB__BIFL_RAS_IH_INTR_TRIG_MASK 0x00000001L + + +// addressBlock: nbio_nbif0_bif_swus_SUMDEC +//SUM_INDEX +#define SUM_INDEX__SUM_INDEX__SHIFT 0x0 +#define SUM_INDEX__SUM_INDEX_MASK 0xFFFFFFFFL +//SUM_DATA +#define SUM_DATA__SUM_DATA__SHIFT 0x0 +#define SUM_DATA__SUM_DATA_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_bifcfgdecp +//BIF_CFG_DEV0_EPF0_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_COMMAND +#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_0_STATUS +#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_LATENCY +#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_HEADER +#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_0_BIST +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_MIN_GRANT +#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MIN_GRANT__MIN_GNT_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_MAX_LATENCY +#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L +#define BIF_CFG_DEV0_EPF0_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L +//BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PMI_CAP +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__VERSION_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_CLOCK_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L +//BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 
0x00000007L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define 
BIF_CFG_DEV0_EPF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define 
BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO +#define 
BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L +//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL +//BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L 
+//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 
0x0002L +//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 
0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L 
+//BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x1FL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 
+#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 
0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L +//BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L 
+//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY +#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC +#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 
0x0000003FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0x00FFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION_MASK 0x00000600L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE_MASK 0x07FF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN_MASK 0x00000300L +//BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_EPF0_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L +//BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L +//BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L 
+//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L +#define 
BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L +#define 
BIF_CFG_DEV0_EPF0_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8 
+#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 
0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L 
+//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET_MASK 0x0000FF00L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET_MASK 0x00FF0000L +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf1_bifcfgdecp +//BIF_CFG_DEV0_EPF1_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_COMMAND +#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF1_0_STATUS +#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L 
+#define BIF_CFG_DEV0_EPF1_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_REVISION_ID +#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_LATENCY +#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_HEADER +#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF1_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF1_0_BIST +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF1_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID +#define 
BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_CAP_PTR +#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_MIN_GRANT +#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MIN_GRANT__MIN_GNT_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_MAX_LATENCY +#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MAX_LATENCY__MAX_LAT_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__CAP_ID_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__NEXT_PTR_MASK 0x0000FF00L +#define BIF_CFG_DEV0_EPF1_0_VENDOR_CAP_LIST__LENGTH_MASK 0x00FF0000L +//BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_ADAPTER_ID_W__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PMI_CAP +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__VERSION_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_CLOCK_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__IMMEDIATE_READINESS_ON_RETURN_TO_D0_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__DEV_SPECIFIC_INIT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__AUX_CURRENT_MASK 0x01C0L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D1_SUPPORT_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__D2_SUPPORT_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_PMI_CAP__PME_SUPPORT_MASK 0xF800L +//BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__POWER_STATE_MASK 0x00000003L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__NO_SOFT_RESET_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SELECT_MASK 0x00001E00L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__DATA_SCALE_MASK 0x00006000L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PME_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__B2_B3_SUPPORT_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__BUS_PWR_EN_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PMI_STATUS_CNTL__PMI_DATA_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF1_0_LINK_CAP +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define 
BIF_CFG_DEV0_EPF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF1_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf 
+#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF1_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF1_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF1_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF1_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_MASK +#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF1_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_ID__SHIFT 
0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__REF_CLK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__EXT_VC_COUNT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__LOW_PRIORITY_EXT_VC_COUNT_MASK 0x00000070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__REF_CLK_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG1__PORT_ARB_TABLE_ENTRY_SIZE_MASK 0x00000C00L +//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CAP_REG2__VC_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__LOAD_VC_ARB_TABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_CNTL__VC_ARB_SELECT_MASK 0x000EL +//BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PORT_VC_STATUS__VC_ARB_TABLE_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC0_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_CAP_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__REJECT_SNOOP_TRANS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__MAX_TIME_SLOTS_MASK 0x003F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CAP__PORT_ARB_TABLE_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ID__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC0_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__TC_VC_MAP_TC1_7_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__LOAD_PORT_ARB_TABLE_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__PORT_ARB_SELECT_MASK 0x000E0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ID_MASK 0x07000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_CNTL__VC_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__PORT_ARB_TABLE_STATUS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VC1_RESOURCE_STATUS__VC_NEGOTIATION_PENDING_MASK 0x0002L +//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW1__SERIAL_NUMBER_LO_MASK 
0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DEV_SERIAL_NUM_DW2__SERIAL_NUMBER_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 
0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3 
+#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR1_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR2_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR3_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL 
+#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR4_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR5_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CAP__BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_INDEX_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_TOTAL_NUM_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_BAR6_CNTL__BAR_SIZE_MASK 0x3F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA_SELECT__DATA_SELECT_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__BASE_POWER_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__DATA_SCALE_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_SUB_STATE_MASK 0x00001C00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__PM_STATE_MASK 0x00006000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__TYPE_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_DATA__POWER_RAIL_MASK 0x001C0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_PWR_BUDGET_CAP__SYSTEM_ALLOCATED_MASK 0x01L +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__SUBSTATE_MAX_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_UNIT_MASK 0x00000300L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__PWR_ALLOC_SCALE_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_0_MASK 0x00FF0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CAP__TRANS_LAT_VAL_1_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_LATENCY_INDICATOR__TRANS_LAT_INDICATOR_BITS_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_STATUS_MASK 0x001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_STATUS__SUBSTATE_CNTL_ENABLED_MASK 0x0100L +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_CNTL__SUBSTATE_CNTL_MASK 0x1FL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_0__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_1__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_2__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_3__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_4__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_5__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_6__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DPA_SUBSTATE_PWR_ALLOC_7__SUBSTATE_PWR_ALLOC_MASK 0xFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SECONDARY_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__RESERVED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__PERFORM_EQUALIZATION_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__LINK_EQUALIZATION_REQ_INT_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LINK_CNTL3__RESERVED_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__RESERVED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__LANE_ERROR_STATUS_BITS_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_ERROR_STATUS__RESERVED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_0_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_1_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_2_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_3_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_4_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 
0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_5_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_6_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_7_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_8_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_9_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_10_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_11_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_12_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_13_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_14_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED__SHIFT 0xf +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_TX_PRESET_MASK 0x000FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__DOWNSTREAM_PORT_RX_PRESET_HINT_MASK 0x0070L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_TX_PRESET_MASK 0x0F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__UPSTREAM_PORT_RX_PRESET_HINT_MASK 0x7000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LANE_15_EQUALIZATION_CNTL__RESERVED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__SOURCE_VALIDATION_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__TRANSLATION_BLOCKING_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_REQUEST_REDIRECT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_COMPLETION_REDIRECT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__UPSTREAM_FORWARDING_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__P2P_EGRESS_CONTROL_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__DIRECT_TRANSLATED_P2P_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CAP__EGRESS_CONTROL_VECTOR_SIZE_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__SOURCE_VALIDATION_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__TRANSLATION_BLOCKING_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_REQUEST_REDIRECT_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_COMPLETION_REDIRECT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__UPSTREAM_FORWARDING_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__P2P_EGRESS_CONTROL_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ACS_CNTL__DIRECT_TRANSLATED_P2P_EN_MASK 0x0040L +//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_RESET__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_CNTL__PRI_RESET_MASK 0x0002L +//BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__STOPPED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__RESPONSE_FAILURE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__UNEXPECTED_PAGE_REQ_GRP_INDEX_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__STOPPED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PAGE_REQ_STATUS__PRG_RESPONSE_PASID_REQUIRED_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY +#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_CAPACITY__OUTSTAND_PAGE_REQ_CAPACITY_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC +#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_OUTSTAND_PAGE_REQ_ALLOC__OUTSTAND_PAGE_REQ_ALLOC_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_EXE_PERMISSION_SUPPORTED_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__PASID_PRIV_MODE_SUPPORTED_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CAP__MAX_PASID_WIDTH_MASK 0x1F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_EXE_PERMISSION_ENABLE_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_PASID_CNTL__PASID_PRIV_MODE_SUPPORTED_ENABLE_MASK 0x0004L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_MAX_GROUP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_MAX_GROUP_MASK 0x003FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_WIN_SIZE_REQ_MASK 0x3F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CAP__MC_ECRC_REGEN_SUPP_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_NUM_GROUP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_NUM_GROUP_MASK 0x003FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_CNTL__MC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_INDEX_POS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_INDEX_POS_MASK 0x0000003FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR0__MC_BASE_ADDR_0_MASK 0xFFFFF000L +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_ADDR1__MC_BASE_ADDR_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0__MC_RECEIVE_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV0__MC_RECEIVE_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1__MC_RECEIVE_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_RCV1__MC_RECEIVE_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL0__MC_BLOCK_ALL_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_ALL1__MC_BLOCK_ALL_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_0__MC_BLOCK_UNTRANSLATED_0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_MC_BLOCK_UNTRANSLATED_1__MC_BLOCK_UNTRANSLATED_1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_VALUE_MASK 0x000003FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_S_LATENCY_SCALE_MASK 0x00001C00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_VALUE_MASK 0x03FF0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_LTR_CAP__LTR_MAX_NS_LATENCY_SCALE_MASK 0x1C000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID__SHIFT 
0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_CAP_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_ARI_CAP_HIERARCHY_PRESERVED_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CAP__SRIOV_VF_MIGRATION_INTR_MSG_NUM_MASK 0xFFE00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_ENABLE_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_ENABLE_MASK 0x0002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MIGRATION_INTR_ENABLE_MASK 0x0004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_MSE_MASK 0x0008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_ARI_CAP_HIERARCHY_MASK 0x0010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_CONTROL__SRIOV_VF_TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x0020L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_STATUS__SRIOV_VF_MIGRATION_STATUS_MASK 0x0001L +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_INITIAL_VFS__SRIOV_INITIAL_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_TOTAL_VFS__SRIOV_TOTAL_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_NUM_VFS__SRIOV_NUM_VFS_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FUNC_DEP_LINK__SRIOV_FUNC_DEP_LINK_MASK 0x00FFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_FIRST_VF_OFFSET__SRIOV_FIRST_VF_OFFSET_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE 
+#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_STRIDE__SRIOV_VF_STRIDE_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_DEVICE_ID__SRIOV_VF_DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SUPPORTED_PAGE_SIZE__SRIOV_SUPPORTED_PAGE_SIZE_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_SYSTEM_PAGE_SIZE__SRIOV_SYSTEM_PAGE_SIZE_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_0__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_1__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_2__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_3__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_4__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_BASE_ADDR_5__VF_BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_BIF_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET__SRIOV_VF_MIGRATION_STATE_ARRAY_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED__SHIFT 0x2 
+#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_NO_ST_MODE_SUPPORTED_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_INT_VEC_MODE_SUPPORTED_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_DEV_SPC_MODE_SUPPORTED_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_EXTND_TPH_REQR_SUPPORED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_LOCATION_MASK 0x00000600L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CAP__TPH_REQR_ST_TABLE_SIZE_MASK 0x07FF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_ST_MODE_SEL_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_TPH_REQR_CNTL__TPH_REQR_EN_MASK 0x00000300L +//BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_DLF_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__LOCAL_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_CAP__DLF_EXCHANGE_ENABLE_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_MASK 0x007FFFFFL +#define BIF_CFG_DEV0_EPF1_0_DATA_LINK_FEATURE_STATUS__REMOTE_DLF_SUPPORTED_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PHY_16GT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CAP_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_CNTL_16GT__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT__SHIFT 0x1 +#define 
BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_COMPLETE_16GT_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE1_SUCCESS_16GT_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE2_SUCCESS_16GT_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__EQUALIZATION_PHASE3_SUCCESS_16GT_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_LINK_STATUS_16GT__LINK_EQUALIZATION_REQUEST_16GT_MASK 0x00000010L +//BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LOCAL_PARITY_MISMATCH_STATUS_16GT__LOCAL_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_RTM1_PARITY_MISMATCH_STATUS_16GT__RTM1_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT +#define BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_RTM2_PARITY_MISMATCH_STATUS_16GT__RTM2_PARITY_MISMATCH_STATUS_BITS_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_0_EQUALIZATION_CNTL_16GT__LANE_0_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_1_EQUALIZATION_CNTL_16GT__LANE_1_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_2_EQUALIZATION_CNTL_16GT__LANE_2_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_3_EQUALIZATION_CNTL_16GT__LANE_3_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_4_EQUALIZATION_CNTL_16GT__LANE_4_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_5_EQUALIZATION_CNTL_16GT__LANE_5_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_6_EQUALIZATION_CNTL_16GT__LANE_6_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_7_EQUALIZATION_CNTL_16GT__LANE_7_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_8_EQUALIZATION_CNTL_16GT__LANE_8_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_9_EQUALIZATION_CNTL_16GT__LANE_9_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_10_EQUALIZATION_CNTL_16GT__LANE_10_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_11_EQUALIZATION_CNTL_16GT__LANE_11_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_12_EQUALIZATION_CNTL_16GT__LANE_12_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_13_EQUALIZATION_CNTL_16GT__LANE_13_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_14_EQUALIZATION_CNTL_16GT__LANE_14_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT +#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_DSP_16GT_TX_PRESET_MASK 0x0FL +#define BIF_CFG_DEV0_EPF1_0_LANE_15_EQUALIZATION_CNTL_16GT__LANE_15_USP_16GT_TX_PRESET_MASK 0xF0L +//BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_MARGINING_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_CAP__MARGINING_USES_SOFTWARE_MASK 0x0001L +//BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_READY__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_READY_MASK 0x0001L +#define BIF_CFG_DEV0_EPF1_0_MARGINING_PORT_STATUS__MARGINING_SOFTWARE_READY_MASK 0x0002L +//BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_CNTL__LANE_0_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS 
+#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_0_MARGINING_LANE_STATUS__LANE_0_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_CNTL__LANE_1_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_1_MARGINING_LANE_STATUS__LANE_1_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_CNTL__LANE_2_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_2_MARGINING_LANE_STATUS__LANE_2_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_CNTL__LANE_3_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_3_MARGINING_LANE_STATUS__LANE_3_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_CNTL__LANE_4_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_TYPE_STATUS_MASK 0x0038L +#define 
BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_4_MARGINING_LANE_STATUS__LANE_4_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_CNTL__LANE_5_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_5_MARGINING_LANE_STATUS__LANE_5_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_CNTL__LANE_6_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_6_MARGINING_LANE_STATUS__LANE_6_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_CNTL__LANE_7_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_7_MARGINING_LANE_STATUS__LANE_7_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_CNTL__LANE_8_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_8_MARGINING_LANE_STATUS__LANE_8_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_CNTL__LANE_9_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_9_MARGINING_LANE_STATUS__LANE_9_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_CNTL__LANE_10_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_10_MARGINING_LANE_STATUS__LANE_10_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_USAGE_MODEL_MASK 0x0040L +#define 
BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_CNTL__LANE_11_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_11_MARGINING_LANE_STATUS__LANE_11_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_CNTL__LANE_12_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_12_MARGINING_LANE_STATUS__LANE_12_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_CNTL__LANE_13_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_13_MARGINING_LANE_STATUS__LANE_13_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_CNTL__LANE_14_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_14_MARGINING_LANE_STATUS__LANE_14_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_RECEIVER_NUMBER_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_TYPE_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_USAGE_MODEL_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_CNTL__LANE_15_MARGIN_PAYLOAD_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_RECEIVER_NUMBER_STATUS_MASK 0x0007L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_TYPE_STATUS_MASK 0x0038L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_USAGE_MODEL_STATUS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF1_0_LANE_15_MARGINING_LANE_STATUS__LANE_15_MARGIN_PAYLOAD_STATUS_MASK 0xFF00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR1_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR2_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR3_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR4_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR5_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CAP__VF_BAR_SIZE_SUPPORTED_MASK 0x00FFFFF0L +//BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_INDEX_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_TOTAL_NUM_MASK 0x000000E0L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VF_RESIZE_BAR6_CNTL__VF_BAR_SIZE_MASK 0x00003F00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST_GPUIOV__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_EN_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_SRIOV_SHADOW__VF_NUM_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_CMD_COMPLETE_INTR_EN_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_HANG_NEED_FLR_INTR_EN_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__GFX_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_CMD_COMPLETE_INTR_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_HANG_NEED_FLR_INTR_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_CMD_COMPLETE_INTR_EN_MASK 0x00001000L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_HANG_NEED_FLR_INTR_EN_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__UVD1_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_CMD_COMPLETE_INTR_EN_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_SELF_RECOVERED_INTR_EN_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_HANG_NEED_FLR_INTR_EN_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__VCE_VM_BUSY_TRANSITION_INTR_EN_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_TRN_ACK_INTR_EN_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_ENABLE__HVVM_MAILBOX_RCV_VALID_INTR_EN_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_CMD_COMPLETE_INTR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000002L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__GFX_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_CMD_COMPLETE_INTR_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_HANG_NEED_FLR_INTR_STATUS_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_CMD_COMPLETE_INTR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_HANG_NEED_FLR_INTR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__UVD1_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_CMD_COMPLETE_INTR_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_SELF_RECOVERED_INTR_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_HANG_NEED_FLR_INTR_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__VCE_VM_BUSY_TRANSITION_INTR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_TRN_ACK_INTR_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_INTR_STATUS__HVVM_MAILBOX_RCV_VALID_INTR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_RESET_CONTROL__SOFT_PF_FLR_MASK 0x0001L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__VF_INDEX_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__TRN_MSG_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW0__RCV_MSG_ACK_MASK 0x01000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID__SHIFT 0x1b +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID__SHIFT 0x1d +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK__SHIFT 0x1e +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_TRN_ACK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF0_RCV_VALID_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_TRN_ACK_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF1_RCV_VALID_MASK 0x00000008L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_TRN_ACK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF2_RCV_VALID_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_TRN_ACK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF3_RCV_VALID_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_TRN_ACK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF4_RCV_VALID_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_TRN_ACK_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF5_RCV_VALID_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_TRN_ACK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF6_RCV_VALID_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_TRN_ACK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF7_RCV_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_TRN_ACK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF8_RCV_VALID_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_TRN_ACK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF9_RCV_VALID_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_TRN_ACK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF10_RCV_VALID_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_TRN_ACK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF11_RCV_VALID_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_TRN_ACK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF12_RCV_VALID_MASK 0x02000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_TRN_ACK_MASK 0x04000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF13_RCV_VALID_MASK 0x08000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_TRN_ACK_MASK 0x10000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF14_RCV_VALID_MASK 0x20000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_TRN_ACK_MASK 0x40000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW1__VF15_RCV_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID__SHIFT 0xb +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID__SHIFT 0xd +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID__SHIFT 0xf +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID__SHIFT 0x1b +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID__SHIFT 0x1d +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK__SHIFT 0x1e +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_TRN_ACK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF16_RCV_VALID_MASK 0x00000002L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_TRN_ACK_MASK 0x00000004L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF17_RCV_VALID_MASK 0x00000008L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_TRN_ACK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF18_RCV_VALID_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_TRN_ACK_MASK 0x00000040L +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF19_RCV_VALID_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_TRN_ACK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF20_RCV_VALID_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_TRN_ACK_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF21_RCV_VALID_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_TRN_ACK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF22_RCV_VALID_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_TRN_ACK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF23_RCV_VALID_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_TRN_ACK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF24_RCV_VALID_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_TRN_ACK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF25_RCV_VALID_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_TRN_ACK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF26_RCV_VALID_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_TRN_ACK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF27_RCV_VALID_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_TRN_ACK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF28_RCV_VALID_MASK 0x02000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_TRN_ACK_MASK 0x04000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF29_RCV_VALID_MASK 0x08000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_TRN_ACK_MASK 0x10000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__VF30_RCV_VALID_MASK 0x20000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_TRN_ACK_MASK 0x40000000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_HVVM_MBOX_DW2__PF_RCV_VALID_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET__SHIFT 0xa +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_SIZE_MASK 0x0000007FL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__LOC_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_CONTEXT__CONTEXT_OFFSET_MASK 0xFFFFFC00L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED__SHIFT 
0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_AVAILABLE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_TOTAL_FB__TOTAL_FB_CONSUMED_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVDSCH_OFFSET_MASK 0x000000FFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__VCESCH_OFFSET_MASK 0x0000FF00L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__GFXSCH_OFFSET_MASK 0x00FF0000L +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_OFFSETS__UVD1SCH_OFFSET_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF__SHIFT 0x1f +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_VF_MASK 0x7FFFFFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_P2P_OVER_XGMI_ENABLE__P2P_OVER_XGMI_ENABLE_PF_MASK 0x80000000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF1_FB__VF1_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF2_FB__VF2_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF3_FB__VF3_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF4_FB__VF4_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF5_FB__VF5_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF6_FB__VF6_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF7_FB__VF7_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF8_FB__VF8_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF9_FB__VF9_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF10_FB__VF10_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF11_FB__VF11_FB_OFFSET_MASK 0xFFFF0000L 
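Each of the GPUIOV_VFn_FB registers defined above packs the per-VF framebuffer carve-out as a 16-bit size in bits 15:0 and a 16-bit offset in bits 31:16. As a minimal illustrative sketch (not part of the patch itself; it uses only the VF0_FB macros defined above, while the helper names and the already-read 32-bit register value are hypothetical), the two fields would be extracted like this:

#include <stdint.h>

/* Decode the size field (bits 15:0) of an already-read GPUIOV_VF0_FB value. */
static inline uint16_t gpuiov_vf0_fb_size(uint32_t reg_val)
{
	return (reg_val & BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE_MASK) >>
	       BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_SIZE__SHIFT;
}

/* Decode the offset field (bits 31:16) of the same register value. */
static inline uint16_t gpuiov_vf0_fb_offset(uint32_t reg_val)
{
	return (reg_val & BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET_MASK) >>
	       BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF0_FB__VF0_FB_OFFSET__SHIFT;
}

The same SIZE/OFFSET layout repeats verbatim for VF1_FB through VF30_FB below.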
+//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF12_FB__VF12_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF13_FB__VF13_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF14_FB__VF14_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF15_FB__VF15_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF16_FB__VF16_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF17_FB__VF17_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF18_FB__VF18_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_SIZE_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF19_FB__VF19_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF20_FB__VF20_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF21_FB__VF21_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF22_FB__VF22_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF23_FB__VF23_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF24_FB__VF24_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF25_FB__VF25_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF26_FB__VF26_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF27_FB__VF27_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF28_FB__VF28_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF29_FB__VF29_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_SIZE_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VF30_FB__VF30_FB_OFFSET_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW7__DW7_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVDSCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_VCESCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4 +#define 
BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_GFXSCH_DW8__DW8_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW0__DW0_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW1__DW1_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW2__DW2_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW3__DW3_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW4__DW4_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW5__DW5_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW6__DW6_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW7__DW7_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF1_0_PCIE_VENDOR_SPECIFIC_HDR_GPUIOV_UVD1SCH_DW8__DW8_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf0_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID +#define 
BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF0_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF0_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF0_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define 
BIF_CFG_DEV0_EPF0_VF0_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF0_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF0_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF0_0_BIST +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF0_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL 
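Throughout this header every field is described by the same two-macro convention, a __SHIFT plus a _MASK pair, which the driver normally consumes through token-pasting helpers in the REG_GET_FIELD/REG_SET_FIELD style. As a self-contained, illustrative approximation (the FIELD_GET_ macro and the function below are hypothetical, not part of the patch), splitting the VF0 Revision ID defined just above into its nibbles looks like this:

#include <stdint.h>

/* Local stand-in for a REG_GET_FIELD-style helper: paste the register and
 * field names together to reach the _MASK / __SHIFT pair defined above. */
#define FIELD_GET_(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Split an 8-bit Revision ID value into major/minor nibbles using the
 * BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID definitions above. */
static inline void vf0_revision_split(uint8_t rev, uint8_t *major, uint8_t *minor)
{
	*major = FIELD_GET_(rev, BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID, MAJOR_REV_ID);
	*minor = FIELD_GET_(rev, BIF_CFG_DEV0_EPF0_VF0_0_REVISION_ID, MINOR_REV_ID);
}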
+//BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L 
+#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF0_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF0_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY 
+#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST +#define 
BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF0_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf1_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF1_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF1_0_STATUS +#define 
BIF_CFG_DEV0_EPF0_VF1_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF1_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF1_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF1_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF1_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF1_0_BIST +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_STRT_MASK 0x40L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF1_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L 
+#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF1_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define 
BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define 
BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L 
+#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF1_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf2_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL 
+//BIF_CFG_DEV0_EPF0_VF2_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF2_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF2_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF2_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define 
BIF_CFG_DEV0_EPF0_VF2_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF2_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF2_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF2_0_BIST +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF2_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define 
BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define 
BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define 
BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L 
+#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF2_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF2_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define 
BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF2_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf3_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF3_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF3_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF0_VF3_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF3_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF3_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF3_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF3_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF3_0_BIST +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF3_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__PHANTOM_FUNC_MASK 
0x00000018L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP +#define 
BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF3_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__CAP_ID_MASK 
0x00FFL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA +#define 
BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF3_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define 
BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L 
+#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define 
BIF_CFG_DEV0_EPF0_VF3_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF3_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf4_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF4_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF4_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF4_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF4_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS +#define 
BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF4_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF4_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF4_0_BIST +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF4_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define 
BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define 
BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define 
BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L 
+#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF4_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF4_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 
+#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L 
+#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2 
+#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 
0xFF00L +//BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF4_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf5_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF5_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF5_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__CAP_LIST_MASK 
0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF5_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF5_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF5_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF5_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF5_0_BIST +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF5_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_VF5_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL +#define 
BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF5_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF5_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF5_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf6_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define 
BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF6_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF6_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF6_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF6_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF6_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LATENCY__LATENCY_TIMER_MASK 0xFFL 
+//BIF_CFG_DEV0_EPF0_VF6_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF6_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF6_0_BIST +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF6_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__VERSION_MASK 0x000FL +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS +#define 
BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define 
BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define 
BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 
0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF6_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF6_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0 +#define 
BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L 
+#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF6_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf7_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF7_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF7_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF7_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF7_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF7_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF7_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF7_0_BIST +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF7_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_VF7_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF7_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF7_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define 
BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL 
+#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF7_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf8_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define 
BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF8_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF8_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF8_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF8_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF8_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF8_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF8_0_BIST +#define 
BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF8_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define 
BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define 
BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF8_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L 
+//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF8_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 
+#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF8_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf9_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID +#define 
BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF9_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF9_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF9_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define 
BIF_CFG_DEV0_EPF0_VF9_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF9_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF9_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF9_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF9_0_BIST +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF9_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L 
+#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF9_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF9_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY 
+#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST +#define 
BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF9_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf10_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF10_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF10_0_STATUS +#define 
BIF_CFG_DEV0_EPF0_VF10_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF10_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF10_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF10_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF10_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF10_0_BIST +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_COMP_MASK 0x0FL +#define 
BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF10_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define 
BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 
0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define 
BIF_CFG_DEV0_EPF0_VF10_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_CAP_LIST__NEXT_PTR_MASK 
0xFF00L +//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF10_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 
+#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 
0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 
+#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF10_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf11_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF11_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF11_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF11_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF11_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF11_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF11_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF11_0_BIST +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF11_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID +#define 
BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define 
BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF11_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 
+#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF11_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define 
BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF11_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf12_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF12_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF12_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF12_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF12_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF12_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF12_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF12_0_BIST +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF12_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define 
BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 
+#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define 
BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF12_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF12_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2 +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L 
+#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L 
+#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define 
BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF12_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf13_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF13_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF13_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF13_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF13_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF13_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF13_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF13_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF13_0_BIST +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF13_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define 
BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define 
BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define 
BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 
0x00000800L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 
0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF13_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2 +#define 
BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define 
BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF13_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define 
BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 
+#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L 
+#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define 
BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define 
BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF13_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf14_bifcfgdecp 
+//BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF14_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF14_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF14_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF14_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_LATENCY +#define BIF_CFG_DEV0_EPF0_VF14_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF14_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF14_0_BIST +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF14_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR +#define 
BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 
+#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 
+#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 
0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF14_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L 
+//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF14_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define 
BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF14_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_nbif0_bif_cfg_dev0_epf0_vf15_bifcfgdecp +//BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID +#define BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID__VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_VENDOR_ID__VENDOR_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID__DEVICE_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_ID__DEVICE_ID_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_COMMAND +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__IO_ACCESS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_ACCESS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__BUS_MASTER_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SPECIAL_CYCLE_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_WRITE_INVALIDATE_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PAL_SNOOP_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PARITY_ERROR_RESPONSE__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__AD_STEPPING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SERR_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__FAST_B2B_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__INT_DIS__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__IO_ACCESS_EN_MASK 0x0001L +#define 
BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_ACCESS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__BUS_MASTER_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SPECIAL_CYCLE_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__MEM_WRITE_INVALIDATE_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PAL_SNOOP_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__PARITY_ERROR_RESPONSE_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__AD_STEPPING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__SERR_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__FAST_B2B_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF15_0_COMMAND__INT_DIS_MASK 0x0400L +//BIF_CFG_DEV0_EPF0_VF15_0_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__IMMEDIATE_READINESS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__INT_STATUS__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__CAP_LIST__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PCI_66_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__FAST_BACK_CAPABLE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__MASTER_DATA_PARITY_ERROR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__DEVSEL_TIMING__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNAL_TARGET_ABORT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_TARGET_ABORT__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_MASTER_ABORT__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNALED_SYSTEM_ERROR__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PARITY_ERROR_DETECTED__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__IMMEDIATE_READINESS_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__INT_STATUS_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__CAP_LIST_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PCI_66_CAP_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__FAST_BACK_CAPABLE_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__MASTER_DATA_PARITY_ERROR_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__DEVSEL_TIMING_MASK 0x0600L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNAL_TARGET_ABORT_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_TARGET_ABORT_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__RECEIVED_MASTER_ABORT_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__SIGNALED_SYSTEM_ERROR_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF15_0_STATUS__PARITY_ERROR_DETECTED_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID +#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MINOR_REV_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MAJOR_REV_ID__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MINOR_REV_ID_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF15_0_REVISION_ID__MAJOR_REV_ID_MASK 0xF0L +//BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE +#define BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE__PROG_INTERFACE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PROG_INTERFACE__PROG_INTERFACE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS +#define BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS__SUB_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_SUB_CLASS__SUB_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS__BASE_CLASS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_CLASS__BASE_CLASS_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE +#define BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE__CACHE_LINE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_CACHE_LINE__CACHE_LINE_SIZE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_LATENCY +#define 
BIF_CFG_DEV0_EPF0_VF15_0_LATENCY__LATENCY_TIMER__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LATENCY__LATENCY_TIMER_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_HEADER +#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__HEADER_TYPE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__DEVICE_TYPE__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__HEADER_TYPE_MASK 0x7FL +#define BIF_CFG_DEV0_EPF0_VF15_0_HEADER__DEVICE_TYPE_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF15_0_BIST +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_COMP__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_STRT__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_COMP_MASK 0x0FL +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_STRT_MASK 0x40L +#define BIF_CFG_DEV0_EPF0_VF15_0_BIST__BIST_CAP_MASK 0x80L +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_1__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_2__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_3__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_4__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_5__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_BASE_ADDR_6__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID +#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_ID__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_ADAPTER_ID__SUBSYSTEM_ID_MASK 0xFFFF0000L +//BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR +#define BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR__BASE_ADDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_ROM_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR +#define BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR__CAP_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_CAP_PTR__CAP_PTR_MASK 0x000000FFL +//BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE +#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE__INTERRUPT_LINE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_LINE__INTERRUPT_LINE_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN +#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN__INTERRUPT_PIN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_INTERRUPT_PIN__INTERRUPT_PIN_MASK 0xFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__VERSION__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__DEVICE_TYPE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__SLOT_IMPLEMENTED__SHIFT 0x8 
+#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__INT_MESSAGE_NUM__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__VERSION_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__DEVICE_TYPE_MASK 0x00F0L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__SLOT_IMPLEMENTED_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CAP__INT_MESSAGE_NUM_MASK 0x3E00L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__PHANTOM_FUNC__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__EXTENDED_TAG__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__FLR_CAPABLE__SHIFT 0x1c +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__MAX_PAYLOAD_SUPPORT_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__PHANTOM_FUNC_MASK 0x00000018L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__EXTENDED_TAG_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L0S_ACCEPTABLE_LATENCY_MASK 0x000001C0L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__L1_ACCEPTABLE_LATENCY_MASK 0x00000E00L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__ROLE_BASED_ERR_REPORTING_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_LIMIT_MASK 0x03FC0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__CAPTURED_SLOT_POWER_SCALE_MASK 0x0C000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP__FLR_CAPABLE_MASK 0x10000000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__CORR_ERR_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NON_FATAL_ERR_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__FATAL_ERR_EN__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__USR_REPORT_EN__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__RELAXED_ORD_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__EXTENDED_TAG_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__PHANTOM_FUNC_EN__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__AUX_POWER_PM_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NO_SNOOP_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__INITIATE_FLR__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__CORR_ERR_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NON_FATAL_ERR_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__FATAL_ERR_EN_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__USR_REPORT_EN_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__RELAXED_ORD_EN_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_PAYLOAD_SIZE_MASK 0x00E0L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__EXTENDED_TAG_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__PHANTOM_FUNC_EN_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__AUX_POWER_PM_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__NO_SNOOP_EN_MASK 0x0800L +#define 
BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__MAX_READ_REQUEST_SIZE_MASK 0x7000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL__INITIATE_FLR_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__CORR_ERR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__NON_FATAL_ERR__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__FATAL_ERR__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__USR_DETECTED__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__AUX_PWR__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__TRANSACTIONS_PEND__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__CORR_ERR_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__NON_FATAL_ERR_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__FATAL_ERR_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__USR_DETECTED_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__AUX_PWR_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__TRANSACTIONS_PEND_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS__EMER_POWER_REDUCTION_DETECTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PM_SUPPORT__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L0S_EXIT_LATENCY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L1_EXIT_LATENCY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__CLOCK_POWER_MANAGEMENT__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PORT_NUMBER__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_SPEED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_WIDTH_MASK 0x000003F0L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PM_SUPPORT_MASK 0x00000C00L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L0S_EXIT_LATENCY_MASK 0x00007000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__L1_EXIT_LATENCY_MASK 0x00038000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__SURPRISE_DOWN_ERR_REPORTING_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__DL_ACTIVE_REPORTING_CAPABLE_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__LINK_BW_NOTIFICATION_CAP_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__ASPM_OPTIONALITY_COMPLIANCE_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP__PORT_NUMBER_MASK 0xFF000000L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__PM_CONTROL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__READ_CPL_BOUNDARY__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__RETRAIN_LINK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__COMMON_CLOCK_CFG__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__EXTENDED_SYNC__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN__SHIFT 0x8 +#define 
BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__PM_CONTROL_MASK 0x0003L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__READ_CPL_BOUNDARY_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__RETRAIN_LINK_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__COMMON_CLOCK_CFG_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__EXTENDED_SYNC_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__CLOCK_POWER_MANAGEMENT_EN_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__HW_AUTONOMOUS_WIDTH_DISABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_BW_MANAGEMENT_INT_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL__LINK_AUTONOMOUS_BW_INT_EN_MASK 0x0800L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__CURRENT_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_TRAINING__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__SLOT_CLOCK_CFG__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__DL_ACTIVE__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__CURRENT_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__NEGOTIATED_LINK_WIDTH_MASK 0x03F0L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_TRAINING_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__SLOT_CLOCK_CFG_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__DL_ACTIVE_MASK 0x2000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_BW_MANAGEMENT_STATUS_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS__LINK_AUTONOMOUS_BW_STATUS_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__LTR_SUPPORTED__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__OBFF_SUPPORTED__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES__SHIFT 0x16 +#define 
BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ__SHIFT 0x1a +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_RANGE_SUPPORTED_MASK 0x0000000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CPL_TIMEOUT_DIS_SUPPORTED_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ARI_FORWARDING_SUPPORTED_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_ROUTING_SUPPORTED_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_32CMPLT_SUPPORTED_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__ATOMICOP_64CMPLT_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__CAS128_CMPLT_SUPPORTED_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__NO_RO_ENABLED_P2P_PASSING_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__LTR_SUPPORTED_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TPH_CPLR_SUPPORTED_MASK 0x00003000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_COMPLETER_SUPPORTED_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__TEN_BIT_TAG_REQUESTER_SUPPORTED_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__OBFF_SUPPORTED_MASK 0x000C0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EXTENDED_FMT_FIELD_SUPPORTED_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__END_END_TLP_PREFIX_SUPPORTED_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__MAX_END_END_TLP_PREFIXES_MASK 0x00C00000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_SUPPORTED_MASK 0x03000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CAP2__EMER_POWER_REDUCTION_INIT_REQ_MASK 0x04000000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ARI_FORWARDING_EN__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__LTR_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__OBFF_EN__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_VALUE_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__CPL_TIMEOUT_DIS_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ARI_FORWARDING_EN_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_REQUEST_EN_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__ATOMICOP_EGRESS_BLOCKING_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_REQUEST_ENABLE_MASK 0x0100L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__IDO_COMPLETION_ENABLE_MASK 0x0200L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__EMER_POWER_REDUCTION_REQUEST_MASK 0x0800L +#define 
BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__TEN_BIT_TAG_REQUESTER_ENABLE_MASK 0x1000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__OBFF_EN_MASK 0x6000L +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_CNTL2__END_END_TLP_PREFIX_BLOCKING_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_DEVICE_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__SUPPORTED_LINK_SPEED__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__CROSSLINK_SUPPORTED__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RESERVED__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__SUPPORTED_LINK_SPEED_MASK 0x000000FEL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__CROSSLINK_SUPPORTED_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM1_PRESENCE_DET_SUPPORT_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RTM2_PRESENCE_DET_SUPPORT_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CAP2__RESERVED_MASK 0xFE000000L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__TARGET_LINK_SPEED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_COMPLIANCE__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__XMIT_MARGIN__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_SOS__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__TARGET_LINK_SPEED_MASK 0x000FL +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_COMPLIANCE_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__HW_AUTONOMOUS_SPEED_DISABLE_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__SELECTABLE_DEEMPHASIS_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__XMIT_MARGIN_MASK 0x0380L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__ENTER_MOD_COMPLIANCE_MASK 0x0400L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_SOS_MASK 0x0800L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_CNTL2__COMPLIANCE_DEEMPHASIS_MASK 0xF000L +//BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM1_PRESENCE_DET__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM2_PRESENCE_DET__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CROSSLINK_RESOLUTION__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CUR_DEEMPHASIS_LEVEL_MASK 0x0001L +#define 
BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_COMPLETE_8GT_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE1_SUCCESS_8GT_MASK 0x0004L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE2_SUCCESS_8GT_MASK 0x0008L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__EQUALIZATION_PHASE3_SUCCESS_8GT_MASK 0x0010L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__LINK_EQUALIZATION_REQUEST_8GT_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM1_PRESENCE_DET_MASK 0x0040L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__RTM2_PRESENCE_DET_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__CROSSLINK_RESOLUTION_MASK 0x0300L +#define BIF_CFG_DEV0_EPF0_VF15_0_LINK_STATUS2__DOWNSTREAM_COMPONENT_PRESENCE_MASK 0x7000L +//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CAP2__RESERVED_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_CNTL2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2__RESERVED__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_SLOT_STATUS2__RESERVED_MASK 0xFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_EN__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_64BIT__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_CAP_MASK 0x000EL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_MULTI_EN_MASK 0x0070L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_64BIT_MASK 0x0080L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_CNTL__MSI_PERVECTOR_MASKING_CAP_MASK 0x0100L +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO__SHIFT 0x2 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_LO__MSI_MSG_ADDR_LO_MASK 0xFFFFFFFCL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_ADDR_HI__MSI_MSG_ADDR_HI_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA__MSI_DATA__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA__MSI_DATA_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK__MSI_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK__MSI_MASK_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64__MSI_DATA_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MSG_DATA_64__MSI_DATA_64_MASK 0x0000FFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64__MSI_MASK_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_MASK_64__MSI_MASK_64_MASK 0xFFFFFFFFL 
+//BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING__MSI_PENDING__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING__MSI_PENDING_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64__MSI_PENDING_64__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSI_PENDING_64__MSI_PENDING_64_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__NEXT_PTR__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__CAP_ID_MASK 0x00FFL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_CAP_LIST__NEXT_PTR_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_EN__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_TABLE_SIZE_MASK 0x07FFL +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_FUNC_MASK_MASK 0x4000L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_MSG_CNTL__MSIX_EN_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_TABLE__MSIX_TABLE_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_BIR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_OFFSET__SHIFT 0x3 +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_BIR_MASK 0x00000007L +#define BIF_CFG_DEV0_EPF0_VF15_0_MSIX_PBA__MSIX_PBA_OFFSET_MASK 0xFFFFFFF8L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_REV_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC_HDR__VSEC_LENGTH_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC1__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2__SCRATCH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_VENDOR_SPECIFIC2__SCRATCH_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST +#define 
BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_RPT_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__DLP_ERR_STATUS_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__SURPDN_ERR_STATUS_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__PSN_ERR_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__FC_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_TIMEOUT_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__CPL_ABORT_ERR_STATUS_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNEXP_CPL_STATUS_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__RCV_OVFL_STATUS_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MAL_TLP_STATUS_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ECRC_ERR_STATUS_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNSUPP_REQ_ERR_STATUS_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ACS_VIOLATION_STATUS_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__UNCORR_INT_ERR_STATUS_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__MC_BLOCKED_TLP_STATUS_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__ATOMICOP_EGRESS_BLOCKED_STATUS_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_STATUS__TLP_PREFIX_BLOCKED_ERR_STATUS_MASK 0x02000000L 
+//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__DLP_ERR_MASK_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__SURPDN_ERR_MASK_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__PSN_ERR_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__FC_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_TIMEOUT_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__CPL_ABORT_ERR_MASK_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNEXP_CPL_MASK_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__RCV_OVFL_MASK_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MAL_TLP_MASK_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ECRC_ERR_MASK_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNSUPP_REQ_ERR_MASK_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ACS_VIOLATION_MASK_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__UNCORR_INT_ERR_MASK_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__MC_BLOCKED_TLP_MASK_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__ATOMICOP_EGRESS_BLOCKED_MASK_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_MASK__TLP_PREFIX_BLOCKED_ERR_MASK_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY__SHIFT 0x10 
+#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY__SHIFT 0x11 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY__SHIFT 0x12 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY__SHIFT 0x13 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY__SHIFT 0x15 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY__SHIFT 0x16 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY__SHIFT 0x17 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY__SHIFT 0x18 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY__SHIFT 0x19 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__DLP_ERR_SEVERITY_MASK 0x00000010L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__SURPDN_ERR_SEVERITY_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__PSN_ERR_SEVERITY_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__FC_ERR_SEVERITY_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_TIMEOUT_SEVERITY_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__CPL_ABORT_ERR_SEVERITY_MASK 0x00008000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNEXP_CPL_SEVERITY_MASK 0x00010000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__RCV_OVFL_SEVERITY_MASK 0x00020000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MAL_TLP_SEVERITY_MASK 0x00040000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ECRC_ERR_SEVERITY_MASK 0x00080000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNSUPP_REQ_ERR_SEVERITY_MASK 0x00100000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ACS_VIOLATION_SEVERITY_MASK 0x00200000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__UNCORR_INT_ERR_SEVERITY_MASK 0x00400000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__MC_BLOCKED_TLP_SEVERITY_MASK 0x00800000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__ATOMICOP_EGRESS_BLOCKED_SEVERITY_MASK 0x01000000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_UNCORR_ERR_SEVERITY__TLP_PREFIX_BLOCKED_ERR_SEVERITY_MASK 0x02000000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__RCV_ERR_STATUS_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_TLP_STATUS_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__BAD_DLLP_STATUS_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_NUM_ROLLOVER_STATUS_MASK 
0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__REPLAY_TIMER_TIMEOUT_STATUS_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__ADVISORY_NONFATAL_ERR_STATUS_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__CORR_INT_ERR_STATUS_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_STATUS__HDR_LOG_OVFL_STATUS_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK__SHIFT 0xd +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK__SHIFT 0xe +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__RCV_ERR_MASK_MASK 0x00000001L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_TLP_MASK_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__BAD_DLLP_MASK_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_NUM_ROLLOVER_MASK_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__REPLAY_TIMER_TIMEOUT_MASK_MASK 0x00001000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__ADVISORY_NONFATAL_ERR_MASK_MASK 0x00002000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__CORR_INT_ERR_MASK_MASK 0x00004000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_CORR_ERR_MASK__HDR_LOG_OVFL_MASK_MASK 0x00008000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP__SHIFT 0x7 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP__SHIFT 0x9 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN__SHIFT 0xa +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT__SHIFT 0xb +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE__SHIFT 0xc +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__FIRST_ERR_PTR_MASK 0x0000001FL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_CAP_MASK 0x00000020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_GEN_EN_MASK 0x00000040L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_CAP_MASK 0x00000080L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__ECRC_CHECK_EN_MASK 0x00000100L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_CAP_MASK 0x00000200L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__MULTI_HDR_RECD_EN_MASK 0x00000400L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__TLP_PREFIX_LOG_PRESENT_MASK 0x00000800L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ADV_ERR_CAP_CNTL__COMPLETION_TIMEOUT_LOG_CAPABLE_MASK 0x00001000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0 +#define 
BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG0__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG1__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG2__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3__TLP_HDR__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_HDR_LOG3__TLP_HDR_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG0__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG1__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG2__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_TLP_PREFIX_LOG3__TLP_PREFIX_MASK 0xFFFFFFFFL +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST__SHIFT 0x5 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED__SHIFT 0x6 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__INVALIDATE_Q_DEPTH_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__PAGE_ALIGNED_REQUEST_MASK 0x0020L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CAP__GLOBAL_INVALIDATE_SUPPORTED_MASK 0x0040L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__STU__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0xf +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__STU_MASK 0x001FL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x8000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER__SHIFT 0x10 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR__SHIFT 0x14 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_ID_MASK 0x0000FFFFL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__CAP_VER_MASK 0x000F0000L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_ENH_CAP_LIST__NEXT_PTR_MASK 0xFFF00000L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP__SHIFT 
0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM__SHIFT 0x8 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_MFVC_FUNC_GROUPS_CAP_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_ACS_FUNC_GROUPS_CAP_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CAP__ARI_NEXT_FUNC_NUM_MASK 0xFF00L +//BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN__SHIFT 0x0 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN__SHIFT 0x1 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP__SHIFT 0x4 +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_MFVC_FUNC_GROUPS_EN_MASK 0x0001L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_ACS_FUNC_GROUPS_EN_MASK 0x0002L +#define BIF_CFG_DEV0_EPF0_VF15_0_PCIE_ARI_CNTL__ARI_FUNCTION_GROUP_MASK 0x0070L + + +// addressBlock: nbio_pcie0_pswusp0_pciedir_p +//PCIEP_RESERVED +#define PCIEP_RESERVED__RESERVED__SHIFT 0x0 +#define PCIEP_RESERVED__RESERVED_MASK 0xFFFFFFFFL +//PCIEP_SCRATCH +#define PCIEP_SCRATCH__PCIEP_SCRATCH__SHIFT 0x0 +#define PCIEP_SCRATCH__PCIEP_SCRATCH_MASK 0xFFFFFFFFL +//PCIEP_PORT_CNTL +#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN__SHIFT 0x0 +#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE__SHIFT 0x1 +#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN__SHIFT 0x2 +#define PCIEP_PORT_CNTL__NATIVE_PME_EN__SHIFT 0x3 +#define PCIEP_PORT_CNTL__PWR_FAULT_EN__SHIFT 0x4 +#define PCIEP_PORT_CNTL__PMI_BM_DIS__SHIFT 0x5 +#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S__SHIFT 0x8 +#define PCIEP_PORT_CNTL__CI_PRIV_MAX_CPL_PAYLOAD_SIZE__SHIFT 0x12 +#define PCIEP_PORT_CNTL__CI_SLV_RSP_POISONED_UR_MODE__SHIFT 0x18 +#define PCIEP_PORT_CNTL__CI_MAX_CPL_PAYLOAD_SIZE_MODE__SHIFT 0x1a +#define PCIEP_PORT_CNTL__SLV_PORT_REQ_EN_MASK 0x00000001L +#define PCIEP_PORT_CNTL__CI_SNOOP_OVERRIDE_MASK 0x00000002L +#define PCIEP_PORT_CNTL__HOTPLUG_MSG_EN_MASK 0x00000004L +#define PCIEP_PORT_CNTL__NATIVE_PME_EN_MASK 0x00000008L +#define PCIEP_PORT_CNTL__PWR_FAULT_EN_MASK 0x00000010L +#define PCIEP_PORT_CNTL__PMI_BM_DIS_MASK 0x00000020L +#define PCIEP_PORT_CNTL__CI_SLV_CPL_STATIC_ALLOC_LIMIT_S_MASK 0x0003FF00L +#define PCIEP_PORT_CNTL__CI_PRIV_MAX_CPL_PAYLOAD_SIZE_MASK 0x001C0000L +#define PCIEP_PORT_CNTL__CI_SLV_RSP_POISONED_UR_MODE_MASK 0x03000000L +#define PCIEP_PORT_CNTL__CI_MAX_CPL_PAYLOAD_SIZE_MODE_MASK 0x0C000000L +//PCIE_TX_CNTL +#define PCIE_TX_CNTL__TX_SNR_OVERRIDE__SHIFT 0xa +#define PCIE_TX_CNTL__TX_RO_OVERRIDE__SHIFT 0xc +#define PCIE_TX_CNTL__TX_PACK_PACKET_DIS__SHIFT 0xe +#define PCIE_TX_CNTL__TX_FLUSH_TLP_DIS__SHIFT 0xf +#define PCIE_TX_CNTL__TX_CPL_PASS_P__SHIFT 0x14 +#define PCIE_TX_CNTL__TX_NP_PASS_P__SHIFT 0x15 +#define PCIE_TX_CNTL__TX_CLEAR_EXTRA_PM_REQS__SHIFT 0x16 +#define PCIE_TX_CNTL__TX_FC_UPDATE_TIMEOUT_DIS__SHIFT 0x17 +#define PCIE_TX_CNTL__TX_F0_TPH_DIS__SHIFT 0x18 +#define PCIE_TX_CNTL__TX_F1_TPH_DIS__SHIFT 0x19 +#define PCIE_TX_CNTL__TX_F2_TPH_DIS__SHIFT 0x1a +#define PCIE_TX_CNTL__TX_SNR_OVERRIDE_MASK 0x00000C00L +#define PCIE_TX_CNTL__TX_RO_OVERRIDE_MASK 0x00003000L +#define PCIE_TX_CNTL__TX_PACK_PACKET_DIS_MASK 0x00004000L +#define PCIE_TX_CNTL__TX_FLUSH_TLP_DIS_MASK 0x00008000L +#define PCIE_TX_CNTL__TX_CPL_PASS_P_MASK 0x00100000L +#define PCIE_TX_CNTL__TX_NP_PASS_P_MASK 0x00200000L +#define PCIE_TX_CNTL__TX_CLEAR_EXTRA_PM_REQS_MASK 0x00400000L +#define PCIE_TX_CNTL__TX_FC_UPDATE_TIMEOUT_DIS_MASK 0x00800000L +#define 
PCIE_TX_CNTL__TX_F0_TPH_DIS_MASK 0x01000000L +#define PCIE_TX_CNTL__TX_F1_TPH_DIS_MASK 0x02000000L +#define PCIE_TX_CNTL__TX_F2_TPH_DIS_MASK 0x04000000L +//PCIE_TX_REQUESTER_ID +#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION__SHIFT 0x0 +#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE__SHIFT 0x3 +#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS__SHIFT 0x8 +#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_FUNCTION_MASK 0x00000007L +#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_DEVICE_MASK 0x000000F8L +#define PCIE_TX_REQUESTER_ID__TX_REQUESTER_ID_BUS_MASK 0x0000FF00L +//PCIE_TX_VENDOR_SPECIFIC +#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA__SHIFT 0x0 +#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_SEND__SHIFT 0x18 +#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_DATA_MASK 0x00FFFFFFL +#define PCIE_TX_VENDOR_SPECIFIC__TX_VENDOR_SEND_MASK 0x01000000L +//PCIE_TX_REQUEST_NUM_CNTL +#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP__SHIFT 0x18 +#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN__SHIFT 0x1e +#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN__SHIFT 0x1f +#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_MASK 0x3F000000L +#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_VC1_EN_MASK 0x40000000L +#define PCIE_TX_REQUEST_NUM_CNTL__TX_NUM_OUTSTANDING_NP_EN_MASK 0x80000000L +//PCIE_TX_SEQ +#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ__SHIFT 0x0 +#define PCIE_TX_SEQ__TX_ACKD_SEQ__SHIFT 0x10 +#define PCIE_TX_SEQ__TX_NEXT_TRANSMIT_SEQ_MASK 0x00000FFFL +#define PCIE_TX_SEQ__TX_ACKD_SEQ_MASK 0x0FFF0000L +//PCIE_TX_REPLAY +#define PCIE_TX_REPLAY__TX_REPLAY_NUM__SHIFT 0x0 +#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE__SHIFT 0xf +#define PCIE_TX_REPLAY__TX_REPLAY_TIMER__SHIFT 0x10 +#define PCIE_TX_REPLAY__TX_REPLAY_NUM_MASK 0x00000007L +#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_OVERWRITE_MASK 0x00008000L +#define PCIE_TX_REPLAY__TX_REPLAY_TIMER_MASK 0xFFFF0000L +//PCIE_TX_ACK_LATENCY_LIMIT +#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT__SHIFT 0x0 +#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE__SHIFT 0xc +#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_MASK 0x00000FFFL +#define PCIE_TX_ACK_LATENCY_LIMIT__TX_ACK_LATENCY_LIMIT_OVERWRITE_MASK 0x00001000L +//PCIE_TX_NOP_DLLP +#define PCIE_TX_NOP_DLLP__TX_NOP_DATA__SHIFT 0x0 +#define PCIE_TX_NOP_DLLP__TX_NOP_SEND__SHIFT 0x18 +#define PCIE_TX_NOP_DLLP__TX_NOP_DATA_MASK 0x00FFFFFFL +#define PCIE_TX_NOP_DLLP__TX_NOP_SEND_MASK 0x01000000L +//PCIE_TX_CREDITS_ADVT_P +#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD__SHIFT 0x0 +#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH__SHIFT 0x10 +#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PD_MASK 0x00000FFFL +#define PCIE_TX_CREDITS_ADVT_P__TX_CREDITS_ADVT_PH_MASK 0x00FF0000L +//PCIE_TX_CREDITS_ADVT_NP +#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD__SHIFT 0x0 +#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH__SHIFT 0x10 +#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPD_MASK 0x00000FFFL +#define PCIE_TX_CREDITS_ADVT_NP__TX_CREDITS_ADVT_NPH_MASK 0x00FF0000L +//PCIE_TX_CREDITS_ADVT_CPL +#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD__SHIFT 0x0 +#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH__SHIFT 0x10 +#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLD_MASK 0x00000FFFL +#define PCIE_TX_CREDITS_ADVT_CPL__TX_CREDITS_ADVT_CPLH_MASK 0x00FF0000L +//PCIE_TX_CREDITS_INIT_P +#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD__SHIFT 0x0 +#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH__SHIFT 0x10 
+#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PD_MASK 0x00000FFFL +#define PCIE_TX_CREDITS_INIT_P__TX_CREDITS_INIT_PH_MASK 0x00FF0000L +//PCIE_TX_CREDITS_INIT_NP +#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD__SHIFT 0x0 +#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH__SHIFT 0x10 +#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPD_MASK 0x00000FFFL +#define PCIE_TX_CREDITS_INIT_NP__TX_CREDITS_INIT_NPH_MASK 0x00FF0000L +//PCIE_TX_CREDITS_INIT_CPL +#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD__SHIFT 0x0 +#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH__SHIFT 0x10 +#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLD_MASK 0x00000FFFL +#define PCIE_TX_CREDITS_INIT_CPL__TX_CREDITS_INIT_CPLH_MASK 0x00FF0000L +//PCIE_TX_CREDITS_STATUS +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD__SHIFT 0x0 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH__SHIFT 0x1 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD__SHIFT 0x2 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH__SHIFT 0x3 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD__SHIFT 0x4 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH__SHIFT 0x5 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD__SHIFT 0x10 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH__SHIFT 0x11 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD__SHIFT 0x12 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH__SHIFT 0x13 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD__SHIFT 0x14 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH__SHIFT 0x15 +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PD_MASK 0x00000001L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_PH_MASK 0x00000002L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPD_MASK 0x00000004L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_NPH_MASK 0x00000008L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLD_MASK 0x00000010L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_ERR_CPLH_MASK 0x00000020L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PD_MASK 0x00010000L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_PH_MASK 0x00020000L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPD_MASK 0x00040000L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_NPH_MASK 0x00080000L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLD_MASK 0x00100000L +#define PCIE_TX_CREDITS_STATUS__TX_CREDITS_CUR_STATUS_CPLH_MASK 0x00200000L +//PCIE_TX_CREDITS_FCU_THRESHOLD +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0__SHIFT 0x0 +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0__SHIFT 0x4 +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0__SHIFT 0x8 +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1__SHIFT 0x10 +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1__SHIFT 0x14 +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1__SHIFT 0x18 +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC0_MASK 0x00000007L +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC0_MASK 0x00000070L +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC0_MASK 0x00000700L +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_P_VC1_MASK 0x00070000L +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_NP_VC1_MASK 0x00700000L +#define PCIE_TX_CREDITS_FCU_THRESHOLD__TX_FCU_THRESHOLD_CPL_VC1_MASK 0x07000000L +//PCIE_P_PORT_LANE_STATUS +#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL__SHIFT 0x0 +#define 
PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH__SHIFT 0x1 +#define PCIE_P_PORT_LANE_STATUS__PORT_LANE_REVERSAL_MASK 0x00000001L +#define PCIE_P_PORT_LANE_STATUS__PHY_LINK_WIDTH_MASK 0x0000007EL +//PCIE_FC_P +#define PCIE_FC_P__PD_CREDITS__SHIFT 0x0 +#define PCIE_FC_P__PH_CREDITS__SHIFT 0x10 +#define PCIE_FC_P__PD_CREDITS_MASK 0x0000FFFFL +#define PCIE_FC_P__PH_CREDITS_MASK 0x0FFF0000L +//PCIE_FC_NP +#define PCIE_FC_NP__NPD_CREDITS__SHIFT 0x0 +#define PCIE_FC_NP__NPH_CREDITS__SHIFT 0x10 +#define PCIE_FC_NP__NPD_CREDITS_MASK 0x0000FFFFL +#define PCIE_FC_NP__NPH_CREDITS_MASK 0x0FFF0000L +//PCIE_FC_CPL +#define PCIE_FC_CPL__CPLD_CREDITS__SHIFT 0x0 +#define PCIE_FC_CPL__CPLH_CREDITS__SHIFT 0x10 +#define PCIE_FC_CPL__CPLD_CREDITS_MASK 0x0000FFFFL +#define PCIE_FC_CPL__CPLH_CREDITS_MASK 0x0FFF0000L +//PCIE_FC_P_VC1 +#define PCIE_FC_P_VC1__ADVT_FC_VC1_PD_CREDITS__SHIFT 0x0 +#define PCIE_FC_P_VC1__ADVT_FC_VC1_PH_CREDITS__SHIFT 0x10 +#define PCIE_FC_P_VC1__ADVT_FC_VC1_PD_CREDITS_MASK 0x0000FFFFL +#define PCIE_FC_P_VC1__ADVT_FC_VC1_PH_CREDITS_MASK 0x0FFF0000L +//PCIE_FC_NP_VC1 +#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPD_CREDITS__SHIFT 0x0 +#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPH_CREDITS__SHIFT 0x10 +#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPD_CREDITS_MASK 0x0000FFFFL +#define PCIE_FC_NP_VC1__ADVT_FC_VC1_NPH_CREDITS_MASK 0x0FFF0000L +//PCIE_FC_CPL_VC1 +#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLD_CREDITS__SHIFT 0x0 +#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLH_CREDITS__SHIFT 0x10 +#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLD_CREDITS_MASK 0x0000FFFFL +#define PCIE_FC_CPL_VC1__ADVT_FC_VC1_CPLH_CREDITS_MASK 0x0FFF0000L +//PSWUSP0_PCIE_ERR_CNTL +#define PSWUSP0_PCIE_ERR_CNTL__ERR_REPORTING_DIS__SHIFT 0x0 +#define PSWUSP0_PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG__SHIFT 0x1 +#define PSWUSP0_PCIE_ERR_CNTL__RX_DROP_ECRC_FAILURES__SHIFT 0x2 +#define PSWUSP0_PCIE_ERR_CNTL__TX_GENERATE_LCRC_ERR__SHIFT 0x4 +#define PSWUSP0_PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR__SHIFT 0x5 +#define PSWUSP0_PCIE_ERR_CNTL__TX_GENERATE_ECRC_ERR__SHIFT 0x6 +#define PSWUSP0_PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR__SHIFT 0x7 +#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT__SHIFT 0x8 +#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED__SHIFT 0xb +#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED__SHIFT 0xc +#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED__SHIFT 0xd +#define PSWUSP0_PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS__SHIFT 0xe +#define PSWUSP0_PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS__SHIFT 0xf +#define PSWUSP0_PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET__SHIFT 0x10 +#define PSWUSP0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY__SHIFT 0x11 +#define PSWUSP0_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL__SHIFT 0x12 +#define PSWUSP0_PCIE_ERR_CNTL__ERR_REPORTING_DIS_MASK 0x00000001L +#define PSWUSP0_PCIE_ERR_CNTL__STRAP_FIRST_RCVD_ERR_LOG_MASK 0x00000002L +#define PSWUSP0_PCIE_ERR_CNTL__RX_DROP_ECRC_FAILURES_MASK 0x00000004L +#define PSWUSP0_PCIE_ERR_CNTL__TX_GENERATE_LCRC_ERR_MASK 0x00000010L +#define PSWUSP0_PCIE_ERR_CNTL__RX_GENERATE_LCRC_ERR_MASK 0x00000020L +#define PSWUSP0_PCIE_ERR_CNTL__TX_GENERATE_ECRC_ERR_MASK 0x00000040L +#define PSWUSP0_PCIE_ERR_CNTL__RX_GENERATE_ECRC_ERR_MASK 0x00000080L +#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_TIMEOUT_MASK 0x00000700L +#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F0_TIMER_EXPIRED_MASK 0x00000800L +#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F1_TIMER_EXPIRED_MASK 0x00001000L +#define PSWUSP0_PCIE_ERR_CNTL__AER_HDR_LOG_F2_TIMER_EXPIRED_MASK 0x00002000L +#define 
PSWUSP0_PCIE_ERR_CNTL__CI_P_SLV_BUF_RD_HALT_STATUS_MASK 0x00004000L +#define PSWUSP0_PCIE_ERR_CNTL__CI_NP_SLV_BUF_RD_HALT_STATUS_MASK 0x00008000L +#define PSWUSP0_PCIE_ERR_CNTL__CI_SLV_BUF_HALT_RESET_MASK 0x00010000L +#define PSWUSP0_PCIE_ERR_CNTL__SEND_ERR_MSG_IMMEDIATELY_MASK 0x00020000L +#define PSWUSP0_PCIE_ERR_CNTL__STRAP_POISONED_ADVISORY_NONFATAL_MASK 0x00040000L +//PSWUSP0_PCIE_RX_CNTL +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_IO_ERR__SHIFT 0x0 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_BE_ERR__SHIFT 0x1 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MSG_ERR__SHIFT 0x2 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CRC_ERR__SHIFT 0x3 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CFG_ERR__SHIFT 0x4 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CPL_ERR__SHIFT 0x5 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_EP_ERR__SHIFT 0x6 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR__SHIFT 0x7 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR__SHIFT 0x8 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR__SHIFT 0x9 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CFG_UR__SHIFT 0xa +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_IO_UR__SHIFT 0xb +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_AT_ERR__SHIFT 0xc +#define PSWUSP0_PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL__SHIFT 0xd +#define PSWUSP0_PCIE_RX_CNTL__RX_GEN_ONE_NAK__SHIFT 0xe +#define PSWUSP0_PCIE_RX_CNTL__RX_FC_INIT_FROM_REG__SHIFT 0xf +#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT__SHIFT 0x10 +#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE__SHIFT 0x13 +#define PSWUSP0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS__SHIFT 0x14 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR__SHIFT 0x15 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR__SHIFT 0x16 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR__SHIFT 0x17 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR__SHIFT 0x18 +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR__SHIFT 0x19 +#define PSWUSP0_PCIE_RX_CNTL__RX_TPH_DIS__SHIFT 0x1a +#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS__SHIFT 0x1b +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_IO_ERR_MASK 0x00000001L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_BE_ERR_MASK 0x00000002L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MSG_ERR_MASK 0x00000004L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CRC_ERR_MASK 0x00000008L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CFG_ERR_MASK 0x00000010L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CPL_ERR_MASK 0x00000020L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_EP_ERR_MASK 0x00000040L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_LEN_MISMATCH_ERR_MASK 0x00000080L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MAX_PAYLOAD_ERR_MASK 0x00000100L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_TC_ERR_MASK 0x00000200L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CFG_UR_MASK 0x00000400L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_IO_UR_MASK 0x00000800L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_AT_ERR_MASK 0x00001000L +#define PSWUSP0_PCIE_RX_CNTL__RX_NAK_IF_FIFO_FULL_MASK 0x00002000L +#define PSWUSP0_PCIE_RX_CNTL__RX_GEN_ONE_NAK_MASK 0x00004000L +#define PSWUSP0_PCIE_RX_CNTL__RX_FC_INIT_FROM_REG_MASK 0x00008000L +#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MASK 0x00070000L +#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_CPL_TIMEOUT_MODE_MASK 0x00080000L +#define PSWUSP0_PCIE_RX_CNTL__RX_PCIE_CPL_TIMEOUT_DIS_MASK 0x00100000L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_SHORTPREFIX_ERR_MASK 0x00200000L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_MAXPREFIX_ERR_MASK 0x00400000L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_CPLPREFIX_ERR_MASK 0x00800000L +#define 
PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_INVALIDPASID_ERR_MASK 0x01000000L +#define PSWUSP0_PCIE_RX_CNTL__RX_IGNORE_NOT_PASID_UR_MASK 0x02000000L +#define PSWUSP0_PCIE_RX_CNTL__RX_TPH_DIS_MASK 0x04000000L +#define PSWUSP0_PCIE_RX_CNTL__RX_RCB_FLR_TIMEOUT_DIS_MASK 0x08000000L +//PCIE_RX_EXPECTED_SEQNUM +#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM__SHIFT 0x0 +#define PCIE_RX_EXPECTED_SEQNUM__RX_EXPECTED_SEQNUM_MASK 0x00000FFFL +//PCIE_RX_VENDOR_SPECIFIC +#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA__SHIFT 0x0 +#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS__SHIFT 0x18 +#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_DATA_MASK 0x00FFFFFFL +#define PCIE_RX_VENDOR_SPECIFIC__RX_VENDOR_STATUS_MASK 0x01000000L +//PCIE_RX_CNTL3 +#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR__SHIFT 0x0 +#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR__SHIFT 0x1 +#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR__SHIFT 0x2 +#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR__SHIFT 0x3 +#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR__SHIFT 0x4 +#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMRDPASID_UR_MASK 0x00000001L +#define PCIE_RX_CNTL3__RX_IGNORE_RC_TRANSMWRPASID_UR_MASK 0x00000002L +#define PCIE_RX_CNTL3__RX_IGNORE_RC_PRGRESPMSG_UR_MASK 0x00000004L +#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVREQ_UR_MASK 0x00000008L +#define PCIE_RX_CNTL3__RX_IGNORE_RC_INVCPLPASID_UR_MASK 0x00000010L +//PCIE_RX_CREDITS_ALLOCATED_P +#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD__SHIFT 0x0 +#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH__SHIFT 0x10 +#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PD_MASK 0x00000FFFL +#define PCIE_RX_CREDITS_ALLOCATED_P__RX_CREDITS_ALLOCATED_PH_MASK 0x00FF0000L +//PCIE_RX_CREDITS_ALLOCATED_NP +#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD__SHIFT 0x0 +#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH__SHIFT 0x10 +#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPD_MASK 0x00000FFFL +#define PCIE_RX_CREDITS_ALLOCATED_NP__RX_CREDITS_ALLOCATED_NPH_MASK 0x00FF0000L +//PCIE_RX_CREDITS_ALLOCATED_CPL +#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD__SHIFT 0x0 +#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH__SHIFT 0x10 +#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLD_MASK 0x00000FFFL +#define PCIE_RX_CREDITS_ALLOCATED_CPL__RX_CREDITS_ALLOCATED_CPLH_MASK 0x00FF0000L +//PCIEP_ERROR_INJECT_PHYSICAL +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LANE_ERR__SHIFT 0x0 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_FRAMING_ERR__SHIFT 0x2 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_PARITY_IN_SKP__SHIFT 0x4 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_LFSR_IN_SKP__SHIFT 0x6 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_UFLOW__SHIFT 0x8 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_OFLOW__SHIFT 0xa +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_DESKEW_ERR__SHIFT 0xc +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DISPARITY_ERR__SHIFT 0xe +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DECODE_ERR__SHIFT 0x10 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_SKP_OS_ERROR__SHIFT 0x12 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_INV_OS_IDENTIFIER__SHIFT 0x14 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_SYNC_HEADER__SHIFT 0x16 +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LANE_ERR_MASK 0x00000003L +#define 
PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_FRAMING_ERR_MASK 0x0000000CL +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_PARITY_IN_SKP_MASK 0x00000030L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_LFSR_IN_SKP_MASK 0x000000C0L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_UFLOW_MASK 0x00000300L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_LOOPBACK_OFLOW_MASK 0x00000C00L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_DESKEW_ERR_MASK 0x00003000L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DISPARITY_ERR_MASK 0x0000C000L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_8B10B_DECODE_ERR_MASK 0x00030000L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_SKP_OS_ERROR_MASK 0x000C0000L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_INV_OS_IDENTIFIER_MASK 0x00300000L +#define PCIEP_ERROR_INJECT_PHYSICAL__ERROR_INJECT_PL_BAD_SYNC_HEADER_MASK 0x00C00000L +//PCIEP_ERROR_INJECT_TRANSACTION +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_FLOW_CTL_ERR__SHIFT 0x0 +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_REPLAY_NUM_ROLLOVER__SHIFT 0x2 +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_DLLP__SHIFT 0x4 +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_TLP__SHIFT 0x6 +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNSUPPORTED_REQ__SHIFT 0x8 +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_ECRC_ERROR__SHIFT 0xa +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_MALFORMED_TLP__SHIFT 0xc +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNEXPECTED_CMPLT__SHIFT 0xe +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETER_ABORT__SHIFT 0x10 +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETION_TIMEOUT__SHIFT 0x12 +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_FLOW_CTL_ERR_MASK 0x00000003L +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_REPLAY_NUM_ROLLOVER_MASK 0x0000000CL +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_DLLP_MASK 0x00000030L +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_BAD_TLP_MASK 0x000000C0L +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNSUPPORTED_REQ_MASK 0x00000300L +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_ECRC_ERROR_MASK 0x00000C00L +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_MALFORMED_TLP_MASK 0x00003000L +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_UNEXPECTED_CMPLT_MASK 0x0000C000L +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETER_ABORT_MASK 0x00030000L +#define PCIEP_ERROR_INJECT_TRANSACTION__ERROR_INJECT_TL_COMPLETION_TIMEOUT_MASK 0x000C0000L +//PCIEP_SRIOV_PRIV_CTRL +#define PCIEP_SRIOV_PRIV_CTRL__RX_SRIOV_VF_MAPPING_MODE__SHIFT 0x0 +#define PCIEP_SRIOV_PRIV_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR__SHIFT 0x2 +#define PCIEP_SRIOV_PRIV_CTRL__RX_SRIOV_VF_MAPPING_MODE_MASK 0x00000003L +#define PCIEP_SRIOV_PRIV_CTRL__SRIOV_SAVE_VFS_ON_VFENABLE_CLR_MASK 0x0000000CL +//PCIEP_NAK_COUNTER +#define PCIEP_NAK_COUNTER__RX_NUM_NAK_RECEIVED_PORT__SHIFT 0x0 +#define PCIEP_NAK_COUNTER__RX_NUM_NAK_GENERATED_PORT__SHIFT 0x10 +#define PCIEP_NAK_COUNTER__RX_NUM_NAK_RECEIVED_PORT_MASK 0x0000FFFFL +#define PCIEP_NAK_COUNTER__RX_NUM_NAK_GENERATED_PORT_MASK 0xFFFF0000L +//PCIE_LC_CNTL +#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0__SHIFT 0x1 +#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN__SHIFT 0x2 +#define PCIE_LC_CNTL__LC_RESET_LINK__SHIFT 0x3 +#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE__SHIFT 0x4 +#define 
PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT 0x8 +#define PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT 0xc +#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT 0x10 +#define PCIE_LC_CNTL__LC_INC_N_FTS_EN__SHIFT 0x11 +#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23__SHIFT 0x12 +#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC__SHIFT 0x14 +#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS__SHIFT 0x15 +#define PCIE_LC_CNTL__LC_WAKE_FROM_L23__SHIFT 0x16 +#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK__SHIFT 0x17 +#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS__SHIFT 0x18 +#define PCIE_LC_CNTL__LC_DELAY_COUNT__SHIFT 0x19 +#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT__SHIFT 0x1b +#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT__SHIFT 0x1c +#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE__SHIFT 0x1d +#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN__SHIFT 0x1e +#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE__SHIFT 0x1f +#define PCIE_LC_CNTL__LC_DONT_ENTER_L23_IN_D0_MASK 0x00000002L +#define PCIE_LC_CNTL__LC_RESET_L_IDLE_COUNT_EN_MASK 0x00000004L +#define PCIE_LC_CNTL__LC_RESET_LINK_MASK 0x00000008L +#define PCIE_LC_CNTL__LC_16X_CLEAR_TX_PIPE_MASK 0x000000F0L +#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK 0x00000F00L +#define PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK 0x0000F000L +#define PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK 0x00010000L +#define PCIE_LC_CNTL__LC_INC_N_FTS_EN_MASK 0x00020000L +#define PCIE_LC_CNTL__LC_LOOK_FOR_IDLE_IN_L1L23_MASK 0x000C0000L +#define PCIE_LC_CNTL__LC_FACTOR_IN_EXT_SYNC_MASK 0x00100000L +#define PCIE_LC_CNTL__LC_WAIT_FOR_PM_ACK_DIS_MASK 0x00200000L +#define PCIE_LC_CNTL__LC_WAKE_FROM_L23_MASK 0x00400000L +#define PCIE_LC_CNTL__LC_L1_IMMEDIATE_ACK_MASK 0x00800000L +#define PCIE_LC_CNTL__LC_ASPM_TO_L1_DIS_MASK 0x01000000L +#define PCIE_LC_CNTL__LC_DELAY_COUNT_MASK 0x06000000L +#define PCIE_LC_CNTL__LC_DELAY_L0S_EXIT_MASK 0x08000000L +#define PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK 0x10000000L +#define PCIE_LC_CNTL__LC_EXTEND_WAIT_FOR_EL_IDLE_MASK 0x20000000L +#define PCIE_LC_CNTL__LC_ESCAPE_L1L23_EN_MASK 0x40000000L +#define PCIE_LC_CNTL__LC_GATE_RCVR_IDLE_MASK 0x80000000L +//PCIE_LC_TRAINING_CNTL +#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL__SHIFT 0x0 +#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE__SHIFT 0x4 +#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1__SHIFT 0x5 +#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN__SHIFT 0x6 +#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN__SHIFT 0x7 +#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE__SHIFT 0x8 +#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED__SHIFT 0xb +#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN__SHIFT 0xc +#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH__SHIFT 0xd +#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_SETS_IN_RCFG__SHIFT 0xe +#define PCIE_LC_TRAINING_CNTL__LC_HOT_RESET_QUICK_EXIT_EN__SHIFT 0xf +#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP__SHIFT 0x10 +#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF__SHIFT 0x11 +#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF__SHIFT 0x12 +#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN__SHIFT 0x13 +#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW__SHIFT 0x14 +#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN__SHIFT 0x15 +#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL__SHIFT 0x16 +#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED__SHIFT 0x18 +#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST__SHIFT 0x19 +#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER__SHIFT 0x1a +#define 
PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT__SHIFT 0x1b +#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL__SHIFT 0x1c +#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_FOM_VALID_AFTER_TRACK__SHIFT 0x1d +#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME__SHIFT 0x1e +#define PCIE_LC_TRAINING_CNTL__LC_TRAINING_CNTL_MASK 0x0000000FL +#define PCIE_LC_TRAINING_CNTL__LC_COMPLIANCE_RECEIVE_MASK 0x00000010L +#define PCIE_LC_TRAINING_CNTL__LC_LOOK_FOR_MORE_NON_MATCHING_TS1_MASK 0x00000020L +#define PCIE_LC_TRAINING_CNTL__LC_L0S_L1_TRAINING_CNTL_EN_MASK 0x00000040L +#define PCIE_LC_TRAINING_CNTL__LC_L1_LONG_WAKE_FIX_EN_MASK 0x00000080L +#define PCIE_LC_TRAINING_CNTL__LC_POWER_STATE_MASK 0x00000700L +#define PCIE_LC_TRAINING_CNTL__LC_DONT_GO_TO_L0S_IF_L1_ARMED_MASK 0x00000800L +#define PCIE_LC_TRAINING_CNTL__LC_INIT_SPD_CHG_WITH_CSR_EN_MASK 0x00001000L +#define PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK 0x00002000L +#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_SETS_IN_RCFG_MASK 0x00004000L +#define PCIE_LC_TRAINING_CNTL__LC_HOT_RESET_QUICK_EXIT_EN_MASK 0x00008000L +#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_WAIT_FOR_SKP_MASK 0x00010000L +#define PCIE_LC_TRAINING_CNTL__LC_AUTONOMOUS_CHANGE_OFF_MASK 0x00020000L +#define PCIE_LC_TRAINING_CNTL__LC_UPCONFIGURE_CAP_OFF_MASK 0x00040000L +#define PCIE_LC_TRAINING_CNTL__LC_HW_LINK_DIS_EN_MASK 0x00080000L +#define PCIE_LC_TRAINING_CNTL__LC_LINK_DIS_BY_HW_MASK 0x00100000L +#define PCIE_LC_TRAINING_CNTL__LC_STATIC_TX_PIPE_COUNT_EN_MASK 0x00200000L +#define PCIE_LC_TRAINING_CNTL__LC_ASPM_L1_NAK_TIMER_SEL_MASK 0x00C00000L +#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_R_SPEED_MASK 0x01000000L +#define PCIE_LC_TRAINING_CNTL__LC_DONT_DEASSERT_RX_EN_IN_TEST_MASK 0x02000000L +#define PCIE_LC_TRAINING_CNTL__LC_RESET_ASPM_L1_NAK_TIMER_MASK 0x04000000L +#define PCIE_LC_TRAINING_CNTL__LC_SHORT_RCFG_TIMEOUT_MASK 0x08000000L +#define PCIE_LC_TRAINING_CNTL__LC_ALLOW_TX_L1_CONTROL_MASK 0x10000000L +#define PCIE_LC_TRAINING_CNTL__LC_WAIT_FOR_FOM_VALID_AFTER_TRACK_MASK 0x20000000L +#define PCIE_LC_TRAINING_CNTL__LC_EXTEND_EQ_REQ_TIME_MASK 0xC0000000L +//PCIE_LC_LINK_WIDTH_CNTL +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT 0x0 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE__SHIFT 0x7 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW__SHIFT 0x8 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT__SHIFT 0x9 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN__SHIFT 0xa +#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN__SHIFT 0xb +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT__SHIFT 0xc +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS__SHIFT 0xd +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS__SHIFT 0xe +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL__SHIFT 0xf +#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB__SHIFT 0x10 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN__SHIFT 0x11 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN__SHIFT 0x12 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN__SHIFT 0x13 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE__SHIFT 0x14 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT 0x15 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_EQ_REVERSAL_LOGIC_EN__SHIFT 0x17 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_MULT_REVERSE_ATTEMP_EN__SHIFT 0x18 +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RESET_TSX_CNT_IN_RCONFIG_EN__SHIFT 0x19 +#define 
PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_L_IDLE_IN_R_IDLE__SHIFT 0x1a +#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_NON_EI_ON_RXL0S_EXIT__SHIFT 0x1b +#define PCIE_LC_LINK_WIDTH_CNTL__LC_HOLD_EI_FOR_RSPEED_CMD_CHANGE__SHIFT 0x1c +#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXL0S_ON_SHORT_EI__SHIFT 0x1d +#define PCIE_LC_LINK_WIDTH_CNTL__LC_TURN_OFF_UNUSED_LANES__SHIFT 0x1e +#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXSTANDBY_STATUS__SHIFT 0x1f +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK 0x00000007L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_ARC_MISSING_ESCAPE_MASK 0x00000080L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK 0x00000100L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK 0x00000200L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK 0x00000400L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_SHORT_RECONFIG_EN_MASK 0x00000800L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK 0x00001000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK 0x00002000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_WAIT_FOR_RCVR_DIS_MASK 0x00004000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCFG_TIMER_SEL_MASK 0x00008000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_DEASSERT_TX_PDNB_MASK 0x00010000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK 0x00020000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYNLINK_MST_EN_MASK 0x00040000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_DUAL_END_RECONFIG_EN_MASK 0x00080000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_CAPABLE_MASK 0x00100000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK 0x00600000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_EQ_REVERSAL_LOGIC_EN_MASK 0x00800000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_MULT_REVERSE_ATTEMP_EN_MASK 0x01000000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_RESET_TSX_CNT_IN_RCONFIG_EN_MASK 0x02000000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_L_IDLE_IN_R_IDLE_MASK 0x04000000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_WAIT_FOR_NON_EI_ON_RXL0S_EXIT_MASK 0x08000000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_HOLD_EI_FOR_RSPEED_CMD_CHANGE_MASK 0x10000000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXL0S_ON_SHORT_EI_MASK 0x20000000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_TURN_OFF_UNUSED_LANES_MASK 0x40000000L +#define PCIE_LC_LINK_WIDTH_CNTL__LC_BYPASS_RXSTANDBY_STATUS_MASK 0x80000000L +//PCIE_LC_N_FTS_CNTL +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT 0x0 +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN__SHIFT 0x8 +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY__SHIFT 0x9 +#define PCIE_LC_N_FTS_CNTL__LC_N_EIE_SEL__SHIFT 0xa +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_8GT_CNTL__SHIFT 0xe +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_16GT_CNTL__SHIFT 0xf +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT__SHIFT 0x10 +#define PCIE_LC_N_FTS_CNTL__LC_N_FTS__SHIFT 0x18 +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK 0x000000FFL +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK 0x00000100L +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_FTS_BEFORE_RECOVERY_MASK 0x00000200L +#define PCIE_LC_N_FTS_CNTL__LC_N_EIE_SEL_MASK 0x00000400L +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_8GT_CNTL_MASK 0x00004000L +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_16GT_CNTL_MASK 0x00008000L +#define PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_LIMIT_MASK 0x00FF0000L +#define PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK 0xFF000000L +//PSWUSP0_PCIE_LC_SPEED_CNTL +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP__SHIFT 0x0 +#define 
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP__SHIFT 0x1 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP__SHIFT 0x2 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN__SHIFT 0x3 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE__SHIFT 0x4 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE__SHIFT 0x6 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE__SHIFT 0x7 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_EN_HW_SPEED_CHANGE__SHIFT 0x8 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE__SHIFT 0x9 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE__SHIFT 0xa +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0xb +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0xd +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xe +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS__SHIFT 0x10 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CLR_FAILED_SPD_CHANGE_CNT__SHIFT 0x11 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_1_OR_MORE_TS2_SPEED_ARC_EN__SHIFT 0x12 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2__SHIFT 0x13 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2__SHIFT 0x14 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3__SHIFT 0x15 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3__SHIFT 0x16 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN4__SHIFT 0x17 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN4__SHIFT 0x18 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_STATUS__SHIFT 0x19 +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED__SHIFT 0x1a +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE__SHIFT 0x1c +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN__SHIFT 0x1d +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L0s_EN__SHIFT 0x1e +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L1_EN__SHIFT 0x1f +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN2_EN_STRAP_MASK 0x00000001L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN3_EN_STRAP_MASK 0x00000002L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_GEN4_EN_STRAP_MASK 0x00000004L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_EN_MASK 0x00000008L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_TARGET_LINK_SPEED_OVERRIDE_MASK 0x00000030L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK 0x00000040L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK 0x00000080L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_EN_HW_SPEED_CHANGE_MASK 0x00000100L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK 0x00000200L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK 0x00000400L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x00001800L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00002000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0x0000C000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_DONT_CLR_TARGET_SPD_CHANGE_STATUS_MASK 0x00010000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CLR_FAILED_SPD_CHANGE_CNT_MASK 0x00020000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_1_OR_MORE_TS2_SPEED_ARC_EN_MASK 0x00040000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN2_MASK 0x00080000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN2_MASK 0x00100000L +#define 
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN3_MASK 0x00200000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN3_MASK 0x00400000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_EVER_SENT_GEN4_MASK 0x00800000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_OTHER_SIDE_SUPPORTS_GEN4_MASK 0x01000000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_SPEED_CHANGE_STATUS_MASK 0x02000000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_DATA_RATE_ADVERTISED_MASK 0x0C000000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CHECK_DATA_RATE_MASK 0x10000000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_MULT_UPSTREAM_AUTO_SPD_CHNG_EN_MASK 0x20000000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L0s_EN_MASK 0x40000000L +#define PSWUSP0_PCIE_LC_SPEED_CNTL__LC_INIT_SPEED_NEG_IN_L1_EN_MASK 0x80000000L +//PCIE_LC_STATE0 +#define PCIE_LC_STATE0__LC_CURRENT_STATE__SHIFT 0x0 +#define PCIE_LC_STATE0__LC_PREV_STATE1__SHIFT 0x8 +#define PCIE_LC_STATE0__LC_PREV_STATE2__SHIFT 0x10 +#define PCIE_LC_STATE0__LC_PREV_STATE3__SHIFT 0x18 +#define PCIE_LC_STATE0__LC_CURRENT_STATE_MASK 0x0000003FL +#define PCIE_LC_STATE0__LC_PREV_STATE1_MASK 0x00003F00L +#define PCIE_LC_STATE0__LC_PREV_STATE2_MASK 0x003F0000L +#define PCIE_LC_STATE0__LC_PREV_STATE3_MASK 0x3F000000L +//PCIE_LC_STATE1 +#define PCIE_LC_STATE1__LC_PREV_STATE4__SHIFT 0x0 +#define PCIE_LC_STATE1__LC_PREV_STATE5__SHIFT 0x8 +#define PCIE_LC_STATE1__LC_PREV_STATE6__SHIFT 0x10 +#define PCIE_LC_STATE1__LC_PREV_STATE7__SHIFT 0x18 +#define PCIE_LC_STATE1__LC_PREV_STATE4_MASK 0x0000003FL +#define PCIE_LC_STATE1__LC_PREV_STATE5_MASK 0x00003F00L +#define PCIE_LC_STATE1__LC_PREV_STATE6_MASK 0x003F0000L +#define PCIE_LC_STATE1__LC_PREV_STATE7_MASK 0x3F000000L +//PCIE_LC_STATE2 +#define PCIE_LC_STATE2__LC_PREV_STATE8__SHIFT 0x0 +#define PCIE_LC_STATE2__LC_PREV_STATE9__SHIFT 0x8 +#define PCIE_LC_STATE2__LC_PREV_STATE10__SHIFT 0x10 +#define PCIE_LC_STATE2__LC_PREV_STATE11__SHIFT 0x18 +#define PCIE_LC_STATE2__LC_PREV_STATE8_MASK 0x0000003FL +#define PCIE_LC_STATE2__LC_PREV_STATE9_MASK 0x00003F00L +#define PCIE_LC_STATE2__LC_PREV_STATE10_MASK 0x003F0000L +#define PCIE_LC_STATE2__LC_PREV_STATE11_MASK 0x3F000000L +//PCIE_LC_STATE3 +#define PCIE_LC_STATE3__LC_PREV_STATE12__SHIFT 0x0 +#define PCIE_LC_STATE3__LC_PREV_STATE13__SHIFT 0x8 +#define PCIE_LC_STATE3__LC_PREV_STATE14__SHIFT 0x10 +#define PCIE_LC_STATE3__LC_PREV_STATE15__SHIFT 0x18 +#define PCIE_LC_STATE3__LC_PREV_STATE12_MASK 0x0000003FL +#define PCIE_LC_STATE3__LC_PREV_STATE13_MASK 0x00003F00L +#define PCIE_LC_STATE3__LC_PREV_STATE14_MASK 0x003F0000L +#define PCIE_LC_STATE3__LC_PREV_STATE15_MASK 0x3F000000L +//PCIE_LC_STATE4 +#define PCIE_LC_STATE4__LC_PREV_STATE16__SHIFT 0x0 +#define PCIE_LC_STATE4__LC_PREV_STATE17__SHIFT 0x8 +#define PCIE_LC_STATE4__LC_PREV_STATE18__SHIFT 0x10 +#define PCIE_LC_STATE4__LC_PREV_STATE19__SHIFT 0x18 +#define PCIE_LC_STATE4__LC_PREV_STATE16_MASK 0x0000003FL +#define PCIE_LC_STATE4__LC_PREV_STATE17_MASK 0x00003F00L +#define PCIE_LC_STATE4__LC_PREV_STATE18_MASK 0x003F0000L +#define PCIE_LC_STATE4__LC_PREV_STATE19_MASK 0x3F000000L +//PCIE_LC_STATE5 +#define PCIE_LC_STATE5__LC_PREV_STATE20__SHIFT 0x0 +#define PCIE_LC_STATE5__LC_PREV_STATE21__SHIFT 0x8 +#define PCIE_LC_STATE5__LC_PREV_STATE22__SHIFT 0x10 +#define PCIE_LC_STATE5__LC_PREV_STATE23__SHIFT 0x18 +#define PCIE_LC_STATE5__LC_PREV_STATE20_MASK 0x0000003FL +#define PCIE_LC_STATE5__LC_PREV_STATE21_MASK 0x00003F00L +#define PCIE_LC_STATE5__LC_PREV_STATE22_MASK 0x003F0000L +#define PCIE_LC_STATE5__LC_PREV_STATE23_MASK 0x3F000000L 
+//PCIE_LINK_MANAGEMENT_CNTL2 +#define PCIE_LINK_MANAGEMENT_CNTL2__QUIESCE_RCVD__SHIFT 0x0 +#define PCIE_LINK_MANAGEMENT_CNTL2__QUIESCE_SENT__SHIFT 0x1 +#define PCIE_LINK_MANAGEMENT_CNTL2__REQ_EQ_RCVD__SHIFT 0x2 +#define PCIE_LINK_MANAGEMENT_CNTL2__REQ_EQ_SENT__SHIFT 0x3 +#define PCIE_LINK_MANAGEMENT_CNTL2__BW_HINT_MODE__SHIFT 0x4 +#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G2__SHIFT 0x7 +#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G2__SHIFT 0xb +#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G3__SHIFT 0xf +#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G3__SHIFT 0x13 +#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G4__SHIFT 0x17 +#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G4__SHIFT 0x1b +#define PCIE_LINK_MANAGEMENT_CNTL2__QUIESCE_RCVD_MASK 0x00000001L +#define PCIE_LINK_MANAGEMENT_CNTL2__QUIESCE_SENT_MASK 0x00000002L +#define PCIE_LINK_MANAGEMENT_CNTL2__REQ_EQ_RCVD_MASK 0x00000004L +#define PCIE_LINK_MANAGEMENT_CNTL2__REQ_EQ_SENT_MASK 0x00000008L +#define PCIE_LINK_MANAGEMENT_CNTL2__BW_HINT_MODE_MASK 0x00000070L +#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G2_MASK 0x00000780L +#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G2_MASK 0x00007800L +#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G3_MASK 0x00078000L +#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G3_MASK 0x00780000L +#define PCIE_LINK_MANAGEMENT_CNTL2__LOW_BW_THRESHOLD_G4_MASK 0x07800000L +#define PCIE_LINK_MANAGEMENT_CNTL2__HIGH_BW_THRESHOLD_G4_MASK 0x78000000L +//PSWUSP0_PCIE_LC_CNTL2 +#define PSWUSP0_PCIE_LC_CNTL2__LC_TIMED_OUT_STATE__SHIFT 0x0 +#define PSWUSP0_PCIE_LC_CNTL2__LC_STATE_TIMED_OUT__SHIFT 0x6 +#define PSWUSP0_PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION__SHIFT 0x7 +#define PSWUSP0_PCIE_LC_CNTL2__LC_MORE_TS2_EN__SHIFT 0x8 +#define PSWUSP0_PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS__SHIFT 0x9 +#define PSWUSP0_PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN__SHIFT 0xa +#define PSWUSP0_PCIE_LC_CNTL2__LC_ILLEGAL_STATE__SHIFT 0xb +#define PSWUSP0_PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN__SHIFT 0xc +#define PSWUSP0_PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE__SHIFT 0xd +#define PSWUSP0_PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE__SHIFT 0xe +#define PSWUSP0_PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET__SHIFT 0x10 +#define PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1__SHIFT 0x11 +#define PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23__SHIFT 0x12 +#define PSWUSP0_PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0__SHIFT 0x14 +#define PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS__SHIFT 0x15 +#define PSWUSP0_PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD__SHIFT 0x16 +#define PSWUSP0_PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG__SHIFT 0x17 +#define PSWUSP0_PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES__SHIFT 0x19 +#define PSWUSP0_PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS__SHIFT 0x1a +#define PSWUSP0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS__SHIFT 0x1b +#define PSWUSP0_PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE__SHIFT 0x1c +#define PSWUSP0_PCIE_LC_CNTL2__LC_TEST_TIMER_SEL__SHIFT 0x1d +#define PSWUSP0_PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI__SHIFT 0x1f +#define PSWUSP0_PCIE_LC_CNTL2__LC_TIMED_OUT_STATE_MASK 0x0000003FL +#define PSWUSP0_PCIE_LC_CNTL2__LC_STATE_TIMED_OUT_MASK 0x00000040L +#define PSWUSP0_PCIE_LC_CNTL2__LC_LOOK_FOR_BW_REDUCTION_MASK 0x00000080L +#define PSWUSP0_PCIE_LC_CNTL2__LC_MORE_TS2_EN_MASK 0x00000100L +#define PSWUSP0_PCIE_LC_CNTL2__LC_X12_NEGOTIATION_DIS_MASK 0x00000200L +#define PSWUSP0_PCIE_LC_CNTL2__LC_LINK_UP_REVERSAL_EN_MASK 0x00000400L +#define 
PSWUSP0_PCIE_LC_CNTL2__LC_ILLEGAL_STATE_MASK 0x00000800L +#define PSWUSP0_PCIE_LC_CNTL2__LC_ILLEGAL_STATE_RESTART_EN_MASK 0x00001000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_WAIT_FOR_OTHER_LANES_MODE_MASK 0x00002000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_ELEC_IDLE_MODE_MASK 0x0000C000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_DISABLE_INFERRED_ELEC_IDLE_DET_MASK 0x00010000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK 0x00020000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK 0x00040000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_BLOCK_EL_IDLE_IN_L0_MASK 0x00100000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK 0x00200000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_ASSERT_INACTIVE_DURING_HOLD_MASK 0x00400000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_WAIT_FOR_LANES_IN_LW_NEG_MASK 0x01800000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_PWR_DOWN_NEG_OFF_LANES_MASK 0x02000000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_DISABLE_LOST_SYM_LOCK_ARCS_MASK 0x04000000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_LINK_BW_NOTIFICATION_DIS_MASK 0x08000000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_PMI_L1_WAIT_FOR_SLV_IDLE_MASK 0x10000000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_TEST_TIMER_SEL_MASK 0x60000000L +#define PSWUSP0_PCIE_LC_CNTL2__LC_ENABLE_INFERRED_ELEC_IDLE_FOR_PI_MASK 0x80000000L +//PCIE_LC_BW_CHANGE_CNTL +#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN__SHIFT 0x0 +#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE__SHIFT 0x1 +#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE__SHIFT 0x2 +#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE__SHIFT 0x3 +#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE__SHIFT 0x4 +#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG__SHIFT 0x5 +#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE__SHIFT 0x6 +#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE__SHIFT 0x7 +#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER__SHIFT 0x8 +#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED__SHIFT 0x9 +#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE__SHIFT 0xa +#define PCIE_LC_BW_CHANGE_CNTL__LC_SPEED_NEG_UNSUCCESSFUL__SHIFT 0xb +#define PCIE_LC_BW_CHANGE_CNTL__LC_BW_CHANGE_INT_EN_MASK 0x00000001L +#define PCIE_LC_BW_CHANGE_CNTL__LC_HW_INIT_SPEED_CHANGE_MASK 0x00000002L +#define PCIE_LC_BW_CHANGE_CNTL__LC_SW_INIT_SPEED_CHANGE_MASK 0x00000004L +#define PCIE_LC_BW_CHANGE_CNTL__LC_OTHER_INIT_SPEED_CHANGE_MASK 0x00000008L +#define PCIE_LC_BW_CHANGE_CNTL__LC_RELIABILITY_SPEED_CHANGE_MASK 0x00000010L +#define PCIE_LC_BW_CHANGE_CNTL__LC_FAILED_SPEED_NEG_MASK 0x00000020L +#define PCIE_LC_BW_CHANGE_CNTL__LC_LONG_LW_CHANGE_MASK 0x00000040L +#define PCIE_LC_BW_CHANGE_CNTL__LC_SHORT_LW_CHANGE_MASK 0x00000080L +#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_OTHER_MASK 0x00000100L +#define PCIE_LC_BW_CHANGE_CNTL__LC_LW_CHANGE_FAILED_MASK 0x00000200L +#define PCIE_LC_BW_CHANGE_CNTL__LC_LINK_BW_NOTIFICATION_DETECT_MODE_MASK 0x00000400L +#define PCIE_LC_BW_CHANGE_CNTL__LC_SPEED_NEG_UNSUCCESSFUL_MASK 0x00000800L +//PCIE_LC_CDR_CNTL +#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF__SHIFT 0x0 +#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS__SHIFT 0xc +#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE__SHIFT 0x18 +#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_OFF_MASK 0x00000FFFL +#define PCIE_LC_CDR_CNTL__LC_CDR_TEST_SETS_MASK 0x00FFF000L +#define PCIE_LC_CDR_CNTL__LC_CDR_SET_TYPE_MASK 0x03000000L +//PCIE_LC_LANE_CNTL +#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES__SHIFT 0x0 +#define PCIE_LC_LANE_CNTL__LC_LANE_DIS__SHIFT 0x10 +#define PCIE_LC_LANE_CNTL__LC_CORRUPTED_LANES_MASK 0x0000FFFFL +#define 
PCIE_LC_LANE_CNTL__LC_LANE_DIS_MASK 0xFFFF0000L +//PCIE_LC_CNTL3 +#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS__SHIFT 0x0 +#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL__SHIFT 0x1 +#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS__SHIFT 0x3 +#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT__SHIFT 0x4 +#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN__SHIFT 0x5 +#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED__SHIFT 0x6 +#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x8 +#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT__SHIFT 0x9 +#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN__SHIFT 0xa +#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE__SHIFT 0xb +#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN__SHIFT 0x10 +#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN__SHIFT 0x11 +#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN__SHIFT 0x12 +#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL__SHIFT 0x13 +#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN__SHIFT 0x15 +#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK__SHIFT 0x17 +#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL__SHIFT 0x18 +#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL__SHIFT 0x1a +#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY__SHIFT 0x1e +#define PCIE_LC_CNTL3__LC_AUTO_RECOVERY_DIS__SHIFT 0x1f +#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_MASK 0x00000001L +#define PCIE_LC_CNTL3__LC_SELECT_DEEMPHASIS_CNTL_MASK 0x00000006L +#define PCIE_LC_CNTL3__LC_RCVD_DEEMPHASIS_MASK 0x00000008L +#define PCIE_LC_CNTL3__LC_COMP_TO_DETECT_MASK 0x00000010L +#define PCIE_LC_CNTL3__LC_RESET_TSX_CNT_IN_RLOCK_EN_MASK 0x00000020L +#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK 0x000000C0L +#define PCIE_LC_CNTL3__LC_AUTO_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000100L +#define PCIE_LC_CNTL3__LC_CLR_FAILED_AUTO_SPD_CHANGE_CNT_MASK 0x00000200L +#define PCIE_LC_CNTL3__LC_ENHANCED_HOT_PLUG_EN_MASK 0x00000400L +#define PCIE_LC_CNTL3__LC_RCVR_DET_EN_OVERRIDE_MASK 0x00000800L +#define PCIE_LC_CNTL3__LC_CHIP_BIF_USB_IDLE_EN_MASK 0x00010000L +#define PCIE_LC_CNTL3__LC_L1_BLOCK_RECONFIG_EN_MASK 0x00020000L +#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_EN_MASK 0x00040000L +#define PCIE_LC_CNTL3__LC_AUTO_DISABLE_SPEED_SUPPORT_MAX_FAIL_SEL_MASK 0x00180000L +#define PCIE_LC_CNTL3__LC_FAST_L1_ENTRY_EXIT_EN_MASK 0x00200000L +#define PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK 0x00800000L +#define PCIE_LC_CNTL3__LC_HW_VOLTAGE_IF_CONTROL_MASK 0x03000000L +#define PCIE_LC_CNTL3__LC_VOLTAGE_TIMER_SEL_MASK 0x3C000000L +#define PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK 0x40000000L +#define PCIE_LC_CNTL3__LC_AUTO_RECOVERY_DIS_MASK 0x80000000L +//PCIE_LC_CNTL4 +#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR__SHIFT 0x0 +#define PCIE_LC_CNTL4__LC_DIS_CONTIG_END_SET_CHECK__SHIFT 0x2 +#define PCIE_LC_CNTL4__LC_DIS_ASPM_L1_IN_SPEED_CHANGE__SHIFT 0x3 +#define PCIE_LC_CNTL4__LC_BYPASS_EQ_8GT__SHIFT 0x4 +#define PCIE_LC_CNTL4__LC_REDO_EQ_8GT__SHIFT 0x5 +#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS__SHIFT 0x6 +#define PCIE_LC_CNTL4__LC_IGNORE_PARITY__SHIFT 0x7 +#define PCIE_LC_CNTL4__LC_EQ_SEARCH_MODE_8GT__SHIFT 0x8 +#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK__SHIFT 0xa +#define PCIE_LC_CNTL4__LC_USC_EQ_NOT_REQD_8GT__SHIFT 0xb +#define PCIE_LC_CNTL4__LC_USC_GO_TO_EQ_8GT__SHIFT 0xc +#define PCIE_LC_CNTL4__LC_SET_QUIESCE__SHIFT 0xd +#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD__SHIFT 0xe +#define PCIE_LC_CNTL4__LC_UNEXPECTED_COEFFS_RCVD_8GT__SHIFT 0xf +#define PCIE_LC_CNTL4__LC_BYPASS_EQ_REQ_PHASE_8GT__SHIFT 0x10 +#define 
PCIE_LC_CNTL4__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_8GT__SHIFT 0x11 +#define PCIE_LC_CNTL4__LC_FORCE_PRESET_VALUE_8GT__SHIFT 0x12 +#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS__SHIFT 0x16 +#define PCIE_LC_CNTL4__LC_TX_SWING__SHIFT 0x17 +#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE__SHIFT 0x18 +#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN__SHIFT 0x19 +#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK__SHIFT 0x1a +#define PCIE_LC_CNTL4__LC_TX_ENABLE_BEHAVIOUR_MASK 0x00000003L +#define PCIE_LC_CNTL4__LC_DIS_CONTIG_END_SET_CHECK_MASK 0x00000004L +#define PCIE_LC_CNTL4__LC_DIS_ASPM_L1_IN_SPEED_CHANGE_MASK 0x00000008L +#define PCIE_LC_CNTL4__LC_BYPASS_EQ_8GT_MASK 0x00000010L +#define PCIE_LC_CNTL4__LC_REDO_EQ_8GT_MASK 0x00000020L +#define PCIE_LC_CNTL4__LC_EXTEND_EIEOS_MASK 0x00000040L +#define PCIE_LC_CNTL4__LC_IGNORE_PARITY_MASK 0x00000080L +#define PCIE_LC_CNTL4__LC_EQ_SEARCH_MODE_8GT_MASK 0x00000300L +#define PCIE_LC_CNTL4__LC_DSC_CHECK_COEFFS_IN_RLOCK_MASK 0x00000400L +#define PCIE_LC_CNTL4__LC_USC_EQ_NOT_REQD_8GT_MASK 0x00000800L +#define PCIE_LC_CNTL4__LC_USC_GO_TO_EQ_8GT_MASK 0x00001000L +#define PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK 0x00002000L +#define PCIE_LC_CNTL4__LC_QUIESCE_RCVD_MASK 0x00004000L +#define PCIE_LC_CNTL4__LC_UNEXPECTED_COEFFS_RCVD_8GT_MASK 0x00008000L +#define PCIE_LC_CNTL4__LC_BYPASS_EQ_REQ_PHASE_8GT_MASK 0x00010000L +#define PCIE_LC_CNTL4__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_8GT_MASK 0x00020000L +#define PCIE_LC_CNTL4__LC_FORCE_PRESET_VALUE_8GT_MASK 0x003C0000L +#define PCIE_LC_CNTL4__LC_USC_DELAY_DLLPS_MASK 0x00400000L +#define PCIE_LC_CNTL4__LC_TX_SWING_MASK 0x00800000L +#define PCIE_LC_CNTL4__LC_EQ_WAIT_FOR_EVAL_DONE_MASK 0x01000000L +#define PCIE_LC_CNTL4__LC_8GT_SKIP_ORDER_EN_MASK 0x02000000L +#define PCIE_LC_CNTL4__LC_WAIT_FOR_MORE_TS_IN_RLOCK_MASK 0xFC000000L +//PCIE_LC_CNTL5 +#define PCIE_LC_CNTL5__LC_DSC_EQ_FS_LF_INVALID_TO_PRESETS__SHIFT 0x18 +#define PCIE_LC_CNTL5__LC_TX_SWING_OVERRIDE__SHIFT 0x19 +#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS__SHIFT 0x1a +#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_TEST__SHIFT 0x1b +#define PCIE_LC_CNTL5__LC_WAIT_IN_DETECT__SHIFT 0x1c +#define PCIE_LC_CNTL5__LC_HOLD_TRAINING_MODE__SHIFT 0x1d +#define PCIE_LC_CNTL5__LC_DSC_EQ_FS_LF_INVALID_TO_PRESETS_MASK 0x01000000L +#define PCIE_LC_CNTL5__LC_TX_SWING_OVERRIDE_MASK 0x02000000L +#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_MASK 0x04000000L +#define PCIE_LC_CNTL5__LC_ACCEPT_ALL_PRESETS_TEST_MASK 0x08000000L +#define PCIE_LC_CNTL5__LC_WAIT_IN_DETECT_MASK 0x10000000L +#define PCIE_LC_CNTL5__LC_HOLD_TRAINING_MODE_MASK 0xE0000000L +//PCIE_LC_FORCE_COEFF +#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF_8GT__SHIFT 0x0 +#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR_8GT__SHIFT 0x1 +#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR_8GT__SHIFT 0x7 +#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR_8GT__SHIFT 0xd +#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN_8GT__SHIFT 0x13 +#define PCIE_LC_FORCE_COEFF__LC_PRESET_10_EN__SHIFT 0x14 +#define PCIE_LC_FORCE_COEFF__LC_FORCE_COEFF_8GT_MASK 0x00000001L +#define PCIE_LC_FORCE_COEFF__LC_FORCE_PRE_CURSOR_8GT_MASK 0x0000007EL +#define PCIE_LC_FORCE_COEFF__LC_FORCE_CURSOR_8GT_MASK 0x00001F80L +#define PCIE_LC_FORCE_COEFF__LC_FORCE_POST_CURSOR_8GT_MASK 0x0007E000L +#define PCIE_LC_FORCE_COEFF__LC_3X3_COEFF_SEARCH_EN_8GT_MASK 0x00080000L +#define PCIE_LC_FORCE_COEFF__LC_PRESET_10_EN_MASK 0x00100000L +//PCIE_LC_BEST_EQ_SETTINGS +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET__SHIFT 0x0 +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR__SHIFT 0x4 
+#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR__SHIFT 0xa +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR__SHIFT 0x10 +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM__SHIFT 0x16 +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_SETTINGS_RATE__SHIFT 0x1e +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRESET_MASK 0x0000000FL +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_PRECURSOR_MASK 0x000003F0L +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_CURSOR_MASK 0x0000FC00L +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_POSTCURSOR_MASK 0x003F0000L +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_FOM_MASK 0x3FC00000L +#define PCIE_LC_BEST_EQ_SETTINGS__LC_BEST_SETTINGS_RATE_MASK 0x40000000L +//PCIE_LC_FORCE_EQ_REQ_COEFF +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_8GT__SHIFT 0x0 +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ_8GT__SHIFT 0x1 +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ_8GT__SHIFT 0x7 +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ_8GT__SHIFT 0xd +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END_8GT__SHIFT 0x13 +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END_8GT__SHIFT 0x19 +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_8GT_MASK 0x00000001L +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_PRE_CURSOR_REQ_8GT_MASK 0x0000007EL +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_CURSOR_REQ_8GT_MASK 0x00001F80L +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FORCE_POST_CURSOR_REQ_8GT_MASK 0x0007E000L +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_FS_OTHER_END_8GT_MASK 0x01F80000L +#define PCIE_LC_FORCE_EQ_REQ_COEFF__LC_LF_OTHER_END_8GT_MASK 0x7E000000L +//PCIE_LC_CNTL6 +#define PCIE_LC_CNTL6__LC_SPC_MODE_2P5GT__SHIFT 0x0 +#define PCIE_LC_CNTL6__LC_SPC_MODE_5GT__SHIFT 0x2 +#define PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT 0x4 +#define PCIE_LC_CNTL6__LC_SPC_MODE_16GT__SHIFT 0x6 +#define PCIE_LC_CNTL6__LC_SRIS_EN__SHIFT 0x8 +#define PCIE_LC_CNTL6__LC_SRNS_SKIP_IN_SRIS__SHIFT 0x9 +#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_EN__SHIFT 0xd +#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_FACTOR__SHIFT 0xe +#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_MODE__SHIFT 0x10 +#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_OUT_OF_RANGE__SHIFT 0x12 +#define PCIE_LC_CNTL6__LC_DEFER_SKIP_FOR_EIEOS_EN__SHIFT 0x13 +#define PCIE_LC_CNTL6__LC_SEND_EIEOS_IN_RCFG__SHIFT 0x14 +#define PCIE_LC_CNTL6__LC_L1_POWERDOWN__SHIFT 0x15 +#define PCIE_LC_CNTL6__LC_P2_ENTRY__SHIFT 0x16 +#define PCIE_LC_CNTL6__LC_RXRECOVER_EN__SHIFT 0x17 +#define PCIE_LC_CNTL6__LC_RXRECOVER_TIMEOUT__SHIFT 0x18 +#define PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN__SHIFT 0x1f +#define PCIE_LC_CNTL6__LC_SPC_MODE_2P5GT_MASK 0x00000003L +#define PCIE_LC_CNTL6__LC_SPC_MODE_5GT_MASK 0x0000000CL +#define PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK 0x00000030L +#define PCIE_LC_CNTL6__LC_SPC_MODE_16GT_MASK 0x000000C0L +#define PCIE_LC_CNTL6__LC_SRIS_EN_MASK 0x00000100L +#define PCIE_LC_CNTL6__LC_SRNS_SKIP_IN_SRIS_MASK 0x00001E00L +#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_EN_MASK 0x00002000L +#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_FACTOR_MASK 0x0000C000L +#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_MODE_MASK 0x00030000L +#define PCIE_LC_CNTL6__LC_SRIS_AUTODETECT_OUT_OF_RANGE_MASK 0x00040000L +#define PCIE_LC_CNTL6__LC_DEFER_SKIP_FOR_EIEOS_EN_MASK 0x00080000L +#define PCIE_LC_CNTL6__LC_SEND_EIEOS_IN_RCFG_MASK 0x00100000L +#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L +#define PCIE_LC_CNTL6__LC_P2_ENTRY_MASK 0x00400000L +#define PCIE_LC_CNTL6__LC_RXRECOVER_EN_MASK 0x00800000L +#define PCIE_LC_CNTL6__LC_RXRECOVER_TIMEOUT_MASK 
0x7F000000L +#define PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK 0x80000000L +//PCIE_LC_CNTL7 +#define PCIE_LC_CNTL7__LC_EXPECTED_TS2_CFG_COMPLETE__SHIFT 0x0 +#define PCIE_LC_CNTL7__LC_IGNORE_NON_CONTIG_SETS_IN_RCFG__SHIFT 0x1 +#define PCIE_LC_CNTL7__LC_ROBUST_TRAINING_BIT_CHK_EN__SHIFT 0x2 +#define PCIE_LC_CNTL7__LC_RESET_TS_COUNT_ON_EI__SHIFT 0x3 +#define PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN__SHIFT 0x4 +#define PCIE_LC_CNTL7__LC_CLEAR_REVERSE_ATTEMPT_IN_L0__SHIFT 0x5 +#define PCIE_LC_CNTL7__LC_LOCK_REVERSAL__SHIFT 0x6 +#define PCIE_LC_CNTL7__LC_FORCE_RX_EQ_IN_PROGRESS__SHIFT 0x7 +#define PCIE_LC_CNTL7__LC_EVER_IDLE_TO_RLOCK__SHIFT 0x8 +#define PCIE_LC_CNTL7__LC_RXEQEVAL_AFTER_TIMEOUT_EN__SHIFT 0x9 +#define PCIE_LC_CNTL7__LC_WAIT_FOR_LANES_IN_CONFIG__SHIFT 0xa +#define PCIE_LC_CNTL7__LC_REQ_COEFFS_FOR_TXMARGIN_EN__SHIFT 0xb +#define PCIE_LC_CNTL7__LC_ESM_WAIT_FOR_PLL_INIT_DONE_L1__SHIFT 0xc +#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_INTERVAL__SHIFT 0xd +#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_MODE__SHIFT 0x15 +#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_UPCONFIG_EN__SHIFT 0x16 +#define PCIE_LC_CNTL7__LC_LINK_MANAGEMENT_EN__SHIFT 0x17 +#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_STATE__SHIFT 0x1b +#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_DONE__SHIFT 0x1c +#define PCIE_LC_CNTL7__LC_ESM_REDO_INIT__SHIFT 0x1d +#define PCIE_LC_CNTL7__LC_MULTIPORT_ESM__SHIFT 0x1e +#define PCIE_LC_CNTL7__LC_CONSECUTIVE_EIOS_RESET_EN__SHIFT 0x1f +#define PCIE_LC_CNTL7__LC_EXPECTED_TS2_CFG_COMPLETE_MASK 0x00000001L +#define PCIE_LC_CNTL7__LC_IGNORE_NON_CONTIG_SETS_IN_RCFG_MASK 0x00000002L +#define PCIE_LC_CNTL7__LC_ROBUST_TRAINING_BIT_CHK_EN_MASK 0x00000004L +#define PCIE_LC_CNTL7__LC_RESET_TS_COUNT_ON_EI_MASK 0x00000008L +#define PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK 0x00000010L +#define PCIE_LC_CNTL7__LC_CLEAR_REVERSE_ATTEMPT_IN_L0_MASK 0x00000020L +#define PCIE_LC_CNTL7__LC_LOCK_REVERSAL_MASK 0x00000040L +#define PCIE_LC_CNTL7__LC_FORCE_RX_EQ_IN_PROGRESS_MASK 0x00000080L +#define PCIE_LC_CNTL7__LC_EVER_IDLE_TO_RLOCK_MASK 0x00000100L +#define PCIE_LC_CNTL7__LC_RXEQEVAL_AFTER_TIMEOUT_EN_MASK 0x00000200L +#define PCIE_LC_CNTL7__LC_WAIT_FOR_LANES_IN_CONFIG_MASK 0x00000400L +#define PCIE_LC_CNTL7__LC_REQ_COEFFS_FOR_TXMARGIN_EN_MASK 0x00000800L +#define PCIE_LC_CNTL7__LC_ESM_WAIT_FOR_PLL_INIT_DONE_L1_MASK 0x00001000L +#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_INTERVAL_MASK 0x001FE000L +#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_MODE_MASK 0x00200000L +#define PCIE_LC_CNTL7__LC_SCHEDULED_RXEQEVAL_UPCONFIG_EN_MASK 0x00400000L +#define PCIE_LC_CNTL7__LC_LINK_MANAGEMENT_EN_MASK 0x00800000L +#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_STATE_MASK 0x08000000L +#define PCIE_LC_CNTL7__LC_ESM_PLL_INIT_DONE_MASK 0x10000000L +#define PCIE_LC_CNTL7__LC_ESM_REDO_INIT_MASK 0x20000000L +#define PCIE_LC_CNTL7__LC_MULTIPORT_ESM_MASK 0x40000000L +#define PCIE_LC_CNTL7__LC_CONSECUTIVE_EIOS_RESET_EN_MASK 0x80000000L +//PCIE_LINK_MANAGEMENT_STATUS +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_SPEED_UPDATE__SHIFT 0x0 +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_SPEED_CHANGE_ATTEMPT_FAILED__SHIFT 0x1 +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_SPEED_SUPPORT_UPDATE__SHIFT 0x2 +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_WIDTH_UPDATE__SHIFT 0x3 +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_WIDTH_CHANGE_ATTEMPT_FAILED__SHIFT 0x4 +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_WIDTH_SUPPORT_UPDATE__SHIFT 0x5 +#define PCIE_LINK_MANAGEMENT_STATUS__POWER_DOWN_COMMAND_COMPLETE__SHIFT 0x6 +#define 
PCIE_LINK_MANAGEMENT_STATUS__BANDWIDTH_UPDATE__SHIFT 0x7 +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_POWER_STATE_CHANGE__SHIFT 0x8 +#define PCIE_LINK_MANAGEMENT_STATUS__BW_REQUIREMENT_HINT__SHIFT 0x9 +#define PCIE_LINK_MANAGEMENT_STATUS__EQUALIZATION_REQUEST__SHIFT 0xa +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_ESM_REQUEST__SHIFT 0xb +#define PCIE_LINK_MANAGEMENT_STATUS__LOW_SPEED_REQD_IMMEDIATE__SHIFT 0xc +#define PCIE_LINK_MANAGEMENT_STATUS__ESTABLISH_ESM_PLL_SETTINGS__SHIFT 0xd +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_SPEED_UPDATE_MASK 0x00000001L +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK 0x00000002L +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK 0x00000004L +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_WIDTH_UPDATE_MASK 0x00000008L +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK 0x00000010L +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK 0x00000020L +#define PCIE_LINK_MANAGEMENT_STATUS__POWER_DOWN_COMMAND_COMPLETE_MASK 0x00000040L +#define PCIE_LINK_MANAGEMENT_STATUS__BANDWIDTH_UPDATE_MASK 0x00000080L +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_POWER_STATE_CHANGE_MASK 0x00000100L +#define PCIE_LINK_MANAGEMENT_STATUS__BW_REQUIREMENT_HINT_MASK 0x00000200L +#define PCIE_LINK_MANAGEMENT_STATUS__EQUALIZATION_REQUEST_MASK 0x00000400L +#define PCIE_LINK_MANAGEMENT_STATUS__LINK_PARTNER_ESM_REQUEST_MASK 0x00000800L +#define PCIE_LINK_MANAGEMENT_STATUS__LOW_SPEED_REQD_IMMEDIATE_MASK 0x00001000L +#define PCIE_LINK_MANAGEMENT_STATUS__ESTABLISH_ESM_PLL_SETTINGS_MASK 0x00002000L +//PCIE_LINK_MANAGEMENT_MASK +#define PCIE_LINK_MANAGEMENT_MASK__LINK_SPEED_UPDATE_MASK__SHIFT 0x0 +#define PCIE_LINK_MANAGEMENT_MASK__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK__SHIFT 0x1 +#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK__SHIFT 0x2 +#define PCIE_LINK_MANAGEMENT_MASK__LINK_WIDTH_UPDATE_MASK__SHIFT 0x3 +#define PCIE_LINK_MANAGEMENT_MASK__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK__SHIFT 0x4 +#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK__SHIFT 0x5 +#define PCIE_LINK_MANAGEMENT_MASK__POWER_DOWN_COMMAND_COMPLETE_MASK__SHIFT 0x6 +#define PCIE_LINK_MANAGEMENT_MASK__BANDWIDTH_UPDATE_MASK__SHIFT 0x7 +#define PCIE_LINK_MANAGEMENT_MASK__LINK_POWER_STATE_CHANGE_MASK__SHIFT 0x8 +#define PCIE_LINK_MANAGEMENT_MASK__BW_REQUIREMENT_HINT_MASK__SHIFT 0x9 +#define PCIE_LINK_MANAGEMENT_MASK__EQUALIZATION_REQUEST_MASK__SHIFT 0xa +#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_ESM_REQUEST_MASK__SHIFT 0xb +#define PCIE_LINK_MANAGEMENT_MASK__LOW_SPEED_REQD_IMMEDIATE_MASK__SHIFT 0xc +#define PCIE_LINK_MANAGEMENT_MASK__ESTABLISH_ESM_PLL_SETTINGS_MASK__SHIFT 0xd +#define PCIE_LINK_MANAGEMENT_MASK__LINK_SPEED_UPDATE_MASK_MASK 0x00000001L +#define PCIE_LINK_MANAGEMENT_MASK__LINK_SPEED_CHANGE_ATTEMPT_FAILED_MASK_MASK 0x00000002L +#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_SPEED_SUPPORT_UPDATE_MASK_MASK 0x00000004L +#define PCIE_LINK_MANAGEMENT_MASK__LINK_WIDTH_UPDATE_MASK_MASK 0x00000008L +#define PCIE_LINK_MANAGEMENT_MASK__LINK_WIDTH_CHANGE_ATTEMPT_FAILED_MASK_MASK 0x00000010L +#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_WIDTH_SUPPORT_UPDATE_MASK_MASK 0x00000020L +#define PCIE_LINK_MANAGEMENT_MASK__POWER_DOWN_COMMAND_COMPLETE_MASK_MASK 0x00000040L +#define PCIE_LINK_MANAGEMENT_MASK__BANDWIDTH_UPDATE_MASK_MASK 0x00000080L +#define PCIE_LINK_MANAGEMENT_MASK__LINK_POWER_STATE_CHANGE_MASK_MASK 0x00000100L +#define 
PCIE_LINK_MANAGEMENT_MASK__BW_REQUIREMENT_HINT_MASK_MASK 0x00000200L +#define PCIE_LINK_MANAGEMENT_MASK__EQUALIZATION_REQUEST_MASK_MASK 0x00000400L +#define PCIE_LINK_MANAGEMENT_MASK__LINK_PARTNER_ESM_REQUEST_MASK_MASK 0x00000800L +#define PCIE_LINK_MANAGEMENT_MASK__LOW_SPEED_REQD_IMMEDIATE_MASK_MASK 0x00001000L +#define PCIE_LINK_MANAGEMENT_MASK__ESTABLISH_ESM_PLL_SETTINGS_MASK_MASK 0x00002000L +//PCIE_LINK_MANAGEMENT_CNTL +#define PCIE_LINK_MANAGEMENT_CNTL__FAR_END_WIDTH_SUPPORT__SHIFT 0x0 +#define PCIE_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE__SHIFT 0x3 +#define PCIE_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK__SHIFT 0x7 +#define PCIE_LINK_MANAGEMENT_CNTL__LINK_UP__SHIFT 0xb +#define PCIE_LINK_MANAGEMENT_CNTL__PORT_POWERED_DOWN__SHIFT 0xc +#define PCIE_LINK_MANAGEMENT_CNTL__SPC_MODE__SHIFT 0xd +#define PCIE_LINK_MANAGEMENT_CNTL__CLOCK_RATE__SHIFT 0xf +#define PCIE_LINK_MANAGEMENT_CNTL__LOW_BW_HINT__SHIFT 0x11 +#define PCIE_LINK_MANAGEMENT_CNTL__HIGH_BW_HINT__SHIFT 0x12 +#define PCIE_LINK_MANAGEMENT_CNTL__LOW_BW_THRESHOLD__SHIFT 0x13 +#define PCIE_LINK_MANAGEMENT_CNTL__HIGH_BW_THRESHOLD__SHIFT 0x17 +#define PCIE_LINK_MANAGEMENT_CNTL__BW_HINT_COUNT__SHIFT 0x1b +#define PCIE_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_8GT__SHIFT 0x1e +#define PCIE_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_16GT__SHIFT 0x1f +#define PCIE_LINK_MANAGEMENT_CNTL__FAR_END_WIDTH_SUPPORT_MASK 0x00000007L +#define PCIE_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK 0x00000078L +#define PCIE_LINK_MANAGEMENT_CNTL__LINK_POWER_STATE_MASK_MASK 0x00000780L +#define PCIE_LINK_MANAGEMENT_CNTL__LINK_UP_MASK 0x00000800L +#define PCIE_LINK_MANAGEMENT_CNTL__PORT_POWERED_DOWN_MASK 0x00001000L +#define PCIE_LINK_MANAGEMENT_CNTL__SPC_MODE_MASK 0x00006000L +#define PCIE_LINK_MANAGEMENT_CNTL__CLOCK_RATE_MASK 0x00018000L +#define PCIE_LINK_MANAGEMENT_CNTL__LOW_BW_HINT_MASK 0x00020000L +#define PCIE_LINK_MANAGEMENT_CNTL__HIGH_BW_HINT_MASK 0x00040000L +#define PCIE_LINK_MANAGEMENT_CNTL__LOW_BW_THRESHOLD_MASK 0x00780000L +#define PCIE_LINK_MANAGEMENT_CNTL__HIGH_BW_THRESHOLD_MASK 0x07800000L +#define PCIE_LINK_MANAGEMENT_CNTL__BW_HINT_COUNT_MASK 0x38000000L +#define PCIE_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_8GT_MASK 0x40000000L +#define PCIE_LINK_MANAGEMENT_CNTL__EQ_REQ_RCVD_16GT_MASK 0x80000000L +//PCIE_LC_L1_PM_SUBSTATE +#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN__SHIFT 0x0 +#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE__SHIFT 0x1 +#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE__SHIFT 0x2 +#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE__SHIFT 0x3 +#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE__SHIFT 0x4 +#define PCIE_LC_L1_PM_SUBSTATE__LC_CLKREQ_FILTER_EN__SHIFT 0x5 +#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_SCALE__SHIFT 0x6 +#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_VALUE__SHIFT 0x8 +#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_1_POWERDOWN__SHIFT 0x10 +#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_2_POWERDOWN__SHIFT 0x14 +#define PCIE_LC_L1_PM_SUBSTATE__LC_DEFER_L1_2_EXIT__SHIFT 0x17 +#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L +#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L +#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L +#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L +#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L +#define PCIE_LC_L1_PM_SUBSTATE__LC_CLKREQ_FILTER_EN_MASK 0x00000020L +#define PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_SCALE_MASK 0x000000C0L +#define 
PCIE_LC_L1_PM_SUBSTATE__LC_T_POWER_ON_VALUE_MASK 0x00001F00L +#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_1_POWERDOWN_MASK 0x00070000L +#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_2_POWERDOWN_MASK 0x00700000L +#define PCIE_LC_L1_PM_SUBSTATE__LC_DEFER_L1_2_EXIT_MASK 0x03800000L +//PCIE_LC_L1_PM_SUBSTATE2 +#define PCIE_LC_L1_PM_SUBSTATE2__LC_CM_RESTORE_TIME__SHIFT 0x0 +#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_SCALE__SHIFT 0x8 +#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_VALUE__SHIFT 0x10 +#define PCIE_LC_L1_PM_SUBSTATE2__LC_CM_RESTORE_TIME_MASK 0x000000FFL +#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_SCALE_MASK 0x00000700L +#define PCIE_LC_L1_PM_SUBSTATE2__LC_LTR_THRESHOLD_VALUE_MASK 0x03FF0000L +//PCIE_LC_PORT_ORDER +#define PCIE_LC_PORT_ORDER__LC_PORT_OFFSET__SHIFT 0x0 +#define PCIE_LC_PORT_ORDER__LC_PORT_OFFSET_MASK 0x0000000FL +//PCIEP_BCH_ECC_CNTL +#define PCIEP_BCH_ECC_CNTL__STRAP_BCH_ECC_EN__SHIFT 0x0 +#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_THRESHOLD__SHIFT 0x8 +#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_STATUS__SHIFT 0x10 +#define PCIEP_BCH_ECC_CNTL__STRAP_BCH_ECC_EN_MASK 0x00000001L +#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_THRESHOLD_MASK 0x0000FF00L +#define PCIEP_BCH_ECC_CNTL__BCH_ECC_ERROR_STATUS_MASK 0xFFFF0000L +//PCIE_LC_CNTL8 +#define PCIE_LC_CNTL8__LC_EQ_SEARCH_MODE_16GT__SHIFT 0x0 +#define PCIE_LC_CNTL8__LC_BYPASS_EQ_16GT__SHIFT 0x2 +#define PCIE_LC_CNTL8__LC_BYPASS_EQ_PRESET_16GT__SHIFT 0x3 +#define PCIE_LC_CNTL8__LC_REDO_EQ_16GT__SHIFT 0x7 +#define PCIE_LC_CNTL8__LC_USC_EQ_NOT_REQD_16GT__SHIFT 0x8 +#define PCIE_LC_CNTL8__LC_USC_GO_TO_EQ_16GT__SHIFT 0x9 +#define PCIE_LC_CNTL8__LC_UNEXPECTED_COEFFS_RCVD_16GT__SHIFT 0xa +#define PCIE_LC_CNTL8__LC_BYPASS_EQ_REQ_PHASE_16GT__SHIFT 0xb +#define PCIE_LC_CNTL8__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_16GT__SHIFT 0xc +#define PCIE_LC_CNTL8__LC_FORCE_PRESET_VALUE_16GT__SHIFT 0xd +#define PCIE_LC_CNTL8__LC_EQTS2_PRESET_EN__SHIFT 0x11 +#define PCIE_LC_CNTL8__LC_EQTS2_PRESET__SHIFT 0x12 +#define PCIE_LC_CNTL8__LC_USE_EQTS2_PRESET__SHIFT 0x16 +#define PCIE_LC_CNTL8__LC_FOM_TIME__SHIFT 0x17 +#define PCIE_LC_CNTL8__LC_SAFE_EQ_SEARCH__SHIFT 0x19 +#define PCIE_LC_CNTL8__LC_DONT_CHECK_EQTS_IN_RCFG__SHIFT 0x1a +#define PCIE_LC_CNTL8__LC_DELAY_COEFF_UPDATE_DIS__SHIFT 0x1b +#define PCIE_LC_CNTL8__LC_8GT_EQ_REDO_EN__SHIFT 0x1c +#define PCIE_LC_CNTL8__LC_WAIT_FOR_EIEOS_IN_RLOCK__SHIFT 0x1d +#define PCIE_LC_CNTL8__LC_DYNAMIC_INACTIVE_TS_SELECT__SHIFT 0x1e +#define PCIE_LC_CNTL8__LC_EQ_SEARCH_MODE_16GT_MASK 0x00000003L +#define PCIE_LC_CNTL8__LC_BYPASS_EQ_16GT_MASK 0x00000004L +#define PCIE_LC_CNTL8__LC_BYPASS_EQ_PRESET_16GT_MASK 0x00000078L +#define PCIE_LC_CNTL8__LC_REDO_EQ_16GT_MASK 0x00000080L +#define PCIE_LC_CNTL8__LC_USC_EQ_NOT_REQD_16GT_MASK 0x00000100L +#define PCIE_LC_CNTL8__LC_USC_GO_TO_EQ_16GT_MASK 0x00000200L +#define PCIE_LC_CNTL8__LC_UNEXPECTED_COEFFS_RCVD_16GT_MASK 0x00000400L +#define PCIE_LC_CNTL8__LC_BYPASS_EQ_REQ_PHASE_16GT_MASK 0x00000800L +#define PCIE_LC_CNTL8__LC_FORCE_PRESET_IN_EQ_REQ_PHASE_16GT_MASK 0x00001000L +#define PCIE_LC_CNTL8__LC_FORCE_PRESET_VALUE_16GT_MASK 0x0001E000L +#define PCIE_LC_CNTL8__LC_EQTS2_PRESET_EN_MASK 0x00020000L +#define PCIE_LC_CNTL8__LC_EQTS2_PRESET_MASK 0x003C0000L +#define PCIE_LC_CNTL8__LC_USE_EQTS2_PRESET_MASK 0x00400000L +#define PCIE_LC_CNTL8__LC_FOM_TIME_MASK 0x01800000L +#define PCIE_LC_CNTL8__LC_SAFE_EQ_SEARCH_MASK 0x02000000L +#define PCIE_LC_CNTL8__LC_DONT_CHECK_EQTS_IN_RCFG_MASK 0x04000000L +#define PCIE_LC_CNTL8__LC_DELAY_COEFF_UPDATE_DIS_MASK 0x08000000L +#define 
PCIE_LC_CNTL8__LC_8GT_EQ_REDO_EN_MASK 0x10000000L +#define PCIE_LC_CNTL8__LC_WAIT_FOR_EIEOS_IN_RLOCK_MASK 0x20000000L +#define PCIE_LC_CNTL8__LC_DYNAMIC_INACTIVE_TS_SELECT_MASK 0xC0000000L +//PCIE_LC_CNTL9 +#define PCIE_LC_CNTL9__LC_OVERRIDE_RETIMER_PRESENCE_EN__SHIFT 0x0 +#define PCIE_LC_CNTL9__LC_OVERRIDE_RETIMER_PRESENCE__SHIFT 0x1 +#define PCIE_LC_CNTL9__LC_IGNORE_RETIMER_PRESENCE__SHIFT 0x3 +#define PCIE_LC_CNTL9__LC_RETIMER_PRESENCE__SHIFT 0x4 +#define PCIE_LC_CNTL9__LC_LOCK_IN_EQ_RESPONSE__SHIFT 0xd +#define PCIE_LC_CNTL9__LC_USC_ACCEPTABLE_PRESETS__SHIFT 0xe +#define PCIE_LC_CNTL9__LC_DSC_ACCEPT_8GT_EQ_REDO__SHIFT 0x18 +#define PCIE_LC_CNTL9__LC_DSC_ACCEPT_16GT_EQ_REDO__SHIFT 0x19 +#define PCIE_LC_CNTL9__LC_USC_HW_8GT_EQ_REDO_EN__SHIFT 0x1a +#define PCIE_LC_CNTL9__LC_USC_HW_16GT_EQ_REDO_EN__SHIFT 0x1b +#define PCIE_LC_CNTL9__LC_DELAY_DETECTED_TSX_RCV_EN__SHIFT 0x1c +#define PCIE_LC_CNTL9__LC_OVERRIDE_RETIMER_PRESENCE_EN_MASK 0x00000001L +#define PCIE_LC_CNTL9__LC_OVERRIDE_RETIMER_PRESENCE_MASK 0x00000006L +#define PCIE_LC_CNTL9__LC_IGNORE_RETIMER_PRESENCE_MASK 0x00000008L +#define PCIE_LC_CNTL9__LC_RETIMER_PRESENCE_MASK 0x00000030L +#define PCIE_LC_CNTL9__LC_LOCK_IN_EQ_RESPONSE_MASK 0x00002000L +#define PCIE_LC_CNTL9__LC_USC_ACCEPTABLE_PRESETS_MASK 0x00FFC000L +#define PCIE_LC_CNTL9__LC_DSC_ACCEPT_8GT_EQ_REDO_MASK 0x01000000L +#define PCIE_LC_CNTL9__LC_DSC_ACCEPT_16GT_EQ_REDO_MASK 0x02000000L +#define PCIE_LC_CNTL9__LC_USC_HW_8GT_EQ_REDO_EN_MASK 0x04000000L +#define PCIE_LC_CNTL9__LC_USC_HW_16GT_EQ_REDO_EN_MASK 0x08000000L +#define PCIE_LC_CNTL9__LC_DELAY_DETECTED_TSX_RCV_EN_MASK 0x10000000L +//PCIE_LC_FORCE_COEFF2 +#define PCIE_LC_FORCE_COEFF2__LC_FORCE_COEFF_16GT__SHIFT 0x0 +#define PCIE_LC_FORCE_COEFF2__LC_FORCE_PRE_CURSOR_16GT__SHIFT 0x1 +#define PCIE_LC_FORCE_COEFF2__LC_FORCE_CURSOR_16GT__SHIFT 0x7 +#define PCIE_LC_FORCE_COEFF2__LC_FORCE_POST_CURSOR_16GT__SHIFT 0xd +#define PCIE_LC_FORCE_COEFF2__LC_3X3_COEFF_SEARCH_EN_16GT__SHIFT 0x13 +#define PCIE_LC_FORCE_COEFF2__LC_FORCE_COEFF_16GT_MASK 0x00000001L +#define PCIE_LC_FORCE_COEFF2__LC_FORCE_PRE_CURSOR_16GT_MASK 0x0000007EL +#define PCIE_LC_FORCE_COEFF2__LC_FORCE_CURSOR_16GT_MASK 0x00001F80L +#define PCIE_LC_FORCE_COEFF2__LC_FORCE_POST_CURSOR_16GT_MASK 0x0007E000L +#define PCIE_LC_FORCE_COEFF2__LC_3X3_COEFF_SEARCH_EN_16GT_MASK 0x00080000L +//PCIE_LC_FORCE_EQ_REQ_COEFF2 +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_16GT__SHIFT 0x0 +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_PRE_CURSOR_REQ_16GT__SHIFT 0x1 +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_CURSOR_REQ_16GT__SHIFT 0x7 +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_POST_CURSOR_REQ_16GT__SHIFT 0xd +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FS_OTHER_END_16GT__SHIFT 0x13 +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_LF_OTHER_END_16GT__SHIFT 0x19 +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_COEFF_IN_EQ_REQ_PHASE_16GT_MASK 0x00000001L +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_PRE_CURSOR_REQ_16GT_MASK 0x0000007EL +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_CURSOR_REQ_16GT_MASK 0x00001F80L +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FORCE_POST_CURSOR_REQ_16GT_MASK 0x0007E000L +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_FS_OTHER_END_16GT_MASK 0x01F80000L +#define PCIE_LC_FORCE_EQ_REQ_COEFF2__LC_LF_OTHER_END_16GT_MASK 0x7E000000L + + +// addressBlock: nbio_pcie0_pciedir +//PCIE_RESERVED +#define PCIE_RESERVED__RESERVED__SHIFT 0x0 +#define PCIE_RESERVED__RESERVED_MASK 0xFFFFFFFFL +//PCIE_SCRATCH +#define PCIE_SCRATCH__PCIE_SCRATCH__SHIFT 0x0 +#define 
PCIE_SCRATCH__PCIE_SCRATCH_MASK 0xFFFFFFFFL +//PCIE_RX_NUM_NAK +#define PCIE_RX_NUM_NAK__RX_NUM_NAK__SHIFT 0x0 +#define PCIE_RX_NUM_NAK__RX_NUM_NAK_MASK 0xFFFFFFFFL +//PCIE_RX_NUM_NAK_GENERATED +#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED__SHIFT 0x0 +#define PCIE_RX_NUM_NAK_GENERATED__RX_NUM_NAK_GENERATED_MASK 0xFFFFFFFFL +//PCIE_CNTL +#define PCIE_CNTL__HWINIT_WR_LOCK__SHIFT 0x0 +#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL__SHIFT 0x1 +#define PCIE_CNTL__UR_ERR_REPORT_DIS__SHIFT 0x7 +#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS__SHIFT 0x8 +#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE__SHIFT 0x9 +#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE__SHIFT 0xa +#define PCIE_CNTL__RX_RCB_ATS_UC_DIS__SHIFT 0xf +#define PCIE_CNTL__RX_RCB_REORDER_EN__SHIFT 0x10 +#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS__SHIFT 0x11 +#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS__SHIFT 0x12 +#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE__SHIFT 0x13 +#define PCIE_CNTL__RX_RCB_WRONG_PREFIX_DIS__SHIFT 0x14 +#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS__SHIFT 0x15 +#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS__SHIFT 0x16 +#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS__SHIFT 0x17 +#define PCIE_CNTL__RX_IGNORE_LTR_MSG_UR__SHIFT 0x1e +#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN__SHIFT 0x1f +#define PCIE_CNTL__HWINIT_WR_LOCK_MASK 0x00000001L +#define PCIE_CNTL__LC_HOT_PLUG_DELAY_SEL_MASK 0x0000000EL +#define PCIE_CNTL__UR_ERR_REPORT_DIS_MASK 0x00000080L +#define PCIE_CNTL__PCIE_MALFORM_ATOMIC_OPS_MASK 0x00000100L +#define PCIE_CNTL__PCIE_HT_NP_MEM_WRITE_MASK 0x00000200L +#define PCIE_CNTL__RX_SB_ADJ_PAYLOAD_SIZE_MASK 0x00001C00L +#define PCIE_CNTL__RX_RCB_ATS_UC_DIS_MASK 0x00008000L +#define PCIE_CNTL__RX_RCB_REORDER_EN_MASK 0x00010000L +#define PCIE_CNTL__RX_RCB_INVALID_SIZE_DIS_MASK 0x00020000L +#define PCIE_CNTL__RX_RCB_UNEXP_CPL_DIS_MASK 0x00040000L +#define PCIE_CNTL__RX_RCB_CPL_TIMEOUT_TEST_MODE_MASK 0x00080000L +#define PCIE_CNTL__RX_RCB_WRONG_PREFIX_DIS_MASK 0x00100000L +#define PCIE_CNTL__RX_RCB_WRONG_ATTR_DIS_MASK 0x00200000L +#define PCIE_CNTL__RX_RCB_WRONG_FUNCNUM_DIS_MASK 0x00400000L +#define PCIE_CNTL__RX_ATS_TRAN_CPL_SPLIT_DIS_MASK 0x00800000L +#define PCIE_CNTL__RX_IGNORE_LTR_MSG_UR_MASK 0x40000000L +#define PCIE_CNTL__RX_CPL_POSTED_REQ_ORD_EN_MASK 0x80000000L +//PCIE_CONFIG_CNTL +#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT 0x0 +#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x8 +#define PCIE_CONFIG_CNTL__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x9 +#define PCIE_CONFIG_CNTL__CI_10BIT_TAG_EN_OVERRIDE__SHIFT 0xb +#define PCIE_CONFIG_CNTL__CI_SWUS_10BIT_TAG_EN_OVERRIDE__SHIFT 0xd +#define PCIE_CONFIG_CNTL__CI_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x10 +#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x11 +#define PCIE_CONFIG_CNTL__CI_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x14 +#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x15 +#define PCIE_CONFIG_CNTL__CI_MAX_READ_SAFE_MODE__SHIFT 0x18 +#define PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19 +#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1b +#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT 0x1c +#define PCIE_CONFIG_CNTL__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x1e +#define PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK 0x0000000FL +#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_PAYLOAD_SIZE_MODE_MASK 0x00000100L +#define PCIE_CONFIG_CNTL__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE_MASK 0x00000600L +#define PCIE_CONFIG_CNTL__CI_10BIT_TAG_EN_OVERRIDE_MASK 0x00001800L +#define 
PCIE_CONFIG_CNTL__CI_SWUS_10BIT_TAG_EN_OVERRIDE_MASK 0x00006000L +#define PCIE_CONFIG_CNTL__CI_MAX_PAYLOAD_SIZE_MODE_MASK 0x00010000L +#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_PAYLOAD_SIZE_MASK 0x000E0000L +#define PCIE_CONFIG_CNTL__CI_MAX_READ_REQUEST_SIZE_MODE_MASK 0x00100000L +#define PCIE_CONFIG_CNTL__CI_PRIV_MAX_READ_REQUEST_SIZE_MASK 0x00E00000L +#define PCIE_CONFIG_CNTL__CI_MAX_READ_SAFE_MODE_MASK 0x01000000L +#define PCIE_CONFIG_CNTL__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L +#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK 0x08000000L +#define PCIE_CONFIG_CNTL__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK 0x30000000L +#define PCIE_CONFIG_CNTL__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE_MASK 0xC0000000L +//PCIE_TX_TRACKING_ADDR_LO +#define PCIE_TX_TRACKING_ADDR_LO__TX_TRACKING_ADDR_LO__SHIFT 0x2 +#define PCIE_TX_TRACKING_ADDR_LO__TX_TRACKING_ADDR_LO_MASK 0xFFFFFFFCL +//PCIE_TX_TRACKING_ADDR_HI +#define PCIE_TX_TRACKING_ADDR_HI__TX_TRACKING_ADDR_HI__SHIFT 0x0 +#define PCIE_TX_TRACKING_ADDR_HI__TX_TRACKING_ADDR_HI_MASK 0xFFFFFFFFL +//PCIE_TX_TRACKING_CTRL_STATUS +#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_ENABLE__SHIFT 0x0 +#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT__SHIFT 0x1 +#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID__SHIFT 0x8 +#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID__SHIFT 0xf +#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_ENABLE_MASK 0x00000001L +#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_PORT_MASK 0x0000000EL +#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_UNIT_ID_MASK 0x00007F00L +#define PCIE_TX_TRACKING_CTRL_STATUS__TX_TRACKING_STATUS_VALID_MASK 0x00008000L +//PCIE_BW_BY_UNITID +#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_EN__SHIFT 0x0 +#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID__SHIFT 0x8 +#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_EN_MASK 0x00000001L +#define PCIE_BW_BY_UNITID__CI_MST_PERF_UNITID_MASK 0x00007F00L +//PCIE_CNTL2 +#define PCIE_CNTL2__TX_ARB_ROUND_ROBIN_EN__SHIFT 0x0 +#define PCIE_CNTL2__TX_ARB_SLV_LIMIT__SHIFT 0x1 +#define PCIE_CNTL2__TX_ARB_MST_LIMIT__SHIFT 0x6 +#define PCIE_CNTL2__TX_BLOCK_TLP_ON_PM_DIS__SHIFT 0xb +#define PCIE_CNTL2__TX_NP_MEM_WRITE_SWP_ENCODING__SHIFT 0xc +#define PCIE_CNTL2__TX_ATOMIC_OPS_DISABLE__SHIFT 0xd +#define PCIE_CNTL2__TX_ATOMIC_ORDERING_DIS__SHIFT 0xe +#define PCIE_CNTL2__SLV_MEM_LS_EN__SHIFT 0x10 +#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN__SHIFT 0x11 +#define PCIE_CNTL2__MST_MEM_LS_EN__SHIFT 0x12 +#define PCIE_CNTL2__REPLAY_MEM_LS_EN__SHIFT 0x13 +#define PCIE_CNTL2__SLV_MEM_SD_EN__SHIFT 0x14 +#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN__SHIFT 0x15 +#define PCIE_CNTL2__MST_MEM_SD_EN__SHIFT 0x16 +#define PCIE_CNTL2__REPLAY_MEM_SD_EN__SHIFT 0x17 +#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING__SHIFT 0x18 +#define PCIE_CNTL2__SLV_MEM_DS_EN__SHIFT 0x1d +#define PCIE_CNTL2__MST_MEM_DS_EN__SHIFT 0x1e +#define PCIE_CNTL2__REPLAY_MEM_DS_EN__SHIFT 0x1f +#define PCIE_CNTL2__TX_ARB_ROUND_ROBIN_EN_MASK 0x00000001L +#define PCIE_CNTL2__TX_ARB_SLV_LIMIT_MASK 0x0000003EL +#define PCIE_CNTL2__TX_ARB_MST_LIMIT_MASK 0x000007C0L +#define PCIE_CNTL2__TX_BLOCK_TLP_ON_PM_DIS_MASK 0x00000800L +#define PCIE_CNTL2__TX_NP_MEM_WRITE_SWP_ENCODING_MASK 0x00001000L +#define PCIE_CNTL2__TX_ATOMIC_OPS_DISABLE_MASK 0x00002000L +#define PCIE_CNTL2__TX_ATOMIC_ORDERING_DIS_MASK 0x00004000L +#define PCIE_CNTL2__SLV_MEM_LS_EN_MASK 0x00010000L +#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_LS_EN_MASK 0x00020000L +#define PCIE_CNTL2__MST_MEM_LS_EN_MASK 0x00040000L +#define 
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK 0x00080000L +#define PCIE_CNTL2__SLV_MEM_SD_EN_MASK 0x00100000L +#define PCIE_CNTL2__SLV_MEM_AGGRESSIVE_SD_EN_MASK 0x00200000L +#define PCIE_CNTL2__MST_MEM_SD_EN_MASK 0x00400000L +#define PCIE_CNTL2__REPLAY_MEM_SD_EN_MASK 0x00800000L +#define PCIE_CNTL2__RX_NP_MEM_WRITE_ENCODING_MASK 0x1F000000L +#define PCIE_CNTL2__SLV_MEM_DS_EN_MASK 0x20000000L +#define PCIE_CNTL2__MST_MEM_DS_EN_MASK 0x40000000L +#define PCIE_CNTL2__REPLAY_MEM_DS_EN_MASK 0x80000000L +//PCIE_RX_CNTL2 +#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR__SHIFT 0x0 +#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR__SHIFT 0x1 +#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR__SHIFT 0x2 +#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR__SHIFT 0x3 +#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR__SHIFT 0x4 +#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR__SHIFT 0x5 +#define PCIE_RX_CNTL2__RX_RCB_LATENCY_EN__SHIFT 0x8 +#define PCIE_RX_CNTL2__RX_RCB_LATENCY_SCALE__SHIFT 0x9 +#define PCIE_RX_CNTL2__SLVCPL_MEM_LS_EN__SHIFT 0xc +#define PCIE_RX_CNTL2__SLVCPL_MEM_SD_EN__SHIFT 0xd +#define PCIE_RX_CNTL2__SLVCPL_MEM_DS_EN__SHIFT 0xe +#define PCIE_RX_CNTL2__RX_RCB_LATENCY_MAX_COUNT__SHIFT 0x10 +#define PCIE_RX_CNTL2__FLR_EXTEND_MODE__SHIFT 0x1c +#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVALIDPASID_UR_MASK 0x00000001L +#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMRD_UR_MASK 0x00000002L +#define PCIE_RX_CNTL2__RX_IGNORE_EP_TRANSMWR_UR_MASK 0x00000004L +#define PCIE_RX_CNTL2__RX_IGNORE_EP_ATSTRANSREQ_UR_MASK 0x00000008L +#define PCIE_RX_CNTL2__RX_IGNORE_EP_PAGEREQMSG_UR_MASK 0x00000010L +#define PCIE_RX_CNTL2__RX_IGNORE_EP_INVCPL_UR_MASK 0x00000020L +#define PCIE_RX_CNTL2__RX_RCB_LATENCY_EN_MASK 0x00000100L +#define PCIE_RX_CNTL2__RX_RCB_LATENCY_SCALE_MASK 0x00000E00L +#define PCIE_RX_CNTL2__SLVCPL_MEM_LS_EN_MASK 0x00001000L +#define PCIE_RX_CNTL2__SLVCPL_MEM_SD_EN_MASK 0x00002000L +#define PCIE_RX_CNTL2__SLVCPL_MEM_DS_EN_MASK 0x00004000L +#define PCIE_RX_CNTL2__RX_RCB_LATENCY_MAX_COUNT_MASK 0x03FF0000L +#define PCIE_RX_CNTL2__FLR_EXTEND_MODE_MASK 0x70000000L +//PCIE_TX_F0_ATTR_CNTL +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_P__SHIFT 0x0 +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_NP__SHIFT 0x2 +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_CPL__SHIFT 0x4 +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_P__SHIFT 0x6 +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_NP__SHIFT 0x8 +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_P__SHIFT 0xa +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_NP__SHIFT 0xc +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_P_MASK 0x00000003L +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_NP_MASK 0x0000000CL +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_IDO_OVERRIDE_CPL_MASK 0x00000030L +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_P_MASK 0x000000C0L +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_RO_OVERRIDE_NP_MASK 0x00000300L +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_P_MASK 0x00000C00L +#define PCIE_TX_F0_ATTR_CNTL__TX_F0_SNR_OVERRIDE_NP_MASK 0x00003000L +//PCIE_TX_SWUS_ATTR_CNTL +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_P__SHIFT 0x0 +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_NP__SHIFT 0x2 +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_CPL__SHIFT 0x4 +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_P__SHIFT 0x6 +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_NP__SHIFT 0x8 +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_P__SHIFT 0xa +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_NP__SHIFT 0xc +#define 
PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_P_MASK 0x00000003L +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_NP_MASK 0x0000000CL +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_IDO_OVERRIDE_CPL_MASK 0x00000030L +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_P_MASK 0x000000C0L +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_RO_OVERRIDE_NP_MASK 0x00000300L +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_P_MASK 0x00000C00L +#define PCIE_TX_SWUS_ATTR_CNTL__TX_SWUS_SNR_OVERRIDE_NP_MASK 0x00003000L +//PCIE_CI_CNTL +#define PCIE_CI_CNTL__CI_SLAVE_SPLIT_MODE__SHIFT 0x2 +#define PCIE_CI_CNTL__CI_SLAVE_GEN_USR_DIS__SHIFT 0x3 +#define PCIE_CI_CNTL__CI_MST_CMPL_DUMMY_DATA__SHIFT 0x4 +#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE__SHIFT 0x6 +#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS__SHIFT 0x8 +#define PCIE_CI_CNTL__CI_RC_ORDERING_DIS__SHIFT 0x9 +#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS__SHIFT 0xa +#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE__SHIFT 0xb +#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR__SHIFT 0xc +#define PCIE_CI_CNTL__CI_SLV_SDP_ERR_DATA_ON_POISONED_DIS__SHIFT 0x10 +#define PCIE_CI_CNTL__TX_PRIV_TLP_PREFIX_BLOCKING_DIS__SHIFT 0x11 +#define PCIE_CI_CNTL__TX_PRIV_POISONED_TLP_EGRESS_BLOCKING_DIS__SHIFT 0x12 +#define PCIE_CI_CNTL__TX_PRIV_ATOMICOP_EGRESS_BLOCKING_DIS__SHIFT 0x13 +#define PCIE_CI_CNTL__PRIV_AUTO_SLOT_PWR_LIMIT_DIS__SHIFT 0x14 +#define PCIE_CI_CNTL__TX_DISABLE_SLOT_PWR_LIMIT_MSG__SHIFT 0x15 +#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_UR_EN__SHIFT 0x16 +#define PCIE_CI_CNTL__RX_RCB_RC_DPC_EXCEPTION_EN__SHIFT 0x17 +#define PCIE_CI_CNTL__RX_RCB_RC_DPC_CPL_CTL_EN__SHIFT 0x18 +#define PCIE_CI_CNTL__CI_MSTSPLIT_DIS__SHIFT 0x19 +#define PCIE_CI_CNTL__CI_MSTSPLIT_REQ_CHAIN_DIS__SHIFT 0x1a +#define PCIE_CI_CNTL__TX_MWR_SPLIT_QW_PKT_SAFE_MODE__SHIFT 0x1b +#define PCIE_CI_CNTL__CI_MST_TAG_BORROWING_DIS__SHIFT 0x1c +#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_SC_IN_LINK_DOWN_EN__SHIFT 0x1d +#define PCIE_CI_CNTL__CI_SLAVE_SPLIT_MODE_MASK 0x00000004L +#define PCIE_CI_CNTL__CI_SLAVE_GEN_USR_DIS_MASK 0x00000008L +#define PCIE_CI_CNTL__CI_MST_CMPL_DUMMY_DATA_MASK 0x00000010L +#define PCIE_CI_CNTL__CI_SLV_RC_RD_REQ_SIZE_MASK 0x000000C0L +#define PCIE_CI_CNTL__CI_SLV_ORDERING_DIS_MASK 0x00000100L +#define PCIE_CI_CNTL__CI_RC_ORDERING_DIS_MASK 0x00000200L +#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_DIS_MASK 0x00000400L +#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_MODE_MASK 0x00000800L +#define PCIE_CI_CNTL__CI_SLV_CPL_ALLOC_SOR_MASK 0x00001000L +#define PCIE_CI_CNTL__CI_SLV_SDP_ERR_DATA_ON_POISONED_DIS_MASK 0x00010000L +#define PCIE_CI_CNTL__TX_PRIV_TLP_PREFIX_BLOCKING_DIS_MASK 0x00020000L +#define PCIE_CI_CNTL__TX_PRIV_POISONED_TLP_EGRESS_BLOCKING_DIS_MASK 0x00040000L +#define PCIE_CI_CNTL__TX_PRIV_ATOMICOP_EGRESS_BLOCKING_DIS_MASK 0x00080000L +#define PCIE_CI_CNTL__PRIV_AUTO_SLOT_PWR_LIMIT_DIS_MASK 0x00100000L +#define PCIE_CI_CNTL__TX_DISABLE_SLOT_PWR_LIMIT_MSG_MASK 0x00200000L +#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_UR_EN_MASK 0x00400000L +#define PCIE_CI_CNTL__RX_RCB_RC_DPC_EXCEPTION_EN_MASK 0x00800000L +#define PCIE_CI_CNTL__RX_RCB_RC_DPC_CPL_CTL_EN_MASK 0x01000000L +#define PCIE_CI_CNTL__CI_MSTSPLIT_DIS_MASK 0x02000000L +#define PCIE_CI_CNTL__CI_MSTSPLIT_REQ_CHAIN_DIS_MASK 0x04000000L +#define PCIE_CI_CNTL__TX_MWR_SPLIT_QW_PKT_SAFE_MODE_MASK 0x08000000L +#define PCIE_CI_CNTL__CI_MST_TAG_BORROWING_DIS_MASK 0x10000000L +#define PCIE_CI_CNTL__RX_RCB_RC_CTO_TO_SC_IN_LINK_DOWN_EN_MASK 0x20000000L +//PCIE_BUS_CNTL +#define PCIE_BUS_CNTL__PMI_INT_DIS__SHIFT 0x6 +#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS__SHIFT 
0x7 +#define PCIE_BUS_CNTL__TRUE_PM_STATUS_EN__SHIFT 0xc +#define PCIE_BUS_CNTL__PMI_INT_DIS_MASK 0x00000040L +#define PCIE_BUS_CNTL__IMMEDIATE_PMI_DIS_MASK 0x00000080L +#define PCIE_BUS_CNTL__TRUE_PM_STATUS_EN_MASK 0x00001000L +//PCIE_LC_STATE6 +#define PCIE_LC_STATE6__LC_PREV_STATE24__SHIFT 0x0 +#define PCIE_LC_STATE6__LC_PREV_STATE25__SHIFT 0x8 +#define PCIE_LC_STATE6__LC_PREV_STATE26__SHIFT 0x10 +#define PCIE_LC_STATE6__LC_PREV_STATE27__SHIFT 0x18 +#define PCIE_LC_STATE6__LC_PREV_STATE24_MASK 0x0000003FL +#define PCIE_LC_STATE6__LC_PREV_STATE25_MASK 0x00003F00L +#define PCIE_LC_STATE6__LC_PREV_STATE26_MASK 0x003F0000L +#define PCIE_LC_STATE6__LC_PREV_STATE27_MASK 0x3F000000L +//PCIE_LC_STATE7 +#define PCIE_LC_STATE7__LC_PREV_STATE28__SHIFT 0x0 +#define PCIE_LC_STATE7__LC_PREV_STATE29__SHIFT 0x8 +#define PCIE_LC_STATE7__LC_PREV_STATE30__SHIFT 0x10 +#define PCIE_LC_STATE7__LC_PREV_STATE31__SHIFT 0x18 +#define PCIE_LC_STATE7__LC_PREV_STATE28_MASK 0x0000003FL +#define PCIE_LC_STATE7__LC_PREV_STATE29_MASK 0x00003F00L +#define PCIE_LC_STATE7__LC_PREV_STATE30_MASK 0x003F0000L +#define PCIE_LC_STATE7__LC_PREV_STATE31_MASK 0x3F000000L +//PCIE_LC_STATE8 +#define PCIE_LC_STATE8__LC_PREV_STATE32__SHIFT 0x0 +#define PCIE_LC_STATE8__LC_PREV_STATE33__SHIFT 0x8 +#define PCIE_LC_STATE8__LC_PREV_STATE34__SHIFT 0x10 +#define PCIE_LC_STATE8__LC_PREV_STATE35__SHIFT 0x18 +#define PCIE_LC_STATE8__LC_PREV_STATE32_MASK 0x0000003FL +#define PCIE_LC_STATE8__LC_PREV_STATE33_MASK 0x00003F00L +#define PCIE_LC_STATE8__LC_PREV_STATE34_MASK 0x003F0000L +#define PCIE_LC_STATE8__LC_PREV_STATE35_MASK 0x3F000000L +//PCIE_LC_STATE9 +#define PCIE_LC_STATE9__LC_PREV_STATE36__SHIFT 0x0 +#define PCIE_LC_STATE9__LC_PREV_STATE37__SHIFT 0x8 +#define PCIE_LC_STATE9__LC_PREV_STATE38__SHIFT 0x10 +#define PCIE_LC_STATE9__LC_PREV_STATE39__SHIFT 0x18 +#define PCIE_LC_STATE9__LC_PREV_STATE36_MASK 0x0000003FL +#define PCIE_LC_STATE9__LC_PREV_STATE37_MASK 0x00003F00L +#define PCIE_LC_STATE9__LC_PREV_STATE38_MASK 0x003F0000L +#define PCIE_LC_STATE9__LC_PREV_STATE39_MASK 0x3F000000L +//PCIE_LC_STATE10 +#define PCIE_LC_STATE10__LC_PREV_STATE40__SHIFT 0x0 +#define PCIE_LC_STATE10__LC_PREV_STATE41__SHIFT 0x8 +#define PCIE_LC_STATE10__LC_PREV_STATE42__SHIFT 0x10 +#define PCIE_LC_STATE10__LC_PREV_STATE43__SHIFT 0x18 +#define PCIE_LC_STATE10__LC_PREV_STATE40_MASK 0x0000003FL +#define PCIE_LC_STATE10__LC_PREV_STATE41_MASK 0x00003F00L +#define PCIE_LC_STATE10__LC_PREV_STATE42_MASK 0x003F0000L +#define PCIE_LC_STATE10__LC_PREV_STATE43_MASK 0x3F000000L +//PCIE_LC_STATE11 +#define PCIE_LC_STATE11__LC_PREV_STATE44__SHIFT 0x0 +#define PCIE_LC_STATE11__LC_PREV_STATE45__SHIFT 0x8 +#define PCIE_LC_STATE11__LC_PREV_STATE46__SHIFT 0x10 +#define PCIE_LC_STATE11__LC_PREV_STATE47__SHIFT 0x18 +#define PCIE_LC_STATE11__LC_PREV_STATE44_MASK 0x0000003FL +#define PCIE_LC_STATE11__LC_PREV_STATE45_MASK 0x00003F00L +#define PCIE_LC_STATE11__LC_PREV_STATE46_MASK 0x003F0000L +#define PCIE_LC_STATE11__LC_PREV_STATE47_MASK 0x3F000000L +//PCIE_LC_STATUS1 +#define PCIE_LC_STATUS1__LC_REVERSE_RCVR__SHIFT 0x0 +#define PCIE_LC_STATUS1__LC_REVERSE_XMIT__SHIFT 0x1 +#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT 0x2 +#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT 0x5 +#define PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK 0x00000001L +#define PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK 0x00000002L +#define PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK 0x0000001CL +#define PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK 0x000000E0L +//PCIE_LC_STATUS2 +#define 
PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES__SHIFT 0x0 +#define PCIE_LC_STATUS2__LC_TURN_ON_LANE__SHIFT 0x10 +#define PCIE_LC_STATUS2__LC_TOTAL_INACTIVE_LANES_MASK 0x0000FFFFL +#define PCIE_LC_STATUS2__LC_TURN_ON_LANE_MASK 0xFFFF0000L +//PCIE_TX_CNTL3 +#define PCIE_TX_CNTL3__TX_REGNUM_FROM_ADDR_CFGWR_IOWR_DIS__SHIFT 0x0 +#define PCIE_TX_CNTL3__CI_SLV_CPL_ALLOC_OVERSUBSCRIBE_MODE__SHIFT 0x1 +#define PCIE_TX_CNTL3__TX_REGNUM_FROM_ADDR_CFGWR_IOWR_DIS_MASK 0x00000001L +#define PCIE_TX_CNTL3__CI_SLV_CPL_ALLOC_OVERSUBSCRIBE_MODE_MASK 0x0000000EL +//PCIE_WPR_CNTL +#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN__SHIFT 0x0 +#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN__SHIFT 0x1 +#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN__SHIFT 0x2 +#define PCIE_WPR_CNTL__WPR_RESET_COR_EN__SHIFT 0x3 +#define PCIE_WPR_CNTL__WPR_RESET_REG_EN__SHIFT 0x4 +#define PCIE_WPR_CNTL__WPR_RESET_STY_EN__SHIFT 0x5 +#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN__SHIFT 0x6 +#define PCIE_WPR_CNTL__WPR_RESET_HOT_RST_EN_MASK 0x00000001L +#define PCIE_WPR_CNTL__WPR_RESET_LNK_DWN_EN_MASK 0x00000002L +#define PCIE_WPR_CNTL__WPR_RESET_LNK_DIS_EN_MASK 0x00000004L +#define PCIE_WPR_CNTL__WPR_RESET_COR_EN_MASK 0x00000008L +#define PCIE_WPR_CNTL__WPR_RESET_REG_EN_MASK 0x00000010L +#define PCIE_WPR_CNTL__WPR_RESET_STY_EN_MASK 0x00000020L +#define PCIE_WPR_CNTL__WPR_RESET_PHY_EN_MASK 0x00000040L +//PCIE_RX_LAST_TLP0 +#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0__SHIFT 0x0 +#define PCIE_RX_LAST_TLP0__RX_LAST_TLP0_MASK 0xFFFFFFFFL +//PCIE_RX_LAST_TLP1 +#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1__SHIFT 0x0 +#define PCIE_RX_LAST_TLP1__RX_LAST_TLP1_MASK 0xFFFFFFFFL +//PCIE_RX_LAST_TLP2 +#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2__SHIFT 0x0 +#define PCIE_RX_LAST_TLP2__RX_LAST_TLP2_MASK 0xFFFFFFFFL +//PCIE_RX_LAST_TLP3 +#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3__SHIFT 0x0 +#define PCIE_RX_LAST_TLP3__RX_LAST_TLP3_MASK 0xFFFFFFFFL +//PCIE_TX_LAST_TLP0 +#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0__SHIFT 0x0 +#define PCIE_TX_LAST_TLP0__TX_LAST_TLP0_MASK 0xFFFFFFFFL +//PCIE_TX_LAST_TLP1 +#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1__SHIFT 0x0 +#define PCIE_TX_LAST_TLP1__TX_LAST_TLP1_MASK 0xFFFFFFFFL +//PCIE_TX_LAST_TLP2 +#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2__SHIFT 0x0 +#define PCIE_TX_LAST_TLP2__TX_LAST_TLP2_MASK 0xFFFFFFFFL +//PCIE_TX_LAST_TLP3 +#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3__SHIFT 0x0 +#define PCIE_TX_LAST_TLP3__TX_LAST_TLP3_MASK 0xFFFFFFFFL +//PCIE_I2C_REG_ADDR_EXPAND +#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR__SHIFT 0x0 +#define PCIE_I2C_REG_ADDR_EXPAND__I2C_REG_ADDR_MASK 0x0001FFFFL +//PCIE_I2C_REG_DATA +#define PCIE_I2C_REG_DATA__I2C_REG_DATA__SHIFT 0x0 +#define PCIE_I2C_REG_DATA__I2C_REG_DATA_MASK 0xFFFFFFFFL +//PCIE_CFG_CNTL +#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG__SHIFT 0x0 +#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG__SHIFT 0x1 +#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG__SHIFT 0x2 +#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_HIDDEN_REG_MASK 0x00000001L +#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN2_HIDDEN_REG_MASK 0x00000002L +#define PCIE_CFG_CNTL__CFG_EN_DEC_TO_GEN3_HIDDEN_REG_MASK 0x00000004L +//PCIE_LC_PM_CNTL +#define PCIE_LC_PM_CNTL__LC_PORT_0_CLKREQB_MAP__SHIFT 0x0 +#define PCIE_LC_PM_CNTL__LC_PORT_1_CLKREQB_MAP__SHIFT 0x4 +#define PCIE_LC_PM_CNTL__LC_PORT_2_CLKREQB_MAP__SHIFT 0x8 +#define PCIE_LC_PM_CNTL__LC_PORT_3_CLKREQB_MAP__SHIFT 0xc +#define PCIE_LC_PM_CNTL__LC_PORT_4_CLKREQB_MAP__SHIFT 0x10 +#define PCIE_LC_PM_CNTL__LC_PORT_5_CLKREQB_MAP__SHIFT 0x14 +#define PCIE_LC_PM_CNTL__LC_PORT_6_CLKREQB_MAP__SHIFT 0x18 +#define 
PCIE_LC_PM_CNTL__LC_PORT_7_CLKREQB_MAP__SHIFT 0x1c +#define PCIE_LC_PM_CNTL__LC_PORT_0_CLKREQB_MAP_MASK 0x0000000FL +#define PCIE_LC_PM_CNTL__LC_PORT_1_CLKREQB_MAP_MASK 0x000000F0L +#define PCIE_LC_PM_CNTL__LC_PORT_2_CLKREQB_MAP_MASK 0x00000F00L +#define PCIE_LC_PM_CNTL__LC_PORT_3_CLKREQB_MAP_MASK 0x0000F000L +#define PCIE_LC_PM_CNTL__LC_PORT_4_CLKREQB_MAP_MASK 0x000F0000L +#define PCIE_LC_PM_CNTL__LC_PORT_5_CLKREQB_MAP_MASK 0x00F00000L +#define PCIE_LC_PM_CNTL__LC_PORT_6_CLKREQB_MAP_MASK 0x0F000000L +#define PCIE_LC_PM_CNTL__LC_PORT_7_CLKREQB_MAP_MASK 0xF0000000L +//PCIE_LC_PORT_ORDER_CNTL +#define PCIE_LC_PORT_ORDER_CNTL__LC_PORT_ORDER_EN__SHIFT 0x0 +#define PCIE_LC_PORT_ORDER_CNTL__LC_PORT_ORDER_EN_MASK 0x00000001L +//PCIE_P_CNTL +#define PCIE_P_CNTL__P_PWRDN_EN__SHIFT 0x0 +#define PCIE_P_CNTL__P_SYMALIGN_MODE__SHIFT 0x1 +#define PCIE_P_CNTL__P_IGNORE_CRC_ERR__SHIFT 0x4 +#define PCIE_P_CNTL__P_IGNORE_LEN_ERR__SHIFT 0x5 +#define PCIE_P_CNTL__P_IGNORE_EDB_ERR__SHIFT 0x6 +#define PCIE_P_CNTL__P_IGNORE_IDL_ERR__SHIFT 0x7 +#define PCIE_P_CNTL__P_IGNORE_TOK_ERR__SHIFT 0x8 +#define PCIE_P_CNTL__P_BLK_LOCK_MODE__SHIFT 0xc +#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK__SHIFT 0xd +#define PCIE_P_CNTL__P_ELEC_IDLE_MODE__SHIFT 0xe +#define PCIE_P_CNTL__DLP_IGNORE_IN_L1_EN__SHIFT 0x10 +#define PCIE_P_CNTL__ASSERT_DVALID_ON_EI_TRANS__SHIFT 0x11 +#define PCIE_P_CNTL__P_PWRDN_EN_MASK 0x00000001L +#define PCIE_P_CNTL__P_SYMALIGN_MODE_MASK 0x00000002L +#define PCIE_P_CNTL__P_IGNORE_CRC_ERR_MASK 0x00000010L +#define PCIE_P_CNTL__P_IGNORE_LEN_ERR_MASK 0x00000020L +#define PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK 0x00000040L +#define PCIE_P_CNTL__P_IGNORE_IDL_ERR_MASK 0x00000080L +#define PCIE_P_CNTL__P_IGNORE_TOK_ERR_MASK 0x00000100L +#define PCIE_P_CNTL__P_BLK_LOCK_MODE_MASK 0x00001000L +#define PCIE_P_CNTL__P_ALWAYS_USE_FAST_TXCLK_MASK 0x00002000L +#define PCIE_P_CNTL__P_ELEC_IDLE_MODE_MASK 0x0000C000L +#define PCIE_P_CNTL__DLP_IGNORE_IN_L1_EN_MASK 0x00010000L +#define PCIE_P_CNTL__ASSERT_DVALID_ON_EI_TRANS_MASK 0x00020000L +//PCIE_P_BUF_STATUS +#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR__SHIFT 0x0 +#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR__SHIFT 0x10 +#define PCIE_P_BUF_STATUS__P_OVERFLOW_ERR_MASK 0x0000FFFFL +#define PCIE_P_BUF_STATUS__P_UNDERFLOW_ERR_MASK 0xFFFF0000L +//PCIE_P_DECODER_STATUS +#define PCIE_P_DECODER_STATUS__P_DECODE_ERR__SHIFT 0x0 +#define PCIE_P_DECODER_STATUS__P_DECODE_ERR_MASK 0x0000FFFFL +//PCIE_P_MISC_STATUS +#define PCIE_P_MISC_STATUS__P_DESKEW_ERR__SHIFT 0x0 +#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR__SHIFT 0x10 +#define PCIE_P_MISC_STATUS__P_DESKEW_ERR_MASK 0x000000FFL +#define PCIE_P_MISC_STATUS__P_SYMUNLOCK_ERR_MASK 0xFFFF0000L +//PCIE_P_RCV_L0S_FTS_DET +#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN__SHIFT 0x0 +#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX__SHIFT 0x8 +#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MIN_MASK 0x000000FFL +#define PCIE_P_RCV_L0S_FTS_DET__P_RCV_L0S_FTS_DET_MAX_MASK 0x0000FF00L +//PCIE_RX_AD +#define PCIE_RX_AD__RX_SWUS_DROP_PME_TO__SHIFT 0x0 +#define PCIE_RX_AD__RX_SWUS_DROP_UNLOCK__SHIFT 0x1 +#define PCIE_RX_AD__RX_SWUS_UR_VDM0__SHIFT 0x2 +#define PCIE_RX_AD__RX_SWUS_DROP_VDM0__SHIFT 0x3 +#define PCIE_RX_AD__RX_SWUS_DROP_VDM1__SHIFT 0x4 +#define PCIE_RX_AD__RX_SWUS_UR_MSG_PREFIX_DIS__SHIFT 0x5 +#define PCIE_RX_AD__RX_RC_DROP_VDM0__SHIFT 0x8 +#define PCIE_RX_AD__RX_RC_UR_VDM0__SHIFT 0x9 +#define PCIE_RX_AD__RX_RC_DROP_VDM1__SHIFT 0xa +#define PCIE_RX_AD__RX_RC_UR_SSPL_MSG__SHIFT 0xb +#define PCIE_RX_AD__RX_RC_UR_BFRC_MSG__SHIFT 0xc 
+#define PCIE_RX_AD__RX_RC_DROP_PME_TO_ACK__SHIFT 0xd +#define PCIE_RX_AD__RX_RC_UR_ECRC_DIS__SHIFT 0xe +#define PCIE_RX_AD__RX_RC_DROP_CPL_ECRC_FAILURE__SHIFT 0xf +#define PCIE_RX_AD__RX_SB_DROP_LTAR_VDM_EN__SHIFT 0x10 +#define PCIE_RX_AD__RX_SWUS_DROP_PME_TO_MASK 0x00000001L +#define PCIE_RX_AD__RX_SWUS_DROP_UNLOCK_MASK 0x00000002L +#define PCIE_RX_AD__RX_SWUS_UR_VDM0_MASK 0x00000004L +#define PCIE_RX_AD__RX_SWUS_DROP_VDM0_MASK 0x00000008L +#define PCIE_RX_AD__RX_SWUS_DROP_VDM1_MASK 0x00000010L +#define PCIE_RX_AD__RX_SWUS_UR_MSG_PREFIX_DIS_MASK 0x00000020L +#define PCIE_RX_AD__RX_RC_DROP_VDM0_MASK 0x00000100L +#define PCIE_RX_AD__RX_RC_UR_VDM0_MASK 0x00000200L +#define PCIE_RX_AD__RX_RC_DROP_VDM1_MASK 0x00000400L +#define PCIE_RX_AD__RX_RC_UR_SSPL_MSG_MASK 0x00000800L +#define PCIE_RX_AD__RX_RC_UR_BFRC_MSG_MASK 0x00001000L +#define PCIE_RX_AD__RX_RC_DROP_PME_TO_ACK_MASK 0x00002000L +#define PCIE_RX_AD__RX_RC_UR_ECRC_DIS_MASK 0x00004000L +#define PCIE_RX_AD__RX_RC_DROP_CPL_ECRC_FAILURE_MASK 0x00008000L +#define PCIE_RX_AD__RX_SB_DROP_LTAR_VDM_EN_MASK 0x00010000L +//PCIE_SDP_CTRL +#define PCIE_SDP_CTRL__SDP_UNIT_ID__SHIFT 0x0 +#define PCIE_SDP_CTRL__CI_SLV_REQR_FULL_DISCONNECT_EN__SHIFT 0x4 +#define PCIE_SDP_CTRL__CI_SLV_REQR_PART_DISCONNECT_EN__SHIFT 0x5 +#define PCIE_SDP_CTRL__CI_MSTSDP_CLKGATE_ONESIDED_ENABLE__SHIFT 0x6 +#define PCIE_SDP_CTRL__TX_RC_TPH_PRIV_DIS__SHIFT 0x7 +#define PCIE_SDP_CTRL__TX_SWUS_TPH_PRIV_DIS__SHIFT 0x8 +#define PCIE_SDP_CTRL__CI_SLAVE_TAG_STEALING_DIS__SHIFT 0x9 +#define PCIE_SDP_CTRL__SLAVE_PREFIX_PRELOAD_DIS__SHIFT 0xa +#define PCIE_SDP_CTRL__CI_DISABLE_LTR_DROPPING__SHIFT 0xb +#define PCIE_SDP_CTRL__RX_SWUS_SIDEBAND_CPLHDR_DIS__SHIFT 0xc +#define PCIE_SDP_CTRL__CI_MST_MEMR_RD_NONCONT_BE_EN__SHIFT 0xd +#define PCIE_SDP_CTRL__CI_MSTSDP_DISCONNECT_RSP_ON_PARTIAL__SHIFT 0xe +#define PCIE_SDP_CTRL__CI_SWUS_RCVD_ERR_HANDLING_DIS__SHIFT 0xf +#define PCIE_SDP_CTRL__EARLY_HW_WAKE_UP_EN__SHIFT 0x10 +#define PCIE_SDP_CTRL__SLV_SDP_DISCONNECT_WHEN_IN_L1_EN__SHIFT 0x11 +#define PCIE_SDP_CTRL__BLOCK_SLV_SDP_DISCONNECT_WHEN_EARLY_HW_WAKE_UP_EN__SHIFT 0x12 +#define PCIE_SDP_CTRL__TX_ENCMSG_USE_SDP_EP_DIS__SHIFT 0x13 +#define PCIE_SDP_CTRL__TX_IGNORE_POISON_BIT_EN__SHIFT 0x14 +#define PCIE_SDP_CTRL__TX_RBUF_WRITE_2HDR_DIS__SHIFT 0x15 +#define PCIE_SDP_CTRL__TX_RBUF_READ_2HDR_DIS__SHIFT 0x16 +#define PCIE_SDP_CTRL__SDP_UNIT_ID_MASK 0x0000000FL +#define PCIE_SDP_CTRL__CI_SLV_REQR_FULL_DISCONNECT_EN_MASK 0x00000010L +#define PCIE_SDP_CTRL__CI_SLV_REQR_PART_DISCONNECT_EN_MASK 0x00000020L +#define PCIE_SDP_CTRL__CI_MSTSDP_CLKGATE_ONESIDED_ENABLE_MASK 0x00000040L +#define PCIE_SDP_CTRL__TX_RC_TPH_PRIV_DIS_MASK 0x00000080L +#define PCIE_SDP_CTRL__TX_SWUS_TPH_PRIV_DIS_MASK 0x00000100L +#define PCIE_SDP_CTRL__CI_SLAVE_TAG_STEALING_DIS_MASK 0x00000200L +#define PCIE_SDP_CTRL__SLAVE_PREFIX_PRELOAD_DIS_MASK 0x00000400L +#define PCIE_SDP_CTRL__CI_DISABLE_LTR_DROPPING_MASK 0x00000800L +#define PCIE_SDP_CTRL__RX_SWUS_SIDEBAND_CPLHDR_DIS_MASK 0x00001000L +#define PCIE_SDP_CTRL__CI_MST_MEMR_RD_NONCONT_BE_EN_MASK 0x00002000L +#define PCIE_SDP_CTRL__CI_MSTSDP_DISCONNECT_RSP_ON_PARTIAL_MASK 0x00004000L +#define PCIE_SDP_CTRL__CI_SWUS_RCVD_ERR_HANDLING_DIS_MASK 0x00008000L +#define PCIE_SDP_CTRL__EARLY_HW_WAKE_UP_EN_MASK 0x00010000L +#define PCIE_SDP_CTRL__SLV_SDP_DISCONNECT_WHEN_IN_L1_EN_MASK 0x00020000L +#define PCIE_SDP_CTRL__BLOCK_SLV_SDP_DISCONNECT_WHEN_EARLY_HW_WAKE_UP_EN_MASK 0x00040000L +#define PCIE_SDP_CTRL__TX_ENCMSG_USE_SDP_EP_DIS_MASK 0x00080000L +#define 
PCIE_SDP_CTRL__TX_IGNORE_POISON_BIT_EN_MASK 0x00100000L +#define PCIE_SDP_CTRL__TX_RBUF_WRITE_2HDR_DIS_MASK 0x00200000L +#define PCIE_SDP_CTRL__TX_RBUF_READ_2HDR_DIS_MASK 0x00400000L +//PCIE_SDP_SWUS_SLV_ATTR_CTRL +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMWR__SHIFT 0x0 +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMRD__SHIFT 0x2 +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_ATOMIC__SHIFT 0x4 +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMWR__SHIFT 0x6 +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMRD__SHIFT 0x8 +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_ATOMIC__SHIFT 0xa +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMWR__SHIFT 0xc +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMRD__SHIFT 0xe +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_ATOMIC__SHIFT 0x10 +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMWR_MASK 0x00000003L +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_MEMRD_MASK 0x0000000CL +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_RO_OVERRIDE_ATOMIC_MASK 0x00000030L +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMWR_MASK 0x000000C0L +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_MEMRD_MASK 0x00000300L +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_SNR_OVERRIDE_ATOMIC_MASK 0x00000C00L +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMWR_MASK 0x00003000L +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_MEMRD_MASK 0x0000C000L +#define PCIE_SDP_SWUS_SLV_ATTR_CTRL__CI_SWUS_SLV_IDO_OVERRIDE_ATOMIC_MASK 0x00030000L +//PCIE_PERF_COUNT_CNTL +#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN__SHIFT 0x0 +#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR__SHIFT 0x1 +#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET__SHIFT 0x2 +#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_EN_MASK 0x00000001L +#define PCIE_PERF_COUNT_CNTL__GLOBAL_SHADOW_WR_MASK 0x00000002L +#define PCIE_PERF_COUNT_CNTL__GLOBAL_COUNT_RESET_MASK 0x00000004L +//PCIE_PERF_CNTL_TXCLK +#define PCIE_PERF_CNTL_TXCLK__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_TXCLK__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_TXCLK__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_TXCLK__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_TXCLK__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_TXCLK__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_TXCLK__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_TXCLK__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_TXCLK +#define PCIE_PERF_COUNT0_TXCLK__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_TXCLK__COUNTER0_MASK 0xFFFFFFFFL +//PCIE_PERF_COUNT1_TXCLK +#define PCIE_PERF_COUNT1_TXCLK__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_TXCLK__COUNTER1_MASK 0xFFFFFFFFL +//PCIE_PERF_CNTL_MST_R_CLK +#define PCIE_PERF_CNTL_MST_R_CLK__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_MST_R_CLK__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_MST_R_CLK__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_MST_R_CLK__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_MST_R_CLK__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_MST_R_CLK +#define PCIE_PERF_COUNT0_MST_R_CLK__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_MST_R_CLK__COUNTER0_MASK 0xFFFFFFFFL 
+//PCIE_PERF_COUNT1_MST_R_CLK +#define PCIE_PERF_COUNT1_MST_R_CLK__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_MST_R_CLK__COUNTER1_MASK 0xFFFFFFFFL +//PCIE_PERF_CNTL_MST_C_CLK +#define PCIE_PERF_CNTL_MST_C_CLK__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_MST_C_CLK__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_MST_C_CLK__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_MST_C_CLK__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_MST_C_CLK__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_MST_C_CLK +#define PCIE_PERF_COUNT0_MST_C_CLK__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_MST_C_CLK__COUNTER0_MASK 0xFFFFFFFFL +//PCIE_PERF_COUNT1_MST_C_CLK +#define PCIE_PERF_COUNT1_MST_C_CLK__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_MST_C_CLK__COUNTER1_MASK 0xFFFFFFFFL +//PCIE_PERF_CNTL_SLV_R_CLK +#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_SLV_R_CLK__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_SLV_R_CLK__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_SLV_R_CLK +#define PCIE_PERF_COUNT0_SLV_R_CLK__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_SLV_R_CLK__COUNTER0_MASK 0xFFFFFFFFL +//PCIE_PERF_COUNT1_SLV_R_CLK +#define PCIE_PERF_COUNT1_SLV_R_CLK__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_SLV_R_CLK__COUNTER1_MASK 0xFFFFFFFFL +//PCIE_PERF_CNTL_SLV_S_C_CLK +#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_SLV_S_C_CLK__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_SLV_S_C_CLK__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_SLV_S_C_CLK +#define PCIE_PERF_COUNT0_SLV_S_C_CLK__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_SLV_S_C_CLK__COUNTER0_MASK 0xFFFFFFFFL +//PCIE_PERF_COUNT1_SLV_S_C_CLK +#define PCIE_PERF_COUNT1_SLV_S_C_CLK__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_SLV_S_C_CLK__COUNTER1_MASK 0xFFFFFFFFL +//PCIE_PERF_CNTL_SLV_NS_C_CLK +#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_SLV_NS_C_CLK__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_SLV_NS_C_CLK__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_SLV_NS_C_CLK +#define PCIE_PERF_COUNT0_SLV_NS_C_CLK__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_SLV_NS_C_CLK__COUNTER0_MASK 0xFFFFFFFFL +//PCIE_PERF_COUNT1_SLV_NS_C_CLK +#define PCIE_PERF_COUNT1_SLV_NS_C_CLK__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_SLV_NS_C_CLK__COUNTER1_MASK 0xFFFFFFFFL 
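All of these definitions follow the same <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK convention, so a field is read by masking and shifting, and written with a read-modify-write. Below is a minimal sketch of that pattern using the EVENT1_SEL field of PCIE_PERF_CNTL_TXCLK defined above; the helper names are illustrative only (amdgpu's own REG_GET_FIELD()/REG_SET_FIELD() style macros express the same idea), not part of the patch.

#include <stdint.h>

/* Read EVENT1_SEL out of a PCIE_PERF_CNTL_TXCLK register value. */
static inline uint32_t pcie_perf_get_event1_sel(uint32_t reg)
{
	return (reg & PCIE_PERF_CNTL_TXCLK__EVENT1_SEL_MASK) >>
	       PCIE_PERF_CNTL_TXCLK__EVENT1_SEL__SHIFT;
}

/* Replace EVENT1_SEL while leaving every other field untouched. */
static inline uint32_t pcie_perf_set_event1_sel(uint32_t reg, uint32_t sel)
{
	reg &= ~(uint32_t)PCIE_PERF_CNTL_TXCLK__EVENT1_SEL_MASK;
	reg |= (sel << PCIE_PERF_CNTL_TXCLK__EVENT1_SEL__SHIFT) &
	       PCIE_PERF_CNTL_TXCLK__EVENT1_SEL_MASK;
	return reg;
}

For instance, pcie_perf_set_event1_sel(0, 0x2A) yields 0x00002A00, i.e. 0x2A placed into bits 15:8 as described by the 0x0000FF00L mask.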
+//PCIE_PERF_CNTL_EVENT0_PORT_SEL +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK__SHIFT 0x0 +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_R_CLK__SHIFT 0x4 +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_C_CLK__SHIFT 0x8 +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_R_CLK__SHIFT 0xc +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_S_C_CLK__SHIFT 0x10 +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_NS_C_CLK__SHIFT 0x14 +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK2__SHIFT 0x18 +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK_MASK 0x0000000FL +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_R_CLK_MASK 0x000000F0L +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_MST_C_CLK_MASK 0x00000F00L +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_R_CLK_MASK 0x0000F000L +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_S_C_CLK_MASK 0x000F0000L +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_SLV_NS_C_CLK_MASK 0x00F00000L +#define PCIE_PERF_CNTL_EVENT0_PORT_SEL__PERF0_PORT_SEL_TXCLK2_MASK 0x0F000000L +//PCIE_PERF_CNTL_EVENT1_PORT_SEL +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK__SHIFT 0x0 +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_R_CLK__SHIFT 0x4 +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_C_CLK__SHIFT 0x8 +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_R_CLK__SHIFT 0xc +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_S_C_CLK__SHIFT 0x10 +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_NS_C_CLK__SHIFT 0x14 +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK2__SHIFT 0x18 +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK_MASK 0x0000000FL +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_R_CLK_MASK 0x000000F0L +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_MST_C_CLK_MASK 0x00000F00L +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_R_CLK_MASK 0x0000F000L +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_S_C_CLK_MASK 0x000F0000L +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_SLV_NS_C_CLK_MASK 0x00F00000L +#define PCIE_PERF_CNTL_EVENT1_PORT_SEL__PERF1_PORT_SEL_TXCLK2_MASK 0x0F000000L +//PCIE_PERF_CNTL_TXCLK2 +#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_TXCLK2__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_TXCLK2__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_TXCLK2__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_TXCLK2__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_TXCLK2 +#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_TXCLK2__COUNTER0_MASK 0xFFFFFFFFL +//PCIE_PERF_COUNT1_TXCLK2 +#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_TXCLK2__COUNTER1_MASK 0xFFFFFFFFL +//PCIE_HIP_REG0 +#define PCIE_HIP_REG0__CI_HIP_APT0_BASE_HI__SHIFT 0x0 +#define PCIE_HIP_REG0__CI_HIP_APT0_ENABLE__SHIFT 0x18 +#define PCIE_HIP_REG0__CI_HIP_APT0_PASID_MODE__SHIFT 0x19 +#define PCIE_HIP_REG0__CI_HIP_APT0_REQAT_MODE__SHIFT 0x1a +#define PCIE_HIP_REG0__CI_HIP_APT0_REQIO_MODE__SHIFT 0x1d +#define PCIE_HIP_REG0__CI_HIP_APT0_BASE_HI_MASK 0x000FFFFFL +#define PCIE_HIP_REG0__CI_HIP_APT0_ENABLE_MASK 0x01000000L +#define 
PCIE_HIP_REG0__CI_HIP_APT0_PASID_MODE_MASK 0x02000000L +#define PCIE_HIP_REG0__CI_HIP_APT0_REQAT_MODE_MASK 0x1C000000L +#define PCIE_HIP_REG0__CI_HIP_APT0_REQIO_MODE_MASK 0x60000000L +//PCIE_HIP_REG1 +#define PCIE_HIP_REG1__CI_HIP_APT0_BASE_LO__SHIFT 0x0 +#define PCIE_HIP_REG1__CI_HIP_APT0_BASE_LO_MASK 0xFFFFFFFFL +//PCIE_HIP_REG2 +#define PCIE_HIP_REG2__CI_HIP_APT0_LIMIT_HI__SHIFT 0x0 +#define PCIE_HIP_REG2__CI_HIP_APT0_LIMIT_HI_MASK 0x000FFFFFL +//PCIE_HIP_REG3 +#define PCIE_HIP_REG3__CI_HIP_APT0_LIMIT_LO__SHIFT 0x0 +#define PCIE_HIP_REG3__CI_HIP_APT0_LIMIT_LO_MASK 0xFFFFFFFFL +//PCIE_HIP_REG4 +#define PCIE_HIP_REG4__CI_HIP_APT1_BASE_HI__SHIFT 0x0 +#define PCIE_HIP_REG4__CI_HIP_APT1_ENABLE__SHIFT 0x18 +#define PCIE_HIP_REG4__CI_HIP_APT1_PASID_MODE__SHIFT 0x19 +#define PCIE_HIP_REG4__CI_HIP_APT1_REQAT_MODE__SHIFT 0x1a +#define PCIE_HIP_REG4__CI_HIP_APT1_REQIO_MODE__SHIFT 0x1d +#define PCIE_HIP_REG4__CI_HIP_APT1_BASE_HI_MASK 0x000FFFFFL +#define PCIE_HIP_REG4__CI_HIP_APT1_ENABLE_MASK 0x01000000L +#define PCIE_HIP_REG4__CI_HIP_APT1_PASID_MODE_MASK 0x02000000L +#define PCIE_HIP_REG4__CI_HIP_APT1_REQAT_MODE_MASK 0x1C000000L +#define PCIE_HIP_REG4__CI_HIP_APT1_REQIO_MODE_MASK 0x60000000L +//PCIE_HIP_REG5 +#define PCIE_HIP_REG5__CI_HIP_APT1_BASE_LO__SHIFT 0x0 +#define PCIE_HIP_REG5__CI_HIP_APT1_BASE_LO_MASK 0xFFFFFFFFL +//PCIE_HIP_REG6 +#define PCIE_HIP_REG6__CI_HIP_APT1_LIMIT_HI__SHIFT 0x0 +#define PCIE_HIP_REG6__CI_HIP_APT1_LIMIT_HI_MASK 0x000FFFFFL +//PCIE_HIP_REG7 +#define PCIE_HIP_REG7__CI_HIP_APT1_LIMIT_LO__SHIFT 0x0 +#define PCIE_HIP_REG7__CI_HIP_APT1_LIMIT_LO_MASK 0xFFFFFFFFL +//PCIE_HIP_REG8 +#define PCIE_HIP_REG8__CI_HIP_MASK__SHIFT 0x0 +#define PCIE_HIP_REG8__CI_HIP_MASK_MASK 0x000FFFFFL +//PCIE_PRBS_CLR +#define PCIE_PRBS_CLR__PRBS_CLR__SHIFT 0x0 +#define PCIE_PRBS_CLR__PRBS_POLARITY_EN__SHIFT 0x18 +#define PCIE_PRBS_CLR__PRBS_CLR_MASK 0x0000FFFFL +#define PCIE_PRBS_CLR__PRBS_POLARITY_EN_MASK 0x01000000L +//PCIE_PRBS_STATUS1 +#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT__SHIFT 0x0 +#define PCIE_PRBS_STATUS1__PRBS_LOCKED__SHIFT 0x10 +#define PCIE_PRBS_STATUS1__PRBS_ERRSTAT_MASK 0x0000FFFFL +#define PCIE_PRBS_STATUS1__PRBS_LOCKED_MASK 0xFFFF0000L +//PCIE_PRBS_STATUS2 +#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE__SHIFT 0x0 +#define PCIE_PRBS_STATUS2__PRBS_BITCNT_DONE_MASK 0x0000FFFFL +//PCIE_PRBS_FREERUN +#define PCIE_PRBS_FREERUN__PRBS_FREERUN__SHIFT 0x0 +#define PCIE_PRBS_FREERUN__PRBS_FREERUN_MASK 0x0000FFFFL +//PCIE_PRBS_MISC +#define PCIE_PRBS_MISC__PRBS_EN__SHIFT 0x0 +#define PCIE_PRBS_MISC__PRBS_TEST_MODE__SHIFT 0x1 +#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE__SHIFT 0x4 +#define PCIE_PRBS_MISC__PRBS_8BIT_SEL__SHIFT 0x5 +#define PCIE_PRBS_MISC__PRBS_COMMA_NUM__SHIFT 0x6 +#define PCIE_PRBS_MISC__PRBS_LOCK_CNT__SHIFT 0x8 +#define PCIE_PRBS_MISC__PRBS_DATA_RATE__SHIFT 0xe +#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK__SHIFT 0x10 +#define PCIE_PRBS_MISC__PRBS_EN_MASK 0x00000001L +#define PCIE_PRBS_MISC__PRBS_TEST_MODE_MASK 0x0000000EL +#define PCIE_PRBS_MISC__PRBS_USER_PATTERN_TOGGLE_MASK 0x00000010L +#define PCIE_PRBS_MISC__PRBS_8BIT_SEL_MASK 0x00000020L +#define PCIE_PRBS_MISC__PRBS_COMMA_NUM_MASK 0x000000C0L +#define PCIE_PRBS_MISC__PRBS_LOCK_CNT_MASK 0x00001F00L +#define PCIE_PRBS_MISC__PRBS_DATA_RATE_MASK 0x0000C000L +#define PCIE_PRBS_MISC__PRBS_CHK_ERR_MASK_MASK 0xFFFF0000L +//PCIE_PRBS_USER_PATTERN +#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN__SHIFT 0x0 +#define PCIE_PRBS_USER_PATTERN__PRBS_USER_PATTERN_MASK 0x3FFFFFFFL +//PCIE_PRBS_LO_BITCNT +#define 
PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT__SHIFT 0x0 +#define PCIE_PRBS_LO_BITCNT__PRBS_LO_BITCNT_MASK 0xFFFFFFFFL +//PCIE_PRBS_HI_BITCNT +#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT__SHIFT 0x0 +#define PCIE_PRBS_HI_BITCNT__PRBS_HI_BITCNT_MASK 0x000000FFL +//PCIE_PRBS_ERRCNT_0 +#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_0__PRBS_ERRCNT_0_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_1 +#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_1__PRBS_ERRCNT_1_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_2 +#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_2__PRBS_ERRCNT_2_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_3 +#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_3__PRBS_ERRCNT_3_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_4 +#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_4__PRBS_ERRCNT_4_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_5 +#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_5__PRBS_ERRCNT_5_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_6 +#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_6__PRBS_ERRCNT_6_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_7 +#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_7__PRBS_ERRCNT_7_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_8 +#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_8__PRBS_ERRCNT_8_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_9 +#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_9__PRBS_ERRCNT_9_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_10 +#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_10__PRBS_ERRCNT_10_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_11 +#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_11__PRBS_ERRCNT_11_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_12 +#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_12__PRBS_ERRCNT_12_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_13 +#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_13__PRBS_ERRCNT_13_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_14 +#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_14__PRBS_ERRCNT_14_MASK 0xFFFFFFFFL +//PCIE_PRBS_ERRCNT_15 +#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15__SHIFT 0x0 +#define PCIE_PRBS_ERRCNT_15__PRBS_ERRCNT_15_MASK 0xFFFFFFFFL +//SWRST_COMMAND_STATUS +#define SWRST_COMMAND_STATUS__RECONFIGURE__SHIFT 0x0 +#define SWRST_COMMAND_STATUS__ATOMIC_RESET__SHIFT 0x1 +#define SWRST_COMMAND_STATUS__RESET_COMPLETE__SHIFT 0x10 +#define SWRST_COMMAND_STATUS__WAIT_STATE__SHIFT 0x11 +#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET__SHIFT 0x18 +#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_CFG_ONLY__SHIFT 0x19 +#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_PHY_CALIB__SHIFT 0x1a +#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET__SHIFT 0x1b +#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_CFG_ONLY__SHIFT 0x1c +#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_HOT_RESET__SHIFT 0x1d +#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DISABLE__SHIFT 0x1e +#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DOWN__SHIFT 0x1f +#define SWRST_COMMAND_STATUS__RECONFIGURE_MASK 0x00000001L +#define SWRST_COMMAND_STATUS__ATOMIC_RESET_MASK 0x00000002L +#define SWRST_COMMAND_STATUS__RESET_COMPLETE_MASK 0x00010000L +#define SWRST_COMMAND_STATUS__WAIT_STATE_MASK 0x00020000L +#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_MASK 0x01000000L +#define 
SWRST_COMMAND_STATUS__SWUS_LINK_RESET_CFG_ONLY_MASK 0x02000000L +#define SWRST_COMMAND_STATUS__SWUS_LINK_RESET_PHY_CALIB_MASK 0x04000000L +#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_MASK 0x08000000L +#define SWRST_COMMAND_STATUS__SWDS_LINK_RESET_CFG_ONLY_MASK 0x10000000L +#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_HOT_RESET_MASK 0x20000000L +#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DISABLE_MASK 0x40000000L +#define SWRST_COMMAND_STATUS__LINK_RESET_TYPE_LINK_DOWN_MASK 0x80000000L +//SWRST_GENERAL_CONTROL +#define SWRST_GENERAL_CONTROL__RECONFIGURE_EN__SHIFT 0x0 +#define SWRST_GENERAL_CONTROL__ATOMIC_RESET_EN__SHIFT 0x1 +#define SWRST_GENERAL_CONTROL__RESET_PERIOD__SHIFT 0x2 +#define SWRST_GENERAL_CONTROL__WAIT_LINKUP__SHIFT 0x8 +#define SWRST_GENERAL_CONTROL__FORCE_REGIDLE__SHIFT 0x9 +#define SWRST_GENERAL_CONTROL__BLOCK_ON_IDLE__SHIFT 0xa +#define SWRST_GENERAL_CONTROL__CONFIG_XFER_MODE__SHIFT 0xc +#define SWRST_GENERAL_CONTROL__MP1_PCIE_CROSSFIRE_LOCKDOWN_EN__SHIFT 0x18 +#define SWRST_GENERAL_CONTROL__IGNORE_SDP_RESET__SHIFT 0x19 +#define SWRST_GENERAL_CONTROL__RECONFIGURE_EN_MASK 0x00000001L +#define SWRST_GENERAL_CONTROL__ATOMIC_RESET_EN_MASK 0x00000002L +#define SWRST_GENERAL_CONTROL__RESET_PERIOD_MASK 0x0000001CL +#define SWRST_GENERAL_CONTROL__WAIT_LINKUP_MASK 0x00000100L +#define SWRST_GENERAL_CONTROL__FORCE_REGIDLE_MASK 0x00000200L +#define SWRST_GENERAL_CONTROL__BLOCK_ON_IDLE_MASK 0x00000400L +#define SWRST_GENERAL_CONTROL__CONFIG_XFER_MODE_MASK 0x00001000L +#define SWRST_GENERAL_CONTROL__MP1_PCIE_CROSSFIRE_LOCKDOWN_EN_MASK 0x01000000L +#define SWRST_GENERAL_CONTROL__IGNORE_SDP_RESET_MASK 0x02000000L +//SWRST_COMMAND_0 +#define SWRST_COMMAND_0__PORT0_COR_RESET__SHIFT 0x0 +#define SWRST_COMMAND_0__PORT0_CFG_RESET__SHIFT 0x8 +#define SWRST_COMMAND_0__PORT1_CFG_RESET__SHIFT 0x9 +#define SWRST_COMMAND_0__PORT2_CFG_RESET__SHIFT 0xa +#define SWRST_COMMAND_0__PORT3_CFG_RESET__SHIFT 0xb +#define SWRST_COMMAND_0__PORT4_CFG_RESET__SHIFT 0xc +#define SWRST_COMMAND_0__PORT5_CFG_RESET__SHIFT 0xd +#define SWRST_COMMAND_0__PORT6_CFG_RESET__SHIFT 0xe +#define SWRST_COMMAND_0__PORT7_CFG_RESET__SHIFT 0xf +#define SWRST_COMMAND_0__BIF0_GLOBAL_RESET__SHIFT 0x18 +#define SWRST_COMMAND_0__BIF0_CALIB_RESET__SHIFT 0x19 +#define SWRST_COMMAND_0__BIF0_CORE_RESET__SHIFT 0x1a +#define SWRST_COMMAND_0__BIF0_REGISTER_RESET__SHIFT 0x1b +#define SWRST_COMMAND_0__BIF0_PHY_RESET__SHIFT 0x1c +#define SWRST_COMMAND_0__BIF0_STICKY_RESET__SHIFT 0x1d +#define SWRST_COMMAND_0__BIF0_CONFIG_RESET__SHIFT 0x1e +#define SWRST_COMMAND_0__PORT0_COR_RESET_MASK 0x00000001L +#define SWRST_COMMAND_0__PORT0_CFG_RESET_MASK 0x00000100L +#define SWRST_COMMAND_0__PORT1_CFG_RESET_MASK 0x00000200L +#define SWRST_COMMAND_0__PORT2_CFG_RESET_MASK 0x00000400L +#define SWRST_COMMAND_0__PORT3_CFG_RESET_MASK 0x00000800L +#define SWRST_COMMAND_0__PORT4_CFG_RESET_MASK 0x00001000L +#define SWRST_COMMAND_0__PORT5_CFG_RESET_MASK 0x00002000L +#define SWRST_COMMAND_0__PORT6_CFG_RESET_MASK 0x00004000L +#define SWRST_COMMAND_0__PORT7_CFG_RESET_MASK 0x00008000L +#define SWRST_COMMAND_0__BIF0_GLOBAL_RESET_MASK 0x01000000L +#define SWRST_COMMAND_0__BIF0_CALIB_RESET_MASK 0x02000000L +#define SWRST_COMMAND_0__BIF0_CORE_RESET_MASK 0x04000000L +#define SWRST_COMMAND_0__BIF0_REGISTER_RESET_MASK 0x08000000L +#define SWRST_COMMAND_0__BIF0_PHY_RESET_MASK 0x10000000L +#define SWRST_COMMAND_0__BIF0_STICKY_RESET_MASK 0x20000000L +#define SWRST_COMMAND_0__BIF0_CONFIG_RESET_MASK 0x40000000L +//SWRST_COMMAND_1 +#define SWRST_COMMAND_1__SWITCHCLK__SHIFT 
0x15 +#define SWRST_COMMAND_1__RESETAXIMST__SHIFT 0x16 +#define SWRST_COMMAND_1__RESETAXISLV__SHIFT 0x17 +#define SWRST_COMMAND_1__RESETAXIINT__SHIFT 0x18 +#define SWRST_COMMAND_1__RESETPCFG__SHIFT 0x19 +#define SWRST_COMMAND_1__RESETLNCT__SHIFT 0x1a +#define SWRST_COMMAND_1__RESETMNTR__SHIFT 0x1b +#define SWRST_COMMAND_1__RESETHLTR__SHIFT 0x1c +#define SWRST_COMMAND_1__RESETCPM__SHIFT 0x1d +#define SWRST_COMMAND_1__RESETPHY0__SHIFT 0x1e +#define SWRST_COMMAND_1__SWITCHCLK_MASK 0x00200000L +#define SWRST_COMMAND_1__RESETAXIMST_MASK 0x00400000L +#define SWRST_COMMAND_1__RESETAXISLV_MASK 0x00800000L +#define SWRST_COMMAND_1__RESETAXIINT_MASK 0x01000000L +#define SWRST_COMMAND_1__RESETPCFG_MASK 0x02000000L +#define SWRST_COMMAND_1__RESETLNCT_MASK 0x04000000L +#define SWRST_COMMAND_1__RESETMNTR_MASK 0x08000000L +#define SWRST_COMMAND_1__RESETHLTR_MASK 0x10000000L +#define SWRST_COMMAND_1__RESETCPM_MASK 0x20000000L +#define SWRST_COMMAND_1__RESETPHY0_MASK 0x40000000L +//SWRST_CONTROL_0 +#define SWRST_CONTROL_0__PORT0_COR_RCEN__SHIFT 0x0 +#define SWRST_CONTROL_0__PORT0_CFG_RCEN__SHIFT 0x8 +#define SWRST_CONTROL_0__PORT1_CFG_RCEN__SHIFT 0x9 +#define SWRST_CONTROL_0__PORT2_CFG_RCEN__SHIFT 0xa +#define SWRST_CONTROL_0__PORT3_CFG_RCEN__SHIFT 0xb +#define SWRST_CONTROL_0__PORT4_CFG_RCEN__SHIFT 0xc +#define SWRST_CONTROL_0__PORT5_CFG_RCEN__SHIFT 0xd +#define SWRST_CONTROL_0__PORT6_CFG_RCEN__SHIFT 0xe +#define SWRST_CONTROL_0__PORT7_CFG_RCEN__SHIFT 0xf +#define SWRST_CONTROL_0__BIF0_GLOBAL_RESETRCEN__SHIFT 0x18 +#define SWRST_CONTROL_0__BIF0_CALIB_RESETRCEN__SHIFT 0x19 +#define SWRST_CONTROL_0__BIF0_CORE_RESETRCEN__SHIFT 0x1a +#define SWRST_CONTROL_0__BIF0_REGISTER_RESETRCEN__SHIFT 0x1b +#define SWRST_CONTROL_0__BIF0_PHY_RESETRCEN__SHIFT 0x1c +#define SWRST_CONTROL_0__BIF0_STICKY_RESETRCEN__SHIFT 0x1d +#define SWRST_CONTROL_0__BIF0_CONFIG_RESETRCEN__SHIFT 0x1e +#define SWRST_CONTROL_0__PORT0_COR_RCEN_MASK 0x00000001L +#define SWRST_CONTROL_0__PORT0_CFG_RCEN_MASK 0x00000100L +#define SWRST_CONTROL_0__PORT1_CFG_RCEN_MASK 0x00000200L +#define SWRST_CONTROL_0__PORT2_CFG_RCEN_MASK 0x00000400L +#define SWRST_CONTROL_0__PORT3_CFG_RCEN_MASK 0x00000800L +#define SWRST_CONTROL_0__PORT4_CFG_RCEN_MASK 0x00001000L +#define SWRST_CONTROL_0__PORT5_CFG_RCEN_MASK 0x00002000L +#define SWRST_CONTROL_0__PORT6_CFG_RCEN_MASK 0x00004000L +#define SWRST_CONTROL_0__PORT7_CFG_RCEN_MASK 0x00008000L +#define SWRST_CONTROL_0__BIF0_GLOBAL_RESETRCEN_MASK 0x01000000L +#define SWRST_CONTROL_0__BIF0_CALIB_RESETRCEN_MASK 0x02000000L +#define SWRST_CONTROL_0__BIF0_CORE_RESETRCEN_MASK 0x04000000L +#define SWRST_CONTROL_0__BIF0_REGISTER_RESETRCEN_MASK 0x08000000L +#define SWRST_CONTROL_0__BIF0_PHY_RESETRCEN_MASK 0x10000000L +#define SWRST_CONTROL_0__BIF0_STICKY_RESETRCEN_MASK 0x20000000L +#define SWRST_CONTROL_0__BIF0_CONFIG_RESETRCEN_MASK 0x40000000L +//SWRST_CONTROL_1 +#define SWRST_CONTROL_1__SWITCHCLK_RCEN__SHIFT 0x15 +#define SWRST_CONTROL_1__RESETAXIMST_RCEN__SHIFT 0x16 +#define SWRST_CONTROL_1__RESETAXISLV_RCEN__SHIFT 0x17 +#define SWRST_CONTROL_1__RESETAXIINT_RCEN__SHIFT 0x18 +#define SWRST_CONTROL_1__RESETPCFG_RCEN__SHIFT 0x19 +#define SWRST_CONTROL_1__RESETLNCT_RCEN__SHIFT 0x1a +#define SWRST_CONTROL_1__RESETMNTR_RCEN__SHIFT 0x1b +#define SWRST_CONTROL_1__RESETHLTR_RCEN__SHIFT 0x1c +#define SWRST_CONTROL_1__RESETCPM_RCEN__SHIFT 0x1d +#define SWRST_CONTROL_1__RESETPHY0_RCEN__SHIFT 0x1e +#define SWRST_CONTROL_1__SWITCHCLK_RCEN_MASK 0x00200000L +#define SWRST_CONTROL_1__RESETAXIMST_RCEN_MASK 0x00400000L +#define 
SWRST_CONTROL_1__RESETAXISLV_RCEN_MASK 0x00800000L +#define SWRST_CONTROL_1__RESETAXIINT_RCEN_MASK 0x01000000L +#define SWRST_CONTROL_1__RESETPCFG_RCEN_MASK 0x02000000L +#define SWRST_CONTROL_1__RESETLNCT_RCEN_MASK 0x04000000L +#define SWRST_CONTROL_1__RESETMNTR_RCEN_MASK 0x08000000L +#define SWRST_CONTROL_1__RESETHLTR_RCEN_MASK 0x10000000L +#define SWRST_CONTROL_1__RESETCPM_RCEN_MASK 0x20000000L +#define SWRST_CONTROL_1__RESETPHY0_RCEN_MASK 0x40000000L +//SWRST_CONTROL_2 +#define SWRST_CONTROL_2__PORT0_COR_ATEN__SHIFT 0x0 +#define SWRST_CONTROL_2__PORT0_CFG_ATEN__SHIFT 0x8 +#define SWRST_CONTROL_2__PORT1_CFG_ATEN__SHIFT 0x9 +#define SWRST_CONTROL_2__PORT2_CFG_ATEN__SHIFT 0xa +#define SWRST_CONTROL_2__PORT3_CFG_ATEN__SHIFT 0xb +#define SWRST_CONTROL_2__PORT4_CFG_ATEN__SHIFT 0xc +#define SWRST_CONTROL_2__PORT5_CFG_ATEN__SHIFT 0xd +#define SWRST_CONTROL_2__PORT6_CFG_ATEN__SHIFT 0xe +#define SWRST_CONTROL_2__PORT7_CFG_ATEN__SHIFT 0xf +#define SWRST_CONTROL_2__BIF0_GLOBAL_RESETATEN__SHIFT 0x18 +#define SWRST_CONTROL_2__BIF0_CALIB_RESETATEN__SHIFT 0x19 +#define SWRST_CONTROL_2__BIF0_CORE_RESETATEN__SHIFT 0x1a +#define SWRST_CONTROL_2__BIF0_REGISTER_RESETATEN__SHIFT 0x1b +#define SWRST_CONTROL_2__BIF0_PHY_RESETATEN__SHIFT 0x1c +#define SWRST_CONTROL_2__BIF0_STICKY_RESETATEN__SHIFT 0x1d +#define SWRST_CONTROL_2__BIF0_CONFIG_RESETATEN__SHIFT 0x1e +#define SWRST_CONTROL_2__PORT0_COR_ATEN_MASK 0x00000001L +#define SWRST_CONTROL_2__PORT0_CFG_ATEN_MASK 0x00000100L +#define SWRST_CONTROL_2__PORT1_CFG_ATEN_MASK 0x00000200L +#define SWRST_CONTROL_2__PORT2_CFG_ATEN_MASK 0x00000400L +#define SWRST_CONTROL_2__PORT3_CFG_ATEN_MASK 0x00000800L +#define SWRST_CONTROL_2__PORT4_CFG_ATEN_MASK 0x00001000L +#define SWRST_CONTROL_2__PORT5_CFG_ATEN_MASK 0x00002000L +#define SWRST_CONTROL_2__PORT6_CFG_ATEN_MASK 0x00004000L +#define SWRST_CONTROL_2__PORT7_CFG_ATEN_MASK 0x00008000L +#define SWRST_CONTROL_2__BIF0_GLOBAL_RESETATEN_MASK 0x01000000L +#define SWRST_CONTROL_2__BIF0_CALIB_RESETATEN_MASK 0x02000000L +#define SWRST_CONTROL_2__BIF0_CORE_RESETATEN_MASK 0x04000000L +#define SWRST_CONTROL_2__BIF0_REGISTER_RESETATEN_MASK 0x08000000L +#define SWRST_CONTROL_2__BIF0_PHY_RESETATEN_MASK 0x10000000L +#define SWRST_CONTROL_2__BIF0_STICKY_RESETATEN_MASK 0x20000000L +#define SWRST_CONTROL_2__BIF0_CONFIG_RESETATEN_MASK 0x40000000L +//SWRST_CONTROL_3 +#define SWRST_CONTROL_3__SWITCHCLK_ATEN__SHIFT 0x15 +#define SWRST_CONTROL_3__RESETAXIMST_ATEN__SHIFT 0x16 +#define SWRST_CONTROL_3__RESETAXISLV_ATEN__SHIFT 0x17 +#define SWRST_CONTROL_3__RESETAXIINT_ATEN__SHIFT 0x18 +#define SWRST_CONTROL_3__RESETPCFG_ATEN__SHIFT 0x19 +#define SWRST_CONTROL_3__RESETLNCT_ATEN__SHIFT 0x1a +#define SWRST_CONTROL_3__RESETMNTR_ATEN__SHIFT 0x1b +#define SWRST_CONTROL_3__RESETHLTR_ATEN__SHIFT 0x1c +#define SWRST_CONTROL_3__RESETCPM_ATEN__SHIFT 0x1d +#define SWRST_CONTROL_3__RESETPHY0_ATEN__SHIFT 0x1e +#define SWRST_CONTROL_3__SWITCHCLK_ATEN_MASK 0x00200000L +#define SWRST_CONTROL_3__RESETAXIMST_ATEN_MASK 0x00400000L +#define SWRST_CONTROL_3__RESETAXISLV_ATEN_MASK 0x00800000L +#define SWRST_CONTROL_3__RESETAXIINT_ATEN_MASK 0x01000000L +#define SWRST_CONTROL_3__RESETPCFG_ATEN_MASK 0x02000000L +#define SWRST_CONTROL_3__RESETLNCT_ATEN_MASK 0x04000000L +#define SWRST_CONTROL_3__RESETMNTR_ATEN_MASK 0x08000000L +#define SWRST_CONTROL_3__RESETHLTR_ATEN_MASK 0x10000000L +#define SWRST_CONTROL_3__RESETCPM_ATEN_MASK 0x20000000L +#define SWRST_CONTROL_3__RESETPHY0_ATEN_MASK 0x40000000L +//SWRST_CONTROL_4 +#define SWRST_CONTROL_4__PORT0_COR_WREN__SHIFT 0x0 +#define 
SWRST_CONTROL_4__PORT0_CFG_WREN__SHIFT 0x8 +#define SWRST_CONTROL_4__PORT1_CFG_WREN__SHIFT 0x9 +#define SWRST_CONTROL_4__PORT2_CFG_WREN__SHIFT 0xa +#define SWRST_CONTROL_4__PORT3_CFG_WREN__SHIFT 0xb +#define SWRST_CONTROL_4__PORT4_CFG_WREN__SHIFT 0xc +#define SWRST_CONTROL_4__PORT5_CFG_WREN__SHIFT 0xd +#define SWRST_CONTROL_4__PORT6_CFG_WREN__SHIFT 0xe +#define SWRST_CONTROL_4__PORT7_CFG_WREN__SHIFT 0xf +#define SWRST_CONTROL_4__BIF0_GLOBAL_WRRESETEN__SHIFT 0x18 +#define SWRST_CONTROL_4__BIF0_CALIB_WRRESETEN__SHIFT 0x19 +#define SWRST_CONTROL_4__BIF0_CORE_WRRESETEN__SHIFT 0x1a +#define SWRST_CONTROL_4__BIF0_REGISTER_WRRESETEN__SHIFT 0x1b +#define SWRST_CONTROL_4__BIF0_PHY_WRRESETEN__SHIFT 0x1c +#define SWRST_CONTROL_4__BIF0_STICKY_WRRESETEN__SHIFT 0x1d +#define SWRST_CONTROL_4__BIF0_CONFIG_WRRESETEN__SHIFT 0x1e +#define SWRST_CONTROL_4__PORT0_COR_WREN_MASK 0x00000001L +#define SWRST_CONTROL_4__PORT0_CFG_WREN_MASK 0x00000100L +#define SWRST_CONTROL_4__PORT1_CFG_WREN_MASK 0x00000200L +#define SWRST_CONTROL_4__PORT2_CFG_WREN_MASK 0x00000400L +#define SWRST_CONTROL_4__PORT3_CFG_WREN_MASK 0x00000800L +#define SWRST_CONTROL_4__PORT4_CFG_WREN_MASK 0x00001000L +#define SWRST_CONTROL_4__PORT5_CFG_WREN_MASK 0x00002000L +#define SWRST_CONTROL_4__PORT6_CFG_WREN_MASK 0x00004000L +#define SWRST_CONTROL_4__PORT7_CFG_WREN_MASK 0x00008000L +#define SWRST_CONTROL_4__BIF0_GLOBAL_WRRESETEN_MASK 0x01000000L +#define SWRST_CONTROL_4__BIF0_CALIB_WRRESETEN_MASK 0x02000000L +#define SWRST_CONTROL_4__BIF0_CORE_WRRESETEN_MASK 0x04000000L +#define SWRST_CONTROL_4__BIF0_REGISTER_WRRESETEN_MASK 0x08000000L +#define SWRST_CONTROL_4__BIF0_PHY_WRRESETEN_MASK 0x10000000L +#define SWRST_CONTROL_4__BIF0_STICKY_WRRESETEN_MASK 0x20000000L +#define SWRST_CONTROL_4__BIF0_CONFIG_WRRESETEN_MASK 0x40000000L +//SWRST_CONTROL_5 +#define SWRST_CONTROL_5__WRSWITCHCLK_EN__SHIFT 0x15 +#define SWRST_CONTROL_5__WRRESETAXIMST_EN__SHIFT 0x16 +#define SWRST_CONTROL_5__WRRESETAXISLV_EN__SHIFT 0x17 +#define SWRST_CONTROL_5__WRRESETAXIINT_EN__SHIFT 0x18 +#define SWRST_CONTROL_5__WRRESETPCFG_EN__SHIFT 0x19 +#define SWRST_CONTROL_5__WRRESETLNCT_EN__SHIFT 0x1a +#define SWRST_CONTROL_5__WRRESETMNTR_EN__SHIFT 0x1b +#define SWRST_CONTROL_5__WRRESETHLTR_EN__SHIFT 0x1c +#define SWRST_CONTROL_5__WRRESETCPM_EN__SHIFT 0x1d +#define SWRST_CONTROL_5__WRRESETPHY0_EN__SHIFT 0x1e +#define SWRST_CONTROL_5__WRSWITCHCLK_EN_MASK 0x00200000L +#define SWRST_CONTROL_5__WRRESETAXIMST_EN_MASK 0x00400000L +#define SWRST_CONTROL_5__WRRESETAXISLV_EN_MASK 0x00800000L +#define SWRST_CONTROL_5__WRRESETAXIINT_EN_MASK 0x01000000L +#define SWRST_CONTROL_5__WRRESETPCFG_EN_MASK 0x02000000L +#define SWRST_CONTROL_5__WRRESETLNCT_EN_MASK 0x04000000L +#define SWRST_CONTROL_5__WRRESETMNTR_EN_MASK 0x08000000L +#define SWRST_CONTROL_5__WRRESETHLTR_EN_MASK 0x10000000L +#define SWRST_CONTROL_5__WRRESETCPM_EN_MASK 0x20000000L +#define SWRST_CONTROL_5__WRRESETPHY0_EN_MASK 0x40000000L +//SWRST_CONTROL_6 +#define SWRST_CONTROL_6__HOLD_TRAINING_A__SHIFT 0x0 +#define SWRST_CONTROL_6__HOLD_TRAINING_B__SHIFT 0x1 +#define SWRST_CONTROL_6__HOLD_TRAINING_C__SHIFT 0x2 +#define SWRST_CONTROL_6__HOLD_TRAINING_D__SHIFT 0x3 +#define SWRST_CONTROL_6__HOLD_TRAINING_E__SHIFT 0x4 +#define SWRST_CONTROL_6__HOLD_TRAINING_F__SHIFT 0x5 +#define SWRST_CONTROL_6__HOLD_TRAINING_G__SHIFT 0x6 +#define SWRST_CONTROL_6__HOLD_TRAINING_H__SHIFT 0x7 +#define SWRST_CONTROL_6__HOLD_TRAINING_I__SHIFT 0x8 +#define SWRST_CONTROL_6__HOLD_TRAINING_J__SHIFT 0x9 +#define SWRST_CONTROL_6__HOLD_TRAINING_K__SHIFT 0xa +#define 
SWRST_CONTROL_6__HOLD_TRAINING_A_MASK 0x00000001L +#define SWRST_CONTROL_6__HOLD_TRAINING_B_MASK 0x00000002L +#define SWRST_CONTROL_6__HOLD_TRAINING_C_MASK 0x00000004L +#define SWRST_CONTROL_6__HOLD_TRAINING_D_MASK 0x00000008L +#define SWRST_CONTROL_6__HOLD_TRAINING_E_MASK 0x00000010L +#define SWRST_CONTROL_6__HOLD_TRAINING_F_MASK 0x00000020L +#define SWRST_CONTROL_6__HOLD_TRAINING_G_MASK 0x00000040L +#define SWRST_CONTROL_6__HOLD_TRAINING_H_MASK 0x00000080L +#define SWRST_CONTROL_6__HOLD_TRAINING_I_MASK 0x00000100L +#define SWRST_CONTROL_6__HOLD_TRAINING_J_MASK 0x00000200L +#define SWRST_CONTROL_6__HOLD_TRAINING_K_MASK 0x00000400L +//SWRST_EP_COMMAND_0 +#define SWRST_EP_COMMAND_0__EP_CFG_RESET_ONLY__SHIFT 0x0 +#define SWRST_EP_COMMAND_0__EP_HOT_RESET__SHIFT 0x8 +#define SWRST_EP_COMMAND_0__EP_LNKDWN_RESET__SHIFT 0x9 +#define SWRST_EP_COMMAND_0__EP_LNKDIS_RESET__SHIFT 0xa +#define SWRST_EP_COMMAND_0__EP_CFG_RESET_ONLY_MASK 0x00000001L +#define SWRST_EP_COMMAND_0__EP_HOT_RESET_MASK 0x00000100L +#define SWRST_EP_COMMAND_0__EP_LNKDWN_RESET_MASK 0x00000200L +#define SWRST_EP_COMMAND_0__EP_LNKDIS_RESET_MASK 0x00000400L +//SWRST_EP_CONTROL_0 +#define SWRST_EP_CONTROL_0__EP_CFG_RESET_ONLY_EN__SHIFT 0x0 +#define SWRST_EP_CONTROL_0__EP_HOT_RESET_EN__SHIFT 0x8 +#define SWRST_EP_CONTROL_0__EP_LNKDWN_RESET_EN__SHIFT 0x9 +#define SWRST_EP_CONTROL_0__EP_LNKDIS_RESET_EN__SHIFT 0xa +#define SWRST_EP_CONTROL_0__EP_CFG_RESET_ONLY_EN_MASK 0x00000001L +#define SWRST_EP_CONTROL_0__EP_HOT_RESET_EN_MASK 0x00000100L +#define SWRST_EP_CONTROL_0__EP_LNKDWN_RESET_EN_MASK 0x00000200L +#define SWRST_EP_CONTROL_0__EP_LNKDIS_RESET_EN_MASK 0x00000400L +//CPM_CONTROL +#define CPM_CONTROL__LCLK_DYN_GATE_ENABLE__SHIFT 0x0 +#define CPM_CONTROL__TXCLK_DYN_GATE_ENABLE__SHIFT 0x1 +#define CPM_CONTROL__L1_PWR_GATE_ENABLE__SHIFT 0x2 +#define CPM_CONTROL__L1_1_PWR_GATE_ENABLE__SHIFT 0x3 +#define CPM_CONTROL__L1_2_PWR_GATE_ENABLE__SHIFT 0x4 +#define CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE__SHIFT 0x5 +#define CPM_CONTROL__TXCLK_REGS_GATE_ENABLE__SHIFT 0x6 +#define CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE__SHIFT 0x7 +#define CPM_CONTROL__REFCLK_REGS_GATE_ENABLE__SHIFT 0x8 +#define CPM_CONTROL__LCLK_DYN_GATE_LATENCY__SHIFT 0x9 +#define CPM_CONTROL__TXCLK_DYN_GATE_LATENCY__SHIFT 0xb +#define CPM_CONTROL__REFCLKREQ_REFCLKACK_LOOPBACK_ENABLE__SHIFT 0xd +#define CPM_CONTROL__TXCLK_REGS_GATE_LATENCY__SHIFT 0xe +#define CPM_CONTROL__REFCLK_REGS_GATE_LATENCY__SHIFT 0xf +#define CPM_CONTROL__LCLK_GATE_TXCLK_FREE__SHIFT 0x10 +#define CPM_CONTROL__RCVR_DET_CLK_ENABLE__SHIFT 0x11 +#define CPM_CONTROL__FAST_TXCLK_LATENCY__SHIFT 0x12 +#define CPM_CONTROL__REGS_IDLE_TO_PG_LATENCY__SHIFT 0x15 +#define CPM_CONTROL__REFCLK_XSTCLK_ENABLE__SHIFT 0x16 +#define CPM_CONTROL__REFCLK_XSTCLK_LATENCY__SHIFT 0x17 +#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE__SHIFT 0x18 +#define CPM_CONTROL__LCLK_GATE_ALLOW_IN_L1__SHIFT 0x19 +#define CPM_CONTROL__PG_EARLY_WAKE_ENABLE__SHIFT 0x1a +#define CPM_CONTROL__PCIE_CORE_IDLE__SHIFT 0x1b +#define CPM_CONTROL__PCIE_LINK_IDLE__SHIFT 0x1c +#define CPM_CONTROL__PCIE_BUFFER_EMPTY__SHIFT 0x1d +#define CPM_CONTROL__SPARE_REGS0__SHIFT 0x1e +#define CPM_CONTROL__IGNORE_REGS_IDLE_IN_PG__SHIFT 0x1f +#define CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK 0x00000001L +#define CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK 0x00000002L +#define CPM_CONTROL__L1_PWR_GATE_ENABLE_MASK 0x00000004L +#define CPM_CONTROL__L1_1_PWR_GATE_ENABLE_MASK 0x00000008L +#define CPM_CONTROL__L1_2_PWR_GATE_ENABLE_MASK 0x00000010L +#define 
CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK 0x00000020L +#define CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK 0x00000040L +#define CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK 0x00000080L +#define CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK 0x00000100L +#define CPM_CONTROL__LCLK_DYN_GATE_LATENCY_MASK 0x00000600L +#define CPM_CONTROL__TXCLK_DYN_GATE_LATENCY_MASK 0x00001800L +#define CPM_CONTROL__REFCLKREQ_REFCLKACK_LOOPBACK_ENABLE_MASK 0x00002000L +#define CPM_CONTROL__TXCLK_REGS_GATE_LATENCY_MASK 0x00004000L +#define CPM_CONTROL__REFCLK_REGS_GATE_LATENCY_MASK 0x00008000L +#define CPM_CONTROL__LCLK_GATE_TXCLK_FREE_MASK 0x00010000L +#define CPM_CONTROL__RCVR_DET_CLK_ENABLE_MASK 0x00020000L +#define CPM_CONTROL__FAST_TXCLK_LATENCY_MASK 0x001C0000L +#define CPM_CONTROL__REGS_IDLE_TO_PG_LATENCY_MASK 0x00200000L +#define CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK 0x00400000L +#define CPM_CONTROL__REFCLK_XSTCLK_LATENCY_MASK 0x00800000L +#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L +#define CPM_CONTROL__LCLK_GATE_ALLOW_IN_L1_MASK 0x02000000L +#define CPM_CONTROL__PG_EARLY_WAKE_ENABLE_MASK 0x04000000L +#define CPM_CONTROL__PCIE_CORE_IDLE_MASK 0x08000000L +#define CPM_CONTROL__PCIE_LINK_IDLE_MASK 0x10000000L +#define CPM_CONTROL__PCIE_BUFFER_EMPTY_MASK 0x20000000L +#define CPM_CONTROL__SPARE_REGS0_MASK 0x40000000L +#define CPM_CONTROL__IGNORE_REGS_IDLE_IN_PG_MASK 0x80000000L +//SMN_APERTURE_ID_A +#define SMN_APERTURE_ID_A__SMU_APERTURE_ID__SHIFT 0x0 +#define SMN_APERTURE_ID_A__SMU_APERTURE_ID_MASK 0x00000FFFL +//SMN_APERTURE_ID_B +#define SMN_APERTURE_ID_B__IOHUB_APERTURE_ID__SHIFT 0x0 +#define SMN_APERTURE_ID_B__NBIF_APERTURE_ID__SHIFT 0xc +#define SMN_APERTURE_ID_B__IOHUB_APERTURE_ID_MASK 0x00000FFFL +#define SMN_APERTURE_ID_B__NBIF_APERTURE_ID_MASK 0x00FFF000L +//RSMU_MASTER_CONTROL +#define RSMU_MASTER_CONTROL__RSMU_MASTER_MESSAGE_SEND_ENABLE__SHIFT 0x0 +#define RSMU_MASTER_CONTROL__RSMU_MASTER_MESSAGE_SEND_ENABLE_MASK 0x00000001L +//RSMU_SLAVE_CONTROL +#define RSMU_SLAVE_CONTROL__RSMU_SLAVE_INVALID_READ_RETURN_ZERO__SHIFT 0x0 +#define RSMU_SLAVE_CONTROL__RSMU_SLAVE_IGNORE_INVALID_CONFIG_WRITE__SHIFT 0x2 +#define RSMU_SLAVE_CONTROL__RSMU_SLAVE_INVALID_READ_RETURN_ZERO_MASK 0x00000001L +#define RSMU_SLAVE_CONTROL__RSMU_SLAVE_IGNORE_INVALID_CONFIG_WRITE_MASK 0x00000004L +//RSMU_POWER_GATING_CONTROL +#define RSMU_POWER_GATING_CONTROL__PWR_GATE_MAC_ONLY__SHIFT 0x0 +#define RSMU_POWER_GATING_CONTROL__PWR_GATE_PHY_ONLY__SHIFT 0x1 +#define RSMU_POWER_GATING_CONTROL__PWR_GATE_MAC_ONLY_MASK 0x00000001L +#define RSMU_POWER_GATING_CONTROL__PWR_GATE_PHY_ONLY_MASK 0x00000002L +//RSMU_BIOS_TIMER_CMD +#define RSMU_BIOS_TIMER_CMD__CFG_TMR_MICROSECONDS__SHIFT 0x0 +#define RSMU_BIOS_TIMER_CMD__CFG_TMR_MICROSECONDS_MASK 0xFFFFFFFFL +//RSMU_BIOS_TIMER_CNTL +#define RSMU_BIOS_TIMER_CNTL__CFG_TMR_CLOCKRATE__SHIFT 0x0 +#define RSMU_BIOS_TIMER_CNTL__CFG_TMR_CLOCKRATE_MASK 0x000000FFL +//LNCNT_CONTROL +#define LNCNT_CONTROL__CFG_LNC_WINDOW_EN__SHIFT 0x0 +#define LNCNT_CONTROL__CFG_LNC_BW_CNT_EN__SHIFT 0x1 +#define LNCNT_CONTROL__CFG_LNC_CMN_CNT_EN__SHIFT 0x2 +#define LNCNT_CONTROL__CFG_LNC_OVRD_EN__SHIFT 0x3 +#define LNCNT_CONTROL__CFG_LNC_OVRD_VAL__SHIFT 0x4 +#define LNCNT_CONTROL__CFG_LNC_WINDOW_EN_MASK 0x00000001L +#define LNCNT_CONTROL__CFG_LNC_BW_CNT_EN_MASK 0x00000002L +#define LNCNT_CONTROL__CFG_LNC_CMN_CNT_EN_MASK 0x00000004L +#define LNCNT_CONTROL__CFG_LNC_OVRD_EN_MASK 0x00000008L +#define LNCNT_CONTROL__CFG_LNC_OVRD_VAL_MASK 0x00000010L +//CFG_LNC_WINDOW_REGISTER +#define 
CFG_LNC_WINDOW_REGISTER__CFG_LNC_WINDOW__SHIFT 0x0 +#define CFG_LNC_WINDOW_REGISTER__CFG_LNC_WINDOW_MASK 0x00FFFFFFL +//LNCNT_QUAN_THRD +#define LNCNT_QUAN_THRD__CFG_LNC_BW_QUAN_THRD__SHIFT 0x0 +#define LNCNT_QUAN_THRD__CFG_LNC_CMN_QUAN_THRD__SHIFT 0x4 +#define LNCNT_QUAN_THRD__CFG_LNC_BW_QUAN_THRD_MASK 0x00000007L +#define LNCNT_QUAN_THRD__CFG_LNC_CMN_QUAN_THRD_MASK 0x00000070L +//LNCNT_WEIGHT +#define LNCNT_WEIGHT__CFG_LNC_BW_WEIGHT__SHIFT 0x0 +#define LNCNT_WEIGHT__CFG_LNC_CMN_WEIGHT__SHIFT 0x10 +#define LNCNT_WEIGHT__CFG_LNC_BW_WEIGHT_MASK 0x0000FFFFL +#define LNCNT_WEIGHT__CFG_LNC_CMN_WEIGHT_MASK 0xFFFF0000L +//LNC_TOTAL_WACC_REGISTER +#define LNC_TOTAL_WACC_REGISTER__LNC_TOTAL_WACC__SHIFT 0x0 +#define LNC_TOTAL_WACC_REGISTER__LNC_TOTAL_WACC_MASK 0xFFFFFFFFL +//LNC_BW_WACC_REGISTER +#define LNC_BW_WACC_REGISTER__LNC_BW_WACC__SHIFT 0x0 +#define LNC_BW_WACC_REGISTER__LNC_BW_WACC_MASK 0xFFFFFFFFL +//LNC_CMN_WACC_REGISTER +#define LNC_CMN_WACC_REGISTER__LNC_CMN_WACC__SHIFT 0x0 +#define LNC_CMN_WACC_REGISTER__LNC_CMN_WACC_MASK 0xFFFFFFFFL +//SMU_INT_PIN_SHARING_PORT_INDICATOR +#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LINK_MANAGEMENT_INT_STATUS__SHIFT 0x0 +#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LTR_INT_STATUS__SHIFT 0x8 +#define SMU_INT_PIN_SHARING_PORT_INDICATOR__DPC_INT_STATUS__SHIFT 0x10 +#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LINK_MANAGEMENT_INT_STATUS_MASK 0x000000FFL +#define SMU_INT_PIN_SHARING_PORT_INDICATOR__LTR_INT_STATUS_MASK 0x0000FF00L +#define SMU_INT_PIN_SHARING_PORT_INDICATOR__DPC_INT_STATUS_MASK 0x00FF0000L +//PCIE_PGMST_CNTL +#define PCIE_PGMST_CNTL__CFG_PG_HYSTERESIS__SHIFT 0x0 +#define PCIE_PGMST_CNTL__CFG_PG_EN__SHIFT 0x8 +#define PCIE_PGMST_CNTL__CFG_IDLENESS_COUNT_EN__SHIFT 0xa +#define PCIE_PGMST_CNTL__CFG_FW_PG_EXIT_CNTL__SHIFT 0xe +#define PCIE_PGMST_CNTL__CFG_PG_HYSTERESIS_MASK 0x000000FFL +#define PCIE_PGMST_CNTL__CFG_PG_EN_MASK 0x00000100L +#define PCIE_PGMST_CNTL__CFG_IDLENESS_COUNT_EN_MASK 0x00003C00L +#define PCIE_PGMST_CNTL__CFG_FW_PG_EXIT_CNTL_MASK 0x0000C000L +//PCIE_PGSLV_CNTL +#define PCIE_PGSLV_CNTL__CFG_IDLE_HYSTERESIS__SHIFT 0x0 +#define PCIE_PGSLV_CNTL__CFG_IDLE_HYSTERESIS_MASK 0x0000001FL +//SMU_PCIE_DF_Address +#define SMU_PCIE_DF_Address__RAS_INTR_CTL_addr__SHIFT 0x0 +#define SMU_PCIE_DF_Address__RAS_INTR_CTL_addr_MASK 0x000FFFFFL + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf0_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF0_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF0_MM_DATA +#define BIF_BX_DEV0_EPF0_VF0_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf0_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF0_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF0_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN +#define 
RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF0_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf0_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF0_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF0_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF0_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define 
BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF0_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define 
BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF0_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF0_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf0_BIFDEC2 +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 
+#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF0_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF0_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf1_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF1_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF1_MM_DATA +#define BIF_BX_DEV0_EPF0_VF1_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf1_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF1_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF1_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF1_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf1_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS +#define 
BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF1_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF1_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ +#define 
BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define 
BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define 
BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF1_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf1_BIFDEC2 +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF1_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define 
RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF1_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf2_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF2_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF2_MM_DATA +#define BIF_BX_DEV0_EPF0_VF2_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf2_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF2_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF2_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF2_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf2_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define 
BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF2_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF2_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP3_MASK 
0x00000008L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF2_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3 +#define 
BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF2_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf2_BIFDEC2 +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO +#define 
RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF2_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF2_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf3_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF3_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF3_MM_DATA +#define BIF_BX_DEV0_EPF0_VF3_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf3_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF3_RCC_ERR_LOG +#define 
RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF3_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF3_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf3_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF3_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define 
BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF3_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define 
BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF3_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL +#define 
BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF3_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf3_BIFDEC2 +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define 
RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF3_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF3_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf4_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF4_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF4_MM_DATA +#define BIF_BX_DEV0_EPF0_VF4_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf4_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF4_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF4_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define 
RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF4_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf4_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF4_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF4_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF4_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define 
BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF4_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF4_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf4_BIFDEC2 +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL 
+//RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF4_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF4_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf5_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF5_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF5_MM_DATA +#define BIF_BX_DEV0_EPF0_VF5_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf5_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF5_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF5_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF5_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf5_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define 
BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF5_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF5_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa 
+#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF5_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define 
BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF5_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf5_BIFDEC2 +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF5_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF5_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf6_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF6_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF6_MM_DATA +#define BIF_BX_DEV0_EPF0_VF6_MM_DATA__MM_DATA__SHIFT 0x0 
+#define BIF_BX_DEV0_EPF0_VF6_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf6_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF6_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF6_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF6_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf6_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF6_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define 
BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF6_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define 
BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF6_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL 
+//BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF6_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf6_BIFDEC2 +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO +#define 
RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF6_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF6_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf7_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF7_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF7_MM_DATA +#define BIF_BX_DEV0_EPF0_VF7_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf7_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF7_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF7_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED +#define 
RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF7_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf7_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF7_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF7_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define 
BIF_BX_DEV0_EPF0_VF7_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP6_MASK 
0x00000040L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF7_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF7_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define 
BIF_BX_DEV0_EPF0_VF7_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF7_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf7_BIFDEC2 +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL 
+//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF7_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF7_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf8_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF8_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF8_MM_DATA +#define BIF_BX_DEV0_EPF0_VF8_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf8_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF8_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF8_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF8_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf8_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF8_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF8_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define 
BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF8_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 
0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define 
BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF8_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf8_BIFDEC2 +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF8_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF8_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf9_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF9_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_APER__SHIFT 0x1f +#define 
BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF9_MM_DATA +#define BIF_BX_DEV0_EPF0_VF9_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf9_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF9_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF9_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF9_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf9_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define 
BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF9_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF9_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define 
BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF9_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF9_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf9_BIFDEC2 +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 
0xFFFFFFFFL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF9_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF9_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf10_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF10_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF10_MM_DATA +#define BIF_BX_DEV0_EPF0_VF10_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf10_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF10_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF10_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define 
RCC_DEV0_EPF0_VF10_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF10_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf10_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF10_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF10_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define 
BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF10_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define 
BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF10_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF10_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L 
+#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF10_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF10_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf10_BIFDEC2 +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define 
RCC_DEV0_EPF0_VF10_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF10_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF10_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf11_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF11_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF11_MM_DATA +#define BIF_BX_DEV0_EPF0_VF11_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf11_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF11_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF11_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF11_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf11_BIFPFVFDEC1 
+//BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF11_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF11_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 
0x00000001L +//BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF11_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define 
BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF11_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf11_BIFDEC2 +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF11_GFXMSIX_PBA +#define 
RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF11_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf12_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF12_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF12_MM_DATA +#define BIF_BX_DEV0_EPF0_VF12_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf12_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF12_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF12_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF12_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf12_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF12_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define 
BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF12_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF12_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L 
+#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF12_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF12_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL 
+//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF12_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L 
+#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF12_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf12_BIFDEC2 +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF12_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF12_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf13_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF13_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF13_MM_DATA +#define BIF_BX_DEV0_EPF0_VF13_MM_DATA__MM_DATA__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF13_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf13_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF13_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF13_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF13_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf13_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF13_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define 
BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF13_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF13_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2 +#define 
BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF13_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf13_BIFDEC2 +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL +#define 
RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF13_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF13_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf14_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF14_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF14_MM_DATA +#define BIF_BX_DEV0_EPF0_VF14_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf14_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF14_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF14_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L 
+//RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF14_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf14_BIFPFVFDEC1 +//BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF14_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF14_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define 
BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF14_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define 
BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF14_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF14_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L 
+#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF14_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF14_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf14_BIFDEC2 +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define 
RCC_DEV0_EPF0_VF14_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF14_GFXMSIX_PBA +#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF14_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf15_SYSPFVFDEC +//BIF_BX_DEV0_EPF0_VF15_MM_INDEX +#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_OFFSET__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_APER__SHIFT 0x1f +#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_OFFSET_MASK 0x7FFFFFFFL +#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX__MM_APER_MASK 0x80000000L +//BIF_BX_DEV0_EPF0_VF15_MM_DATA +#define BIF_BX_DEV0_EPF0_VF15_MM_DATA__MM_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MM_DATA__MM_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI +#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MM_INDEX_HI__MM_OFFSET_HI_MASK 0xFFFFFFFFL + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf15_BIFPFVFDEC1 +//RCC_DEV0_EPF0_VF15_RCC_ERR_LOG +#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__INVALID_REG_ACCESS_IN_SRIOV_STATUS_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF15_RCC_ERR_LOG__DOORBELL_READ_ACCESS_STATUS_MASK 0x00000002L +//RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN +#define RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_RCC_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x00000001L +//RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE +#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_MEMSIZE__CONFIG_MEMSIZE_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED +#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED__CONFIG_RESERVED__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_RCC_CONFIG_RESERVED__CONFIG_RESERVED_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER +#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE__SHIFT 0x1f +#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__FUNC_IDENTIFIER_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF15_RCC_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK 0x80000000L + + +// addressBlock: nbio_nbif0_bif_bx_dev0_epf0_vf15_BIFPFVFDEC1 
+//BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS +#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__DMA_ON_BME_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__DMA_ON_BME_LOW_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_BIF_BME_STATUS__CLEAR_DMA_ON_BME_LOW_MASK 0x00010000L +//BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW__SHIFT 0x11 +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH__SHIFT 0x12 +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR__SHIFT 0x13 +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_OPCODE_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_REQEN_LOW_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_LENGTH_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__UR_ATOMIC_NR_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_OPCODE_MASK 0x00010000L +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_REQEN_LOW_MASK 0x00020000L +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_LENGTH_MASK 0x00040000L +#define BIF_BX_DEV0_EPF0_VF15_BIF_ATOMIC_ERR_LOG__CLEAR_UR_ATOMIC_NR_MASK 0x00080000L +//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_HIGH__DOORBELL_SELFRING_GPA_APER_BASE_HIGH_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_BASE_LOW__DOORBELL_SELFRING_GPA_APER_BASE_LOW_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_MODE_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF15_DOORBELL_SELFRING_GPA_APER_CNTL__DOORBELL_SELFRING_GPA_APER_SIZE_MASK 0x000FFF00L +//BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_HDP_REG_COHERENCY_FLUSH_CNTL__HDP_REG_FLUSH_ADDR_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL +#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_HDP_MEM_COHERENCY_FLUSH_CNTL__HDP_MEM_FLUSH_ADDR_MASK 
0x00000001L +//BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_REQ__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP0__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP1__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP2__SHIFT 0x2 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP3__SHIFT 0x3 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP4__SHIFT 0x4 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP5__SHIFT 0x5 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP6__SHIFT 0x6 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP7__SHIFT 0x7 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP8__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP9__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA0__SHIFT 0xa +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA1__SHIFT 0xb +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP1_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP2_MASK 0x00000004L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP3_MASK 0x00000008L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP4_MASK 0x00000010L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP5_MASK 0x00000020L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP6_MASK 0x00000040L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP7_MASK 0x00000080L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP8_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__CP9_MASK 0x00000200L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA0_MASK 0x00000400L +#define BIF_BX_DEV0_EPF0_VF15_GPU_HDP_FLUSH_DONE__SDMA1_MASK 0x00000800L +//BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING +#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING__SHIFT 0x0 +#define 
BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_MST_TRANS_PENDING_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_BIF_TRANS_PENDING__BIF_SLV_TRANS_PENDING_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS +#define BIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_NBIF_GFX_ADDR_LUT_BYPASS__LUT_BYPASS_MASK 0x00000001L +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_TRN_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW0__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW1__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW2__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_MSGBUF_RCV_DW3__MSGBUF_DATA_MASK 0xFFFFFFFFL +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_VALID__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_ACK__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_VALID__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_ACK__SHIFT 0x9 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_VALID_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__TRN_MSG_ACK_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_VALID_MASK 0x00000100L +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_CONTROL__RCV_MSG_ACK_MASK 0x00000200L +//BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__VALID_INT_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__ACK_INT_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__VALID_INT_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_MAILBOX_INT_CNTL__ACK_INT_EN_MASK 0x00000002L +//BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN__SHIFT 0x0 +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN__SHIFT 0x1 +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA__SHIFT 0x8 +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID__SHIFT 0xf +#define 
BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA__SHIFT 0x10 +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID__SHIFT 0x17 +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK__SHIFT 0x18 +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK__SHIFT 0x19 +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_ACK_INTR_EN_MASK 0x00000001L +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_VALID_INTR_EN_MASK 0x00000002L +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_DATA_MASK 0x00000F00L +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_VALID_MASK 0x00008000L +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_DATA_MASK 0x000F0000L +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_VALID_MASK 0x00800000L +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_TRN_MSG_ACK_MASK 0x01000000L +#define BIF_BX_DEV0_EPF0_VF15_BIF_VMHV_MAILBOX__VMHV_MAILBOX_RCV_MSG_ACK_MASK 0x02000000L + + +// addressBlock: nbio_nbif0_rcc_dev0_epf0_vf15_BIFDEC2 +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT0_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT1_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_LO__MSG_ADDR_LO_MASK 0xFFFFFFFCL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_ADDR_HI__MSG_ADDR_HI_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA__MSG_DATA__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_MSG_DATA__MSG_DATA_MASK 0xFFFFFFFFL +//RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL__MASK_BIT__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_VECT2_CONTROL__MASK_BIT_MASK 0x00000001L +//RCC_DEV0_EPF0_VF15_GFXMSIX_PBA +#define 
RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_0__SHIFT 0x0 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1__SHIFT 0x1 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2__SHIFT 0x2 +#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_0_MASK 0x00000001L +#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L +#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L + +#endif -- GitLab From c62d3cd0ddd629606a3830aa22e9dcc6c2a0d3bf Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Wed, 17 Jan 2018 19:42:33 +0800 Subject: [PATCH 0394/1692] drm/amdgpu/include: Add sdma0/1 4.2 register headerfiles. (v3) These are the System DMA register headers for vega20. v2: cleanups (Alex) v3: add missing licenses (Alex) Signed-off-by: Feifei Xu Acked-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../include/asic_reg/sdma0/sdma0_4_2_offset.h | 1047 ++++++ .../asic_reg/sdma0/sdma0_4_2_sh_mask.h | 2992 +++++++++++++++++ .../include/asic_reg/sdma1/sdma1_4_2_offset.h | 1039 ++++++ .../asic_reg/sdma1/sdma1_4_2_sh_mask.h | 2948 ++++++++++++++++ 4 files changed, 8026 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h new file mode 100644 index 000000000000..30b2f5df1402 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h @@ -0,0 +1,1047 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma0_4_2_0_OFFSET_HEADER +#define _sdma0_4_2_0_OFFSET_HEADER + + + +// addressBlock: sdma0_sdma0dec +// base address: 0x4980 +#define mmSDMA0_UCODE_ADDR 0x0000 +#define mmSDMA0_UCODE_ADDR_BASE_IDX 0 +#define mmSDMA0_UCODE_DATA 0x0001 +#define mmSDMA0_UCODE_DATA_BASE_IDX 0 +#define mmSDMA0_VM_CNTL 0x0004 +#define mmSDMA0_VM_CNTL_BASE_IDX 0 +#define mmSDMA0_VM_CTX_LO 0x0005 +#define mmSDMA0_VM_CTX_LO_BASE_IDX 0 +#define mmSDMA0_VM_CTX_HI 0x0006 +#define mmSDMA0_VM_CTX_HI_BASE_IDX 0 +#define mmSDMA0_ACTIVE_FCN_ID 0x0007 +#define mmSDMA0_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmSDMA0_VM_CTX_CNTL 0x0008 +#define mmSDMA0_VM_CTX_CNTL_BASE_IDX 0 +#define mmSDMA0_VIRT_RESET_REQ 0x0009 +#define mmSDMA0_VIRT_RESET_REQ_BASE_IDX 0 +#define mmSDMA0_VF_ENABLE 0x000a +#define mmSDMA0_VF_ENABLE_BASE_IDX 0 +#define mmSDMA0_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 0 +#define mmSDMA0_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 0 +#define mmSDMA0_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 0 +#define mmSDMA0_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA0_CONTEXT_REG_TYPE3_BASE_IDX 0 +#define mmSDMA0_PUB_REG_TYPE0 0x000f +#define mmSDMA0_PUB_REG_TYPE0_BASE_IDX 0 +#define mmSDMA0_PUB_REG_TYPE1 0x0010 +#define mmSDMA0_PUB_REG_TYPE1_BASE_IDX 0 +#define mmSDMA0_PUB_REG_TYPE2 0x0011 +#define mmSDMA0_PUB_REG_TYPE2_BASE_IDX 0 +#define mmSDMA0_PUB_REG_TYPE3 0x0012 +#define mmSDMA0_PUB_REG_TYPE3_BASE_IDX 0 +#define mmSDMA0_MMHUB_CNTL 0x0013 +#define mmSDMA0_MMHUB_CNTL_BASE_IDX 0 +#define mmSDMA0_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0 +#define mmSDMA0_POWER_CNTL 0x001a +#define mmSDMA0_POWER_CNTL_BASE_IDX 0 +#define mmSDMA0_CLK_CTRL 0x001b +#define mmSDMA0_CLK_CTRL_BASE_IDX 0 +#define mmSDMA0_CNTL 0x001c +#define mmSDMA0_CNTL_BASE_IDX 0 +#define mmSDMA0_CHICKEN_BITS 0x001d +#define mmSDMA0_CHICKEN_BITS_BASE_IDX 0 +#define mmSDMA0_GB_ADDR_CONFIG 0x001e +#define mmSDMA0_GB_ADDR_CONFIG_BASE_IDX 0 +#define mmSDMA0_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0 +#define mmSDMA0_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0 +#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0 +#define mmSDMA0_RB_RPTR_FETCH 0x0022 +#define mmSDMA0_RB_RPTR_FETCH_BASE_IDX 0 +#define mmSDMA0_IB_OFFSET_FETCH 0x0023 +#define mmSDMA0_IB_OFFSET_FETCH_BASE_IDX 0 +#define mmSDMA0_PROGRAM 0x0024 +#define mmSDMA0_PROGRAM_BASE_IDX 0 +#define mmSDMA0_STATUS_REG 0x0025 +#define mmSDMA0_STATUS_REG_BASE_IDX 0 +#define mmSDMA0_STATUS1_REG 0x0026 +#define mmSDMA0_STATUS1_REG_BASE_IDX 0 +#define mmSDMA0_RD_BURST_CNTL 0x0027 +#define mmSDMA0_RD_BURST_CNTL_BASE_IDX 0 +#define mmSDMA0_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0 +#define mmSDMA0_UCODE_CHECKSUM 0x0029 +#define mmSDMA0_UCODE_CHECKSUM_BASE_IDX 0 +#define mmSDMA0_F32_CNTL 0x002a +#define mmSDMA0_F32_CNTL_BASE_IDX 0 +#define mmSDMA0_FREEZE 0x002b +#define mmSDMA0_FREEZE_BASE_IDX 0 +#define mmSDMA0_PHASE0_QUANTUM 0x002c +#define mmSDMA0_PHASE0_QUANTUM_BASE_IDX 0 +#define mmSDMA0_PHASE1_QUANTUM 0x002d +#define mmSDMA0_PHASE1_QUANTUM_BASE_IDX 0 +#define mmSDMA_POWER_GATING 0x002e +#define mmSDMA_POWER_GATING_BASE_IDX 0 +#define mmSDMA_PGFSM_CONFIG 0x002f +#define mmSDMA_PGFSM_CONFIG_BASE_IDX 0 +#define mmSDMA_PGFSM_WRITE 0x0030 +#define mmSDMA_PGFSM_WRITE_BASE_IDX 0 +#define mmSDMA_PGFSM_READ 0x0031 +#define mmSDMA_PGFSM_READ_BASE_IDX 0 +#define mmSDMA0_EDC_CONFIG 
0x0032 +#define mmSDMA0_EDC_CONFIG_BASE_IDX 0 +#define mmSDMA0_BA_THRESHOLD 0x0033 +#define mmSDMA0_BA_THRESHOLD_BASE_IDX 0 +#define mmSDMA0_ID 0x0034 +#define mmSDMA0_ID_BASE_IDX 0 +#define mmSDMA0_VERSION 0x0035 +#define mmSDMA0_VERSION_BASE_IDX 0 +#define mmSDMA0_EDC_COUNTER 0x0036 +#define mmSDMA0_EDC_COUNTER_BASE_IDX 0 +#define mmSDMA0_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0 +#define mmSDMA0_STATUS2_REG 0x0038 +#define mmSDMA0_STATUS2_REG_BASE_IDX 0 +#define mmSDMA0_ATOMIC_CNTL 0x0039 +#define mmSDMA0_ATOMIC_CNTL_BASE_IDX 0 +#define mmSDMA0_ATOMIC_PREOP_LO 0x003a +#define mmSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0 +#define mmSDMA0_ATOMIC_PREOP_HI 0x003b +#define mmSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0 +#define mmSDMA0_UTCL1_CNTL 0x003c +#define mmSDMA0_UTCL1_CNTL_BASE_IDX 0 +#define mmSDMA0_UTCL1_WATERMK 0x003d +#define mmSDMA0_UTCL1_WATERMK_BASE_IDX 0 +#define mmSDMA0_UTCL1_RD_STATUS 0x003e +#define mmSDMA0_UTCL1_RD_STATUS_BASE_IDX 0 +#define mmSDMA0_UTCL1_WR_STATUS 0x003f +#define mmSDMA0_UTCL1_WR_STATUS_BASE_IDX 0 +#define mmSDMA0_UTCL1_INV0 0x0040 +#define mmSDMA0_UTCL1_INV0_BASE_IDX 0 +#define mmSDMA0_UTCL1_INV1 0x0041 +#define mmSDMA0_UTCL1_INV1_BASE_IDX 0 +#define mmSDMA0_UTCL1_INV2 0x0042 +#define mmSDMA0_UTCL1_INV2_BASE_IDX 0 +#define mmSDMA0_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0 +#define mmSDMA0_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0 +#define mmSDMA0_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0 +#define mmSDMA0_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0 +#define mmSDMA0_UTCL1_TIMEOUT 0x0047 +#define mmSDMA0_UTCL1_TIMEOUT_BASE_IDX 0 +#define mmSDMA0_UTCL1_PAGE 0x0048 +#define mmSDMA0_UTCL1_PAGE_BASE_IDX 0 +#define mmSDMA0_POWER_CNTL_IDLE 0x0049 +#define mmSDMA0_POWER_CNTL_IDLE_BASE_IDX 0 +#define mmSDMA0_RELAX_ORDERING_LUT 0x004a +#define mmSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0 +#define mmSDMA0_CHICKEN_BITS_2 0x004b +#define mmSDMA0_CHICKEN_BITS_2_BASE_IDX 0 +#define mmSDMA0_STATUS3_REG 0x004c +#define mmSDMA0_STATUS3_REG_BASE_IDX 0 +#define mmSDMA0_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_PHASE2_QUANTUM 0x004f +#define mmSDMA0_PHASE2_QUANTUM_BASE_IDX 0 +#define mmSDMA0_ERROR_LOG 0x0050 +#define mmSDMA0_ERROR_LOG_BASE_IDX 0 +#define mmSDMA0_PUB_DUMMY_REG0 0x0051 +#define mmSDMA0_PUB_DUMMY_REG0_BASE_IDX 0 +#define mmSDMA0_PUB_DUMMY_REG1 0x0052 +#define mmSDMA0_PUB_DUMMY_REG1_BASE_IDX 0 +#define mmSDMA0_PUB_DUMMY_REG2 0x0053 +#define mmSDMA0_PUB_DUMMY_REG2_BASE_IDX 0 +#define mmSDMA0_PUB_DUMMY_REG3 0x0054 +#define mmSDMA0_PUB_DUMMY_REG3_BASE_IDX 0 +#define mmSDMA0_F32_COUNTER 0x0055 +#define mmSDMA0_F32_COUNTER_BASE_IDX 0 +#define mmSDMA0_PERFMON_CNTL 0x0057 +#define mmSDMA0_PERFMON_CNTL_BASE_IDX 0 +#define mmSDMA0_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA0_PERFCOUNTER0_RESULT_BASE_IDX 0 +#define mmSDMA0_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA0_PERFCOUNTER1_RESULT_BASE_IDX 0 +#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0 +#define mmSDMA0_CRD_CNTL 0x005b +#define mmSDMA0_CRD_CNTL_BASE_IDX 0 +#define mmSDMA0_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0 +#define mmSDMA0_ULV_CNTL 0x005e +#define mmSDMA0_ULV_CNTL_BASE_IDX 0 +#define mmSDMA0_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0 +#define 
mmSDMA0_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0 +#define mmSDMA0_GFX_RB_CNTL 0x0080 +#define mmSDMA0_GFX_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_RB_BASE 0x0081 +#define mmSDMA0_GFX_RB_BASE_BASE_IDX 0 +#define mmSDMA0_GFX_RB_BASE_HI 0x0082 +#define mmSDMA0_GFX_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_RPTR 0x0083 +#define mmSDMA0_GFX_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA0_GFX_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR 0x0085 +#define mmSDMA0_GFX_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA0_GFX_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_GFX_IB_CNTL 0x008a +#define mmSDMA0_GFX_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_IB_RPTR 0x008b +#define mmSDMA0_GFX_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_GFX_IB_OFFSET 0x008c +#define mmSDMA0_GFX_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_GFX_IB_BASE_LO 0x008d +#define mmSDMA0_GFX_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_GFX_IB_BASE_HI 0x008e +#define mmSDMA0_GFX_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_GFX_IB_SIZE 0x008f +#define mmSDMA0_GFX_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_GFX_SKIP_CNTL 0x0090 +#define mmSDMA0_GFX_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA0_GFX_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_GFX_DOORBELL 0x0092 +#define mmSDMA0_GFX_DOORBELL_BASE_IDX 0 +#define mmSDMA0_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA0_GFX_CONTEXT_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_STATUS 0x00a8 +#define mmSDMA0_GFX_STATUS_BASE_IDX 0 +#define mmSDMA0_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA0_GFX_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_GFX_WATERMARK 0x00aa +#define mmSDMA0_GFX_WATERMARK_BASE_IDX 0 +#define mmSDMA0_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA0_GFX_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA0_GFX_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA0_GFX_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA0_GFX_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_GFX_PREEMPT 0x00b0 +#define mmSDMA0_GFX_PREEMPT_BASE_IDX 0 +#define mmSDMA0_GFX_DUMMY_REG 0x00b1 +#define mmSDMA0_GFX_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA0_GFX_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA0_GFX_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA0_GFX_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA0_GFX_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA0_GFX_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA0_GFX_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA0_GFX_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA0_GFX_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA0_GFX_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA0_GFX_MIDCMD_DATA7_BASE_IDX 0 +#define 
mmSDMA0_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA0_GFX_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_CNTL 0x00c9 +#define mmSDMA0_GFX_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_CNTL 0x00e0 +#define mmSDMA0_PAGE_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_BASE 0x00e1 +#define mmSDMA0_PAGE_RB_BASE_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_BASE_HI 0x00e2 +#define mmSDMA0_PAGE_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_RPTR 0x00e3 +#define mmSDMA0_PAGE_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_RPTR_HI 0x00e4 +#define mmSDMA0_PAGE_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR 0x00e5 +#define mmSDMA0_PAGE_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR_HI 0x00e6 +#define mmSDMA0_PAGE_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL 0x00e7 +#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI 0x00e8 +#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO 0x00e9 +#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_CNTL 0x00ea +#define mmSDMA0_PAGE_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_RPTR 0x00eb +#define mmSDMA0_PAGE_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_OFFSET 0x00ec +#define mmSDMA0_PAGE_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_BASE_LO 0x00ed +#define mmSDMA0_PAGE_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_BASE_HI 0x00ee +#define mmSDMA0_PAGE_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_SIZE 0x00ef +#define mmSDMA0_PAGE_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_PAGE_SKIP_CNTL 0x00f0 +#define mmSDMA0_PAGE_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_CONTEXT_STATUS 0x00f1 +#define mmSDMA0_PAGE_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_PAGE_DOORBELL 0x00f2 +#define mmSDMA0_PAGE_DOORBELL_BASE_IDX 0 +#define mmSDMA0_PAGE_STATUS 0x0108 +#define mmSDMA0_PAGE_STATUS_BASE_IDX 0 +#define mmSDMA0_PAGE_DOORBELL_LOG 0x0109 +#define mmSDMA0_PAGE_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_PAGE_WATERMARK 0x010a +#define mmSDMA0_PAGE_WATERMARK_BASE_IDX 0 +#define mmSDMA0_PAGE_DOORBELL_OFFSET 0x010b +#define mmSDMA0_PAGE_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_PAGE_CSA_ADDR_LO 0x010c +#define mmSDMA0_PAGE_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_PAGE_CSA_ADDR_HI 0x010d +#define mmSDMA0_PAGE_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_SUB_REMAIN 0x010f +#define mmSDMA0_PAGE_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_PAGE_PREEMPT 0x0110 +#define mmSDMA0_PAGE_PREEMPT_BASE_IDX 0 +#define mmSDMA0_PAGE_DUMMY_REG 0x0111 +#define mmSDMA0_PAGE_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI 0x0112 +#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO 0x0113 +#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_AQL_CNTL 0x0114 +#define mmSDMA0_PAGE_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_MINOR_PTR_UPDATE 0x0115 +#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA0 0x0120 +#define mmSDMA0_PAGE_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA1 0x0121 +#define mmSDMA0_PAGE_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA2 0x0122 +#define mmSDMA0_PAGE_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA3 0x0123 +#define mmSDMA0_PAGE_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA4 0x0124 +#define mmSDMA0_PAGE_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA5 0x0125 +#define mmSDMA0_PAGE_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA6 0x0126 +#define mmSDMA0_PAGE_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA7 
0x0127 +#define mmSDMA0_PAGE_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA8 0x0128 +#define mmSDMA0_PAGE_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_CNTL 0x0129 +#define mmSDMA0_PAGE_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_CNTL 0x0140 +#define mmSDMA0_RLC0_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_BASE 0x0141 +#define mmSDMA0_RLC0_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_BASE_HI 0x0142 +#define mmSDMA0_RLC0_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_RPTR 0x0143 +#define mmSDMA0_RLC0_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_RPTR_HI 0x0144 +#define mmSDMA0_RLC0_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR 0x0145 +#define mmSDMA0_RLC0_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR_HI 0x0146 +#define mmSDMA0_RLC0_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL 0x0147 +#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI 0x0148 +#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO 0x0149 +#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_CNTL 0x014a +#define mmSDMA0_RLC0_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_RPTR 0x014b +#define mmSDMA0_RLC0_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_OFFSET 0x014c +#define mmSDMA0_RLC0_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_BASE_LO 0x014d +#define mmSDMA0_RLC0_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_BASE_HI 0x014e +#define mmSDMA0_RLC0_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_SIZE 0x014f +#define mmSDMA0_RLC0_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC0_SKIP_CNTL 0x0150 +#define mmSDMA0_RLC0_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_CONTEXT_STATUS 0x0151 +#define mmSDMA0_RLC0_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC0_DOORBELL 0x0152 +#define mmSDMA0_RLC0_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC0_STATUS 0x0168 +#define mmSDMA0_RLC0_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC0_DOORBELL_LOG 0x0169 +#define mmSDMA0_RLC0_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC0_WATERMARK 0x016a +#define mmSDMA0_RLC0_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC0_DOORBELL_OFFSET 0x016b +#define mmSDMA0_RLC0_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC0_CSA_ADDR_LO 0x016c +#define mmSDMA0_RLC0_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC0_CSA_ADDR_HI 0x016d +#define mmSDMA0_RLC0_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_SUB_REMAIN 0x016f +#define mmSDMA0_RLC0_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC0_PREEMPT 0x0170 +#define mmSDMA0_RLC0_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC0_DUMMY_REG 0x0171 +#define mmSDMA0_RLC0_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0x0172 +#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0x0173 +#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_AQL_CNTL 0x0174 +#define mmSDMA0_RLC0_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_MINOR_PTR_UPDATE 0x0175 +#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA0 0x0180 +#define mmSDMA0_RLC0_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA1 0x0181 +#define mmSDMA0_RLC0_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA2 0x0182 +#define mmSDMA0_RLC0_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA3 0x0183 +#define mmSDMA0_RLC0_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA4 0x0184 +#define mmSDMA0_RLC0_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA5 0x0185 +#define mmSDMA0_RLC0_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA6 0x0186 +#define 
mmSDMA0_RLC0_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA7 0x0187 +#define mmSDMA0_RLC0_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA8 0x0188 +#define mmSDMA0_RLC0_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_CNTL 0x0189 +#define mmSDMA0_RLC0_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_CNTL 0x01a0 +#define mmSDMA0_RLC1_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_BASE 0x01a1 +#define mmSDMA0_RLC1_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_BASE_HI 0x01a2 +#define mmSDMA0_RLC1_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_RPTR 0x01a3 +#define mmSDMA0_RLC1_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_RPTR_HI 0x01a4 +#define mmSDMA0_RLC1_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR 0x01a5 +#define mmSDMA0_RLC1_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR_HI 0x01a6 +#define mmSDMA0_RLC1_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL 0x01a7 +#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI 0x01a8 +#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO 0x01a9 +#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_CNTL 0x01aa +#define mmSDMA0_RLC1_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_RPTR 0x01ab +#define mmSDMA0_RLC1_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_OFFSET 0x01ac +#define mmSDMA0_RLC1_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_BASE_LO 0x01ad +#define mmSDMA0_RLC1_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_BASE_HI 0x01ae +#define mmSDMA0_RLC1_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_SIZE 0x01af +#define mmSDMA0_RLC1_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC1_SKIP_CNTL 0x01b0 +#define mmSDMA0_RLC1_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_CONTEXT_STATUS 0x01b1 +#define mmSDMA0_RLC1_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC1_DOORBELL 0x01b2 +#define mmSDMA0_RLC1_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC1_STATUS 0x01c8 +#define mmSDMA0_RLC1_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC1_DOORBELL_LOG 0x01c9 +#define mmSDMA0_RLC1_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC1_WATERMARK 0x01ca +#define mmSDMA0_RLC1_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC1_DOORBELL_OFFSET 0x01cb +#define mmSDMA0_RLC1_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC1_CSA_ADDR_LO 0x01cc +#define mmSDMA0_RLC1_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC1_CSA_ADDR_HI 0x01cd +#define mmSDMA0_RLC1_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_SUB_REMAIN 0x01cf +#define mmSDMA0_RLC1_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC1_PREEMPT 0x01d0 +#define mmSDMA0_RLC1_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC1_DUMMY_REG 0x01d1 +#define mmSDMA0_RLC1_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI 0x01d2 +#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO 0x01d3 +#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_AQL_CNTL 0x01d4 +#define mmSDMA0_RLC1_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_MINOR_PTR_UPDATE 0x01d5 +#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA0 0x01e0 +#define mmSDMA0_RLC1_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA1 0x01e1 +#define mmSDMA0_RLC1_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA2 0x01e2 +#define mmSDMA0_RLC1_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA3 0x01e3 +#define mmSDMA0_RLC1_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA4 0x01e4 +#define mmSDMA0_RLC1_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA5 0x01e5 +#define 
mmSDMA0_RLC1_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA6 0x01e6 +#define mmSDMA0_RLC1_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA7 0x01e7 +#define mmSDMA0_RLC1_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA8 0x01e8 +#define mmSDMA0_RLC1_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_CNTL 0x01e9 +#define mmSDMA0_RLC1_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_CNTL 0x0200 +#define mmSDMA0_RLC2_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_BASE 0x0201 +#define mmSDMA0_RLC2_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_BASE_HI 0x0202 +#define mmSDMA0_RLC2_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_RPTR 0x0203 +#define mmSDMA0_RLC2_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_RPTR_HI 0x0204 +#define mmSDMA0_RLC2_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR 0x0205 +#define mmSDMA0_RLC2_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR_HI 0x0206 +#define mmSDMA0_RLC2_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL 0x0207 +#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI 0x0208 +#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO 0x0209 +#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_CNTL 0x020a +#define mmSDMA0_RLC2_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_RPTR 0x020b +#define mmSDMA0_RLC2_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_OFFSET 0x020c +#define mmSDMA0_RLC2_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_BASE_LO 0x020d +#define mmSDMA0_RLC2_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_BASE_HI 0x020e +#define mmSDMA0_RLC2_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_SIZE 0x020f +#define mmSDMA0_RLC2_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC2_SKIP_CNTL 0x0210 +#define mmSDMA0_RLC2_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_CONTEXT_STATUS 0x0211 +#define mmSDMA0_RLC2_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC2_DOORBELL 0x0212 +#define mmSDMA0_RLC2_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC2_STATUS 0x0228 +#define mmSDMA0_RLC2_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC2_DOORBELL_LOG 0x0229 +#define mmSDMA0_RLC2_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC2_WATERMARK 0x022a +#define mmSDMA0_RLC2_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC2_DOORBELL_OFFSET 0x022b +#define mmSDMA0_RLC2_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC2_CSA_ADDR_LO 0x022c +#define mmSDMA0_RLC2_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC2_CSA_ADDR_HI 0x022d +#define mmSDMA0_RLC2_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_SUB_REMAIN 0x022f +#define mmSDMA0_RLC2_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC2_PREEMPT 0x0230 +#define mmSDMA0_RLC2_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC2_DUMMY_REG 0x0231 +#define mmSDMA0_RLC2_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI 0x0232 +#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO 0x0233 +#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_AQL_CNTL 0x0234 +#define mmSDMA0_RLC2_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_MINOR_PTR_UPDATE 0x0235 +#define mmSDMA0_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA0 0x0240 +#define mmSDMA0_RLC2_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA1 0x0241 +#define mmSDMA0_RLC2_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA2 0x0242 +#define mmSDMA0_RLC2_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA3 0x0243 +#define mmSDMA0_RLC2_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA4 0x0244 +#define 
mmSDMA0_RLC2_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA5 0x0245 +#define mmSDMA0_RLC2_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA6 0x0246 +#define mmSDMA0_RLC2_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA7 0x0247 +#define mmSDMA0_RLC2_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA8 0x0248 +#define mmSDMA0_RLC2_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_CNTL 0x0249 +#define mmSDMA0_RLC2_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_CNTL 0x0260 +#define mmSDMA0_RLC3_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_BASE 0x0261 +#define mmSDMA0_RLC3_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_BASE_HI 0x0262 +#define mmSDMA0_RLC3_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_RPTR 0x0263 +#define mmSDMA0_RLC3_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_RPTR_HI 0x0264 +#define mmSDMA0_RLC3_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR 0x0265 +#define mmSDMA0_RLC3_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR_HI 0x0266 +#define mmSDMA0_RLC3_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL 0x0267 +#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI 0x0268 +#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO 0x0269 +#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_CNTL 0x026a +#define mmSDMA0_RLC3_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_RPTR 0x026b +#define mmSDMA0_RLC3_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_OFFSET 0x026c +#define mmSDMA0_RLC3_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_BASE_LO 0x026d +#define mmSDMA0_RLC3_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_BASE_HI 0x026e +#define mmSDMA0_RLC3_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_SIZE 0x026f +#define mmSDMA0_RLC3_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC3_SKIP_CNTL 0x0270 +#define mmSDMA0_RLC3_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_CONTEXT_STATUS 0x0271 +#define mmSDMA0_RLC3_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC3_DOORBELL 0x0272 +#define mmSDMA0_RLC3_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC3_STATUS 0x0288 +#define mmSDMA0_RLC3_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC3_DOORBELL_LOG 0x0289 +#define mmSDMA0_RLC3_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC3_WATERMARK 0x028a +#define mmSDMA0_RLC3_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC3_DOORBELL_OFFSET 0x028b +#define mmSDMA0_RLC3_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC3_CSA_ADDR_LO 0x028c +#define mmSDMA0_RLC3_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC3_CSA_ADDR_HI 0x028d +#define mmSDMA0_RLC3_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_SUB_REMAIN 0x028f +#define mmSDMA0_RLC3_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC3_PREEMPT 0x0290 +#define mmSDMA0_RLC3_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC3_DUMMY_REG 0x0291 +#define mmSDMA0_RLC3_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI 0x0292 +#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO 0x0293 +#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_AQL_CNTL 0x0294 +#define mmSDMA0_RLC3_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_MINOR_PTR_UPDATE 0x0295 +#define mmSDMA0_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA0 0x02a0 +#define mmSDMA0_RLC3_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA1 0x02a1 +#define mmSDMA0_RLC3_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA2 0x02a2 +#define mmSDMA0_RLC3_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA3 0x02a3 +#define 
mmSDMA0_RLC3_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA4 0x02a4 +#define mmSDMA0_RLC3_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA5 0x02a5 +#define mmSDMA0_RLC3_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA6 0x02a6 +#define mmSDMA0_RLC3_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA7 0x02a7 +#define mmSDMA0_RLC3_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA8 0x02a8 +#define mmSDMA0_RLC3_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_CNTL 0x02a9 +#define mmSDMA0_RLC3_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_CNTL 0x02c0 +#define mmSDMA0_RLC4_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_BASE 0x02c1 +#define mmSDMA0_RLC4_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_BASE_HI 0x02c2 +#define mmSDMA0_RLC4_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_RPTR 0x02c3 +#define mmSDMA0_RLC4_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_RPTR_HI 0x02c4 +#define mmSDMA0_RLC4_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR 0x02c5 +#define mmSDMA0_RLC4_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR_HI 0x02c6 +#define mmSDMA0_RLC4_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL 0x02c7 +#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI 0x02c8 +#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO 0x02c9 +#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_CNTL 0x02ca +#define mmSDMA0_RLC4_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_RPTR 0x02cb +#define mmSDMA0_RLC4_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_OFFSET 0x02cc +#define mmSDMA0_RLC4_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_BASE_LO 0x02cd +#define mmSDMA0_RLC4_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_BASE_HI 0x02ce +#define mmSDMA0_RLC4_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_SIZE 0x02cf +#define mmSDMA0_RLC4_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC4_SKIP_CNTL 0x02d0 +#define mmSDMA0_RLC4_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_CONTEXT_STATUS 0x02d1 +#define mmSDMA0_RLC4_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC4_DOORBELL 0x02d2 +#define mmSDMA0_RLC4_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC4_STATUS 0x02e8 +#define mmSDMA0_RLC4_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC4_DOORBELL_LOG 0x02e9 +#define mmSDMA0_RLC4_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC4_WATERMARK 0x02ea +#define mmSDMA0_RLC4_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC4_DOORBELL_OFFSET 0x02eb +#define mmSDMA0_RLC4_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC4_CSA_ADDR_LO 0x02ec +#define mmSDMA0_RLC4_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC4_CSA_ADDR_HI 0x02ed +#define mmSDMA0_RLC4_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_SUB_REMAIN 0x02ef +#define mmSDMA0_RLC4_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC4_PREEMPT 0x02f0 +#define mmSDMA0_RLC4_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC4_DUMMY_REG 0x02f1 +#define mmSDMA0_RLC4_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI 0x02f2 +#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO 0x02f3 +#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_AQL_CNTL 0x02f4 +#define mmSDMA0_RLC4_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_MINOR_PTR_UPDATE 0x02f5 +#define mmSDMA0_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA0 0x0300 +#define mmSDMA0_RLC4_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA1 0x0301 +#define mmSDMA0_RLC4_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA2 0x0302 +#define 
mmSDMA0_RLC4_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA3 0x0303 +#define mmSDMA0_RLC4_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA4 0x0304 +#define mmSDMA0_RLC4_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA5 0x0305 +#define mmSDMA0_RLC4_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA6 0x0306 +#define mmSDMA0_RLC4_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA7 0x0307 +#define mmSDMA0_RLC4_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA8 0x0308 +#define mmSDMA0_RLC4_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_CNTL 0x0309 +#define mmSDMA0_RLC4_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_CNTL 0x0320 +#define mmSDMA0_RLC5_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_BASE 0x0321 +#define mmSDMA0_RLC5_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_BASE_HI 0x0322 +#define mmSDMA0_RLC5_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_RPTR 0x0323 +#define mmSDMA0_RLC5_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_RPTR_HI 0x0324 +#define mmSDMA0_RLC5_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR 0x0325 +#define mmSDMA0_RLC5_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR_HI 0x0326 +#define mmSDMA0_RLC5_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL 0x0327 +#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI 0x0328 +#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO 0x0329 +#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_CNTL 0x032a +#define mmSDMA0_RLC5_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_RPTR 0x032b +#define mmSDMA0_RLC5_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_OFFSET 0x032c +#define mmSDMA0_RLC5_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_BASE_LO 0x032d +#define mmSDMA0_RLC5_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_BASE_HI 0x032e +#define mmSDMA0_RLC5_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_SIZE 0x032f +#define mmSDMA0_RLC5_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC5_SKIP_CNTL 0x0330 +#define mmSDMA0_RLC5_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_CONTEXT_STATUS 0x0331 +#define mmSDMA0_RLC5_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC5_DOORBELL 0x0332 +#define mmSDMA0_RLC5_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC5_STATUS 0x0348 +#define mmSDMA0_RLC5_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC5_DOORBELL_LOG 0x0349 +#define mmSDMA0_RLC5_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC5_WATERMARK 0x034a +#define mmSDMA0_RLC5_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC5_DOORBELL_OFFSET 0x034b +#define mmSDMA0_RLC5_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC5_CSA_ADDR_LO 0x034c +#define mmSDMA0_RLC5_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC5_CSA_ADDR_HI 0x034d +#define mmSDMA0_RLC5_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_SUB_REMAIN 0x034f +#define mmSDMA0_RLC5_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC5_PREEMPT 0x0350 +#define mmSDMA0_RLC5_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC5_DUMMY_REG 0x0351 +#define mmSDMA0_RLC5_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI 0x0352 +#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO 0x0353 +#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_AQL_CNTL 0x0354 +#define mmSDMA0_RLC5_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_MINOR_PTR_UPDATE 0x0355 +#define mmSDMA0_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA0 0x0360 +#define mmSDMA0_RLC5_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA1 0x0361 +#define 
mmSDMA0_RLC5_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA2 0x0362 +#define mmSDMA0_RLC5_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA3 0x0363 +#define mmSDMA0_RLC5_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA4 0x0364 +#define mmSDMA0_RLC5_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA5 0x0365 +#define mmSDMA0_RLC5_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA6 0x0366 +#define mmSDMA0_RLC5_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA7 0x0367 +#define mmSDMA0_RLC5_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA8 0x0368 +#define mmSDMA0_RLC5_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_CNTL 0x0369 +#define mmSDMA0_RLC5_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_CNTL 0x0380 +#define mmSDMA0_RLC6_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_BASE 0x0381 +#define mmSDMA0_RLC6_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_BASE_HI 0x0382 +#define mmSDMA0_RLC6_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_RPTR 0x0383 +#define mmSDMA0_RLC6_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_RPTR_HI 0x0384 +#define mmSDMA0_RLC6_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR 0x0385 +#define mmSDMA0_RLC6_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR_HI 0x0386 +#define mmSDMA0_RLC6_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL 0x0387 +#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI 0x0388 +#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO 0x0389 +#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_CNTL 0x038a +#define mmSDMA0_RLC6_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_RPTR 0x038b +#define mmSDMA0_RLC6_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_OFFSET 0x038c +#define mmSDMA0_RLC6_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_BASE_LO 0x038d +#define mmSDMA0_RLC6_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_BASE_HI 0x038e +#define mmSDMA0_RLC6_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_SIZE 0x038f +#define mmSDMA0_RLC6_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC6_SKIP_CNTL 0x0390 +#define mmSDMA0_RLC6_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_CONTEXT_STATUS 0x0391 +#define mmSDMA0_RLC6_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC6_DOORBELL 0x0392 +#define mmSDMA0_RLC6_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC6_STATUS 0x03a8 +#define mmSDMA0_RLC6_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC6_DOORBELL_LOG 0x03a9 +#define mmSDMA0_RLC6_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC6_WATERMARK 0x03aa +#define mmSDMA0_RLC6_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC6_DOORBELL_OFFSET 0x03ab +#define mmSDMA0_RLC6_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC6_CSA_ADDR_LO 0x03ac +#define mmSDMA0_RLC6_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC6_CSA_ADDR_HI 0x03ad +#define mmSDMA0_RLC6_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_SUB_REMAIN 0x03af +#define mmSDMA0_RLC6_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC6_PREEMPT 0x03b0 +#define mmSDMA0_RLC6_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC6_DUMMY_REG 0x03b1 +#define mmSDMA0_RLC6_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI 0x03b2 +#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO 0x03b3 +#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_AQL_CNTL 0x03b4 +#define mmSDMA0_RLC6_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_MINOR_PTR_UPDATE 0x03b5 +#define mmSDMA0_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA0 0x03c0 +#define 
mmSDMA0_RLC6_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA1 0x03c1 +#define mmSDMA0_RLC6_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA2 0x03c2 +#define mmSDMA0_RLC6_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA3 0x03c3 +#define mmSDMA0_RLC6_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA4 0x03c4 +#define mmSDMA0_RLC6_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA5 0x03c5 +#define mmSDMA0_RLC6_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA6 0x03c6 +#define mmSDMA0_RLC6_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA7 0x03c7 +#define mmSDMA0_RLC6_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA8 0x03c8 +#define mmSDMA0_RLC6_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_CNTL 0x03c9 +#define mmSDMA0_RLC6_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_CNTL 0x03e0 +#define mmSDMA0_RLC7_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_BASE 0x03e1 +#define mmSDMA0_RLC7_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_BASE_HI 0x03e2 +#define mmSDMA0_RLC7_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_RPTR 0x03e3 +#define mmSDMA0_RLC7_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_RPTR_HI 0x03e4 +#define mmSDMA0_RLC7_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR 0x03e5 +#define mmSDMA0_RLC7_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR_HI 0x03e6 +#define mmSDMA0_RLC7_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL 0x03e7 +#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI 0x03e8 +#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO 0x03e9 +#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_CNTL 0x03ea +#define mmSDMA0_RLC7_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_RPTR 0x03eb +#define mmSDMA0_RLC7_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_OFFSET 0x03ec +#define mmSDMA0_RLC7_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_BASE_LO 0x03ed +#define mmSDMA0_RLC7_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_BASE_HI 0x03ee +#define mmSDMA0_RLC7_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_SIZE 0x03ef +#define mmSDMA0_RLC7_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC7_SKIP_CNTL 0x03f0 +#define mmSDMA0_RLC7_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_CONTEXT_STATUS 0x03f1 +#define mmSDMA0_RLC7_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC7_DOORBELL 0x03f2 +#define mmSDMA0_RLC7_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC7_STATUS 0x0408 +#define mmSDMA0_RLC7_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC7_DOORBELL_LOG 0x0409 +#define mmSDMA0_RLC7_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC7_WATERMARK 0x040a +#define mmSDMA0_RLC7_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC7_DOORBELL_OFFSET 0x040b +#define mmSDMA0_RLC7_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC7_CSA_ADDR_LO 0x040c +#define mmSDMA0_RLC7_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC7_CSA_ADDR_HI 0x040d +#define mmSDMA0_RLC7_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_SUB_REMAIN 0x040f +#define mmSDMA0_RLC7_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC7_PREEMPT 0x0410 +#define mmSDMA0_RLC7_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC7_DUMMY_REG 0x0411 +#define mmSDMA0_RLC7_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI 0x0412 +#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO 0x0413 +#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_AQL_CNTL 0x0414 +#define mmSDMA0_RLC7_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_MINOR_PTR_UPDATE 0x0415 +#define 
mmSDMA0_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA0 0x0420
+#define mmSDMA0_RLC7_MIDCMD_DATA0_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA1 0x0421
+#define mmSDMA0_RLC7_MIDCMD_DATA1_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA2 0x0422
+#define mmSDMA0_RLC7_MIDCMD_DATA2_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA3 0x0423
+#define mmSDMA0_RLC7_MIDCMD_DATA3_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA4 0x0424
+#define mmSDMA0_RLC7_MIDCMD_DATA4_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA5 0x0425
+#define mmSDMA0_RLC7_MIDCMD_DATA5_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA6 0x0426
+#define mmSDMA0_RLC7_MIDCMD_DATA6_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA7 0x0427
+#define mmSDMA0_RLC7_MIDCMD_DATA7_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_DATA8 0x0428
+#define mmSDMA0_RLC7_MIDCMD_DATA8_BASE_IDX 0
+#define mmSDMA0_RLC7_MIDCMD_CNTL 0x0429
+#define mmSDMA0_RLC7_MIDCMD_CNTL_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h
new file mode 100644
index 000000000000..11bfb43833ca
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h
@@ -0,0 +1,2992 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _sdma0_4_2_0_SH_MASK_HEADER +#define _sdma0_4_2_0_SH_MASK_HEADER + + +// addressBlock: sdma0_sdma0dec +//SDMA0_UCODE_ADDR +#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA0_UCODE_DATA +#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA0_VM_CNTL +#define SDMA0_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA0_VM_CTX_LO +#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_VM_CTX_HI +#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_ACTIVE_FCN_ID +#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA0_VM_CTX_CNTL +#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA0_VIRT_RESET_REQ +#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA0_VF_ENABLE +#define SDMA0_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA0_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA0_CONTEXT_REG_TYPE0 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE__SHIFT 0x1 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL__SHIFT 0xa +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR__SHIFT 0xb +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE__SHIFT 0xf +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL__SHIFT 0x12 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_MASK 0x00000002L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL_MASK 0x00040000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA0_CONTEXT_REG_TYPE1 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS__SHIFT 0x8 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK__SHIFT 0xa +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT__SHIFT 0x10 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS_MASK 0x00000100L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK_MASK 0x00000400L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT_MASK 0x00010000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA0_CONTEXT_REG_TYPE2 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA0_CONTEXT_REG_TYPE3 +#define SDMA0_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA0_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA0_PUB_REG_TYPE0 +#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR__SHIFT 0x0 +#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA__SHIFT 0x1 +#define SDMA0_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL__SHIFT 0x4 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO__SHIFT 0x5 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI__SHIFT 0x6 +#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA0_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL__SHIFT 0x13 +#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14 +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a +#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL__SHIFT 0x1b +#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c +#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d +#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR_MASK 0x00000001L +#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA_MASK 0x00000002L +#define SDMA0_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL_MASK 0x00000010L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO_MASK 0x00000020L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI_MASK 0x00000040L +#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA0_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2_MASK 
0x00002000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL_MASK 0x00080000L +#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL_MASK 0x08000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA0_PUB_REG_TYPE1 +#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4 +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5 +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6 +#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL__SHIFT 0xa +#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb +#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12 +#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13 +#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14 +#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15 +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16 +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18 +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L +#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L +#define 
SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL_MASK 0x00000400L +#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L +#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA0_PUB_REG_TYPE2 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x0 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x1 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x2 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE__SHIFT 0x8 +#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL__SHIFT 0x17 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b +#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL__SHIFT 0x1e +#define SDMA0_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000001L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000002L +#define 
SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000004L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE_MASK 0x00000100L +#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL_MASK 0x00800000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL_MASK 0x40000000L +#define SDMA0_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA0_PUB_REG_TYPE3 +#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x2 +#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL +//SDMA0_MMHUB_CNTL +#define SDMA0_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA0_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA0_CONTEXT_GROUP_BOUNDARY +#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA0_POWER_CNTL +#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0 +#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1 +#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2 +#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3 +#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA0_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a +#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L +#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L +#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L +#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L +#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define 
SDMA0_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA0_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L +//SDMA0_CLK_CTRL +#define SDMA0_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA0_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA0_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA0_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA0_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA0_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA0_CNTL +#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA0_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA0_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA0_CHICKEN_BITS +#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define 
SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L +#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA0_GB_ADDR_CONFIG +#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA0_GB_ADDR_CONFIG_READ +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA0_RB_RPTR_FETCH_HI +#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA0_RB_RPTR_FETCH +#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA0_IB_OFFSET_FETCH +#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA0_PROGRAM +#define SDMA0_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA0_STATUS_REG +#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define 
SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA0_STATUS1_REG +#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define 
SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA0_STATUS1_REG__EX_START_MASK 0x00008000L +#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA0_RD_BURST_CNTL +#define SDMA0_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA0_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA0_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA0_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA0_HBM_PAGE_CONFIG +#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L +//SDMA0_UCODE_CHECKSUM +#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA0_F32_CNTL +#define SDMA0_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA0_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA0_F32_CNTL__STEP_MASK 0x00000002L +//SDMA0_FREEZE +#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA0_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA0_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA0_PHASE0_QUANTUM +#define SDMA0_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA0_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA0_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA0_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA0_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA0_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA0_PHASE1_QUANTUM +#define SDMA0_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA0_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA0_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA0_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA0_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA0_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA_POWER_GATING +#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION__SHIFT 0x0 +#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION__SHIFT 0x1 +#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ__SHIFT 0x2 +#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ__SHIFT 0x3 +#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4 +#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION_MASK 0x00000001L +#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION_MASK 0x00000002L +#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ_MASK 0x00000004L +#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ_MASK 0x00000008L +#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L +//SDMA_PGFSM_CONFIG +#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0 +#define SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8 +#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9 +#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa +#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb +#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc +#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd +#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b +#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c +#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL +#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L +#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L +#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L +#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L +#define 
SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L +#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L +#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L +#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L +//SDMA_PGFSM_WRITE +#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0 +#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL +//SDMA_PGFSM_READ +#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0 +#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL +//SDMA0_EDC_CONFIG +#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA0_BA_THRESHOLD +#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA0_ID +#define SDMA0_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA0_VERSION +#define SDMA0_VERSION__MINVER__SHIFT 0x0 +#define SDMA0_VERSION__MAJVER__SHIFT 0x8 +#define SDMA0_VERSION__REV__SHIFT 0x10 +#define SDMA0_VERSION__MINVER_MASK 0x0000007FL +#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA0_VERSION__REV_MASK 0x003F0000L +//SDMA0_EDC_COUNTER +#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define 
SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA0_EDC_COUNTER_CLEAR +#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA0_STATUS2_REG +#define SDMA0_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA0_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2 +#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L +#define SDMA0_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL +#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA0_ATOMIC_CNTL +#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA0_ATOMIC_PREOP_LO +#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA0_ATOMIC_PREOP_HI +#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_CNTL +#define SDMA0_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define SDMA0_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA0_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA0_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA0_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA0_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA0_UTCL1_WATERMK +#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA0_UTCL1_RD_STATUS +#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define 
SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA0_UTCL1_WR_STATUS +#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define 
SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA0_UTCL1_INV0 +#define SDMA0_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA0_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA0_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA0_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA0_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 
+#define SDMA0_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA0_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA0_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA0_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA0_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA0_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA0_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA0_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA0_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA0_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA0_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA0_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA0_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA0_UTCL1_INV1 +#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_INV2 +#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_RD_XNACK0 +#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_RD_XNACK1 +#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA0_UTCL1_WR_XNACK0 +#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_WR_XNACK1 +#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA0_UTCL1_TIMEOUT +#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA0_UTCL1_PAGE +#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA0_POWER_CNTL_IDLE +#define 
SDMA0_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA0_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA0_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA0_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA0_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA0_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA0_RELAX_ORDERING_LUT +#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA0_CHICKEN_BITS_2 +#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA0_STATUS3_REG +#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA0_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA0_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA0_PHYSICAL_ADDR_LO +#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define 
SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA0_PHYSICAL_ADDR_HI +#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA0_PHASE2_QUANTUM +#define SDMA0_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA0_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA0_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA0_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA0_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA0_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA0_ERROR_LOG +#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA0_PUB_DUMMY_REG0 +#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA0_PUB_DUMMY_REG1 +#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA0_PUB_DUMMY_REG2 +#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA0_PUB_DUMMY_REG3 +#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA0_F32_COUNTER +#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA0_PERFMON_CNTL +#define SDMA0_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA0_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA0_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA0_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA0_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA0_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA0_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA0_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define SDMA0_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA0_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA0_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA0_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA0_PERFCOUNTER0_RESULT +#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA0_PERFCOUNTER1_RESULT +#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA0_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA0_CRD_CNTL +#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA0_GPU_IOV_VIOLATION_LOG +#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12 +#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13 +#define 
SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14 +#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18 +#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL +#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L +#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L +#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L +#define SDMA0_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L +//SDMA0_ULV_CNTL +#define SDMA0_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA0_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA0_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA0_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA0_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA0_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA0_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA0_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA0_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA0_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA0_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA0_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA0_EA_DBIT_ADDR_DATA +#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA0_EA_DBIT_ADDR_INDEX +#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA0_GFX_RB_CNTL +#define SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_GFX_RB_BASE +#define SDMA0_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_BASE_HI +#define SDMA0_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_GFX_RB_RPTR +#define SDMA0_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_RPTR_HI +#define SDMA0_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR +#define SDMA0_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR_HI +#define SDMA0_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR_POLL_CNTL +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define 
SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_GFX_RB_RPTR_ADDR_HI +#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_RPTR_ADDR_LO +#define SDMA0_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_GFX_IB_CNTL +#define SDMA0_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_GFX_IB_RPTR +#define SDMA0_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_GFX_IB_OFFSET +#define SDMA0_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_GFX_IB_BASE_LO +#define SDMA0_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_GFX_IB_BASE_HI +#define SDMA0_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_IB_SIZE +#define SDMA0_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_GFX_SKIP_CNTL +#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_GFX_CONTEXT_STATUS +#define SDMA0_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_GFX_DOORBELL +#define SDMA0_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_GFX_CONTEXT_CNTL +#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA0_GFX_STATUS +#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L 
+//SDMA0_GFX_DOORBELL_LOG +#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_GFX_WATERMARK +#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_GFX_DOORBELL_OFFSET +#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_GFX_CSA_ADDR_LO +#define SDMA0_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_GFX_CSA_ADDR_HI +#define SDMA0_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_IB_SUB_REMAIN +#define SDMA0_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_GFX_PREEMPT +#define SDMA0_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_GFX_DUMMY_REG +#define SDMA0_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_GFX_RB_AQL_CNTL +#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_GFX_MINOR_PTR_UPDATE +#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_GFX_MIDCMD_DATA0 +#define SDMA0_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA1 +#define SDMA0_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA2 +#define SDMA0_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA3 +#define SDMA0_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA4 +#define SDMA0_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA5 +#define SDMA0_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA6 +#define SDMA0_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA7 +#define SDMA0_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA8 +#define SDMA0_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_CNTL +#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 
+#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_PAGE_RB_CNTL +#define SDMA0_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_PAGE_RB_BASE +#define SDMA0_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_BASE_HI +#define SDMA0_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_PAGE_RB_RPTR +#define SDMA0_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_RPTR_HI +#define SDMA0_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR +#define SDMA0_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR_HI +#define SDMA0_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR_POLL_CNTL +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_PAGE_RB_RPTR_ADDR_HI +#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_RPTR_ADDR_LO +#define SDMA0_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_PAGE_IB_CNTL +#define SDMA0_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_PAGE_IB_RPTR +#define 
SDMA0_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_PAGE_IB_OFFSET +#define SDMA0_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_PAGE_IB_BASE_LO +#define SDMA0_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_PAGE_IB_BASE_HI +#define SDMA0_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_IB_SIZE +#define SDMA0_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_PAGE_SKIP_CNTL +#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_PAGE_CONTEXT_STATUS +#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_PAGE_DOORBELL +#define SDMA0_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_PAGE_STATUS +#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_PAGE_DOORBELL_LOG +#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_PAGE_WATERMARK +#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_PAGE_DOORBELL_OFFSET +#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_PAGE_CSA_ADDR_LO +#define SDMA0_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_PAGE_CSA_ADDR_HI +#define SDMA0_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_IB_SUB_REMAIN +#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_PAGE_PREEMPT +#define SDMA0_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_PAGE_DUMMY_REG +#define SDMA0_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL 
+//SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_PAGE_RB_AQL_CNTL +#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_PAGE_MINOR_PTR_UPDATE +#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_PAGE_MIDCMD_DATA0 +#define SDMA0_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA1 +#define SDMA0_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA2 +#define SDMA0_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA3 +#define SDMA0_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA4 +#define SDMA0_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA5 +#define SDMA0_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA6 +#define SDMA0_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA7 +#define SDMA0_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA8 +#define SDMA0_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_CNTL +#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC0_RB_CNTL +#define SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC0_RB_BASE +#define 
SDMA0_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_BASE_HI +#define SDMA0_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC0_RB_RPTR +#define SDMA0_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_RPTR_HI +#define SDMA0_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR +#define SDMA0_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR_HI +#define SDMA0_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR_POLL_CNTL +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC0_RB_RPTR_ADDR_HI +#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_RPTR_ADDR_LO +#define SDMA0_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC0_IB_CNTL +#define SDMA0_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC0_IB_RPTR +#define SDMA0_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC0_IB_OFFSET +#define SDMA0_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC0_IB_BASE_LO +#define SDMA0_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC0_IB_BASE_HI +#define SDMA0_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_IB_SIZE +#define SDMA0_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC0_SKIP_CNTL +#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC0_CONTEXT_STATUS +#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define 
SDMA0_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC0_DOORBELL +#define SDMA0_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC0_STATUS +#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC0_DOORBELL_LOG +#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC0_WATERMARK +#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC0_DOORBELL_OFFSET +#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC0_CSA_ADDR_LO +#define SDMA0_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC0_CSA_ADDR_HI +#define SDMA0_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_IB_SUB_REMAIN +#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC0_PREEMPT +#define SDMA0_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC0_DUMMY_REG +#define SDMA0_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC0_RB_AQL_CNTL +#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC0_MINOR_PTR_UPDATE +#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC0_MIDCMD_DATA0 +#define SDMA0_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA1 +#define SDMA0_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA2 +#define SDMA0_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define 
SDMA0_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA3 +#define SDMA0_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA4 +#define SDMA0_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA5 +#define SDMA0_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA6 +#define SDMA0_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA7 +#define SDMA0_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA8 +#define SDMA0_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_CNTL +#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC1_RB_CNTL +#define SDMA0_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC1_RB_BASE +#define SDMA0_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_BASE_HI +#define SDMA0_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC1_RB_RPTR +#define SDMA0_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_RPTR_HI +#define SDMA0_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR +#define SDMA0_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR_HI +#define SDMA0_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR_POLL_CNTL +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define 
SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC1_RB_RPTR_ADDR_HI +#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_RPTR_ADDR_LO +#define SDMA0_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC1_IB_CNTL +#define SDMA0_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC1_IB_RPTR +#define SDMA0_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC1_IB_OFFSET +#define SDMA0_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC1_IB_BASE_LO +#define SDMA0_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC1_IB_BASE_HI +#define SDMA0_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_IB_SIZE +#define SDMA0_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC1_SKIP_CNTL +#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC1_CONTEXT_STATUS +#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC1_DOORBELL +#define SDMA0_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC1_STATUS +#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC1_DOORBELL_LOG +#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define 
SDMA0_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC1_WATERMARK +#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC1_DOORBELL_OFFSET +#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC1_CSA_ADDR_LO +#define SDMA0_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC1_CSA_ADDR_HI +#define SDMA0_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_IB_SUB_REMAIN +#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC1_PREEMPT +#define SDMA0_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC1_DUMMY_REG +#define SDMA0_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC1_RB_AQL_CNTL +#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC1_MINOR_PTR_UPDATE +#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC1_MIDCMD_DATA0 +#define SDMA0_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA1 +#define SDMA0_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA2 +#define SDMA0_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA3 +#define SDMA0_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA4 +#define SDMA0_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA5 +#define SDMA0_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA6 +#define SDMA0_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA7 +#define SDMA0_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA8 +#define SDMA0_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_CNTL +#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define 
SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC2_RB_CNTL +#define SDMA0_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC2_RB_BASE +#define SDMA0_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_BASE_HI +#define SDMA0_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC2_RB_RPTR +#define SDMA0_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_RPTR_HI +#define SDMA0_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR +#define SDMA0_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR_HI +#define SDMA0_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR_POLL_CNTL +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC2_RB_RPTR_ADDR_HI +#define SDMA0_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_RPTR_ADDR_LO +#define SDMA0_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC2_IB_CNTL +#define SDMA0_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC2_IB_RPTR +#define SDMA0_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC2_IB_OFFSET +#define 
SDMA0_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC2_IB_BASE_LO +#define SDMA0_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC2_IB_BASE_HI +#define SDMA0_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_IB_SIZE +#define SDMA0_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC2_SKIP_CNTL +#define SDMA0_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC2_CONTEXT_STATUS +#define SDMA0_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC2_DOORBELL +#define SDMA0_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC2_STATUS +#define SDMA0_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC2_DOORBELL_LOG +#define SDMA0_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC2_WATERMARK +#define SDMA0_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC2_DOORBELL_OFFSET +#define SDMA0_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC2_CSA_ADDR_LO +#define SDMA0_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC2_CSA_ADDR_HI +#define SDMA0_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_IB_SUB_REMAIN +#define SDMA0_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC2_PREEMPT +#define SDMA0_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC2_DUMMY_REG +#define SDMA0_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define 
SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC2_RB_AQL_CNTL +#define SDMA0_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC2_MINOR_PTR_UPDATE +#define SDMA0_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC2_MIDCMD_DATA0 +#define SDMA0_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA1 +#define SDMA0_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA2 +#define SDMA0_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA3 +#define SDMA0_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA4 +#define SDMA0_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA5 +#define SDMA0_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA6 +#define SDMA0_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA7 +#define SDMA0_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA8 +#define SDMA0_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_CNTL +#define SDMA0_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC3_RB_CNTL +#define SDMA0_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC3_RB_BASE +#define SDMA0_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_BASE_HI 
+#define SDMA0_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC3_RB_RPTR +#define SDMA0_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_RPTR_HI +#define SDMA0_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR +#define SDMA0_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR_HI +#define SDMA0_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR_POLL_CNTL +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC3_RB_RPTR_ADDR_HI +#define SDMA0_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_RPTR_ADDR_LO +#define SDMA0_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC3_IB_CNTL +#define SDMA0_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC3_IB_RPTR +#define SDMA0_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC3_IB_OFFSET +#define SDMA0_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC3_IB_BASE_LO +#define SDMA0_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC3_IB_BASE_HI +#define SDMA0_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_IB_SIZE +#define SDMA0_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC3_SKIP_CNTL +#define SDMA0_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC3_CONTEXT_STATUS +#define SDMA0_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define 
SDMA0_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC3_DOORBELL +#define SDMA0_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC3_STATUS +#define SDMA0_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC3_DOORBELL_LOG +#define SDMA0_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC3_WATERMARK +#define SDMA0_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC3_DOORBELL_OFFSET +#define SDMA0_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC3_CSA_ADDR_LO +#define SDMA0_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC3_CSA_ADDR_HI +#define SDMA0_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_IB_SUB_REMAIN +#define SDMA0_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC3_PREEMPT +#define SDMA0_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC3_DUMMY_REG +#define SDMA0_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC3_RB_AQL_CNTL +#define SDMA0_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC3_MINOR_PTR_UPDATE +#define SDMA0_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC3_MIDCMD_DATA0 +#define SDMA0_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA1 +#define SDMA0_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA2 +#define SDMA0_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA3 +#define SDMA0_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define 
SDMA0_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA4 +#define SDMA0_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA5 +#define SDMA0_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA6 +#define SDMA0_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA7 +#define SDMA0_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA8 +#define SDMA0_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_CNTL +#define SDMA0_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC4_RB_CNTL +#define SDMA0_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC4_RB_BASE +#define SDMA0_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_BASE_HI +#define SDMA0_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC4_RB_RPTR +#define SDMA0_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_RPTR_HI +#define SDMA0_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR +#define SDMA0_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR_HI +#define SDMA0_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR_POLL_CNTL +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define 
SDMA0_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC4_RB_RPTR_ADDR_HI +#define SDMA0_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_RPTR_ADDR_LO +#define SDMA0_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC4_IB_CNTL +#define SDMA0_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC4_IB_RPTR +#define SDMA0_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC4_IB_OFFSET +#define SDMA0_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC4_IB_BASE_LO +#define SDMA0_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC4_IB_BASE_HI +#define SDMA0_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_IB_SIZE +#define SDMA0_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC4_SKIP_CNTL +#define SDMA0_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC4_CONTEXT_STATUS +#define SDMA0_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC4_DOORBELL +#define SDMA0_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC4_STATUS +#define SDMA0_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC4_DOORBELL_LOG +#define SDMA0_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC4_WATERMARK +#define SDMA0_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define 
SDMA0_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC4_DOORBELL_OFFSET +#define SDMA0_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC4_CSA_ADDR_LO +#define SDMA0_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC4_CSA_ADDR_HI +#define SDMA0_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_IB_SUB_REMAIN +#define SDMA0_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC4_PREEMPT +#define SDMA0_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC4_DUMMY_REG +#define SDMA0_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC4_RB_AQL_CNTL +#define SDMA0_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC4_MINOR_PTR_UPDATE +#define SDMA0_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC4_MIDCMD_DATA0 +#define SDMA0_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA1 +#define SDMA0_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA2 +#define SDMA0_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA3 +#define SDMA0_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA4 +#define SDMA0_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA5 +#define SDMA0_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA6 +#define SDMA0_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA7 +#define SDMA0_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA8 +#define SDMA0_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_CNTL +#define SDMA0_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC5_RB_CNTL 
+#define SDMA0_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC5_RB_BASE +#define SDMA0_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_BASE_HI +#define SDMA0_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC5_RB_RPTR +#define SDMA0_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_RPTR_HI +#define SDMA0_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR +#define SDMA0_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR_HI +#define SDMA0_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR_POLL_CNTL +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC5_RB_RPTR_ADDR_HI +#define SDMA0_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_RPTR_ADDR_LO +#define SDMA0_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC5_IB_CNTL +#define SDMA0_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC5_IB_RPTR +#define SDMA0_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC5_IB_OFFSET +#define SDMA0_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC5_IB_BASE_LO +#define 
SDMA0_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC5_IB_BASE_HI +#define SDMA0_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_IB_SIZE +#define SDMA0_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC5_SKIP_CNTL +#define SDMA0_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC5_CONTEXT_STATUS +#define SDMA0_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC5_DOORBELL +#define SDMA0_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC5_STATUS +#define SDMA0_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC5_DOORBELL_LOG +#define SDMA0_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC5_WATERMARK +#define SDMA0_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC5_DOORBELL_OFFSET +#define SDMA0_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC5_CSA_ADDR_LO +#define SDMA0_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC5_CSA_ADDR_HI +#define SDMA0_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_IB_SUB_REMAIN +#define SDMA0_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC5_PREEMPT +#define SDMA0_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC5_DUMMY_REG +#define SDMA0_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define 
SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC5_RB_AQL_CNTL +#define SDMA0_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC5_MINOR_PTR_UPDATE +#define SDMA0_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC5_MIDCMD_DATA0 +#define SDMA0_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA1 +#define SDMA0_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA2 +#define SDMA0_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA3 +#define SDMA0_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA4 +#define SDMA0_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA5 +#define SDMA0_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA6 +#define SDMA0_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA7 +#define SDMA0_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA8 +#define SDMA0_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_CNTL +#define SDMA0_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC6_RB_CNTL +#define SDMA0_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC6_RB_BASE +#define SDMA0_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_BASE_HI +#define SDMA0_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC6_RB_RPTR +#define 
SDMA0_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_RPTR_HI +#define SDMA0_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR +#define SDMA0_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR_HI +#define SDMA0_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR_POLL_CNTL +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC6_RB_RPTR_ADDR_HI +#define SDMA0_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_RPTR_ADDR_LO +#define SDMA0_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC6_IB_CNTL +#define SDMA0_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC6_IB_RPTR +#define SDMA0_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC6_IB_OFFSET +#define SDMA0_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC6_IB_BASE_LO +#define SDMA0_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC6_IB_BASE_HI +#define SDMA0_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_IB_SIZE +#define SDMA0_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC6_SKIP_CNTL +#define SDMA0_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC6_CONTEXT_STATUS +#define SDMA0_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define 
SDMA0_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC6_DOORBELL +#define SDMA0_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC6_STATUS +#define SDMA0_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC6_DOORBELL_LOG +#define SDMA0_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC6_WATERMARK +#define SDMA0_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC6_DOORBELL_OFFSET +#define SDMA0_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC6_CSA_ADDR_LO +#define SDMA0_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC6_CSA_ADDR_HI +#define SDMA0_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_IB_SUB_REMAIN +#define SDMA0_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC6_PREEMPT +#define SDMA0_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC6_DUMMY_REG +#define SDMA0_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC6_RB_AQL_CNTL +#define SDMA0_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC6_MINOR_PTR_UPDATE +#define SDMA0_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC6_MIDCMD_DATA0 +#define SDMA0_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA1 +#define SDMA0_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA2 +#define SDMA0_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA3 +#define SDMA0_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA4 +#define SDMA0_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define 
SDMA0_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA5 +#define SDMA0_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA6 +#define SDMA0_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA7 +#define SDMA0_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA8 +#define SDMA0_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_CNTL +#define SDMA0_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC7_RB_CNTL +#define SDMA0_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC7_RB_BASE +#define SDMA0_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_BASE_HI +#define SDMA0_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC7_RB_RPTR +#define SDMA0_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_RPTR_HI +#define SDMA0_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR +#define SDMA0_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR_HI +#define SDMA0_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR_POLL_CNTL +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC7_RB_RPTR_ADDR_HI +#define SDMA0_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 
+#define SDMA0_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_RPTR_ADDR_LO +#define SDMA0_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC7_IB_CNTL +#define SDMA0_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC7_IB_RPTR +#define SDMA0_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC7_IB_OFFSET +#define SDMA0_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC7_IB_BASE_LO +#define SDMA0_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC7_IB_BASE_HI +#define SDMA0_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_IB_SIZE +#define SDMA0_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC7_SKIP_CNTL +#define SDMA0_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC7_CONTEXT_STATUS +#define SDMA0_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC7_DOORBELL +#define SDMA0_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC7_STATUS +#define SDMA0_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC7_DOORBELL_LOG +#define SDMA0_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC7_WATERMARK +#define SDMA0_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC7_WATERMARK__WR_OUTSTANDING_MASK 
0x03FF0000L +//SDMA0_RLC7_DOORBELL_OFFSET +#define SDMA0_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC7_CSA_ADDR_LO +#define SDMA0_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC7_CSA_ADDR_HI +#define SDMA0_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_IB_SUB_REMAIN +#define SDMA0_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC7_PREEMPT +#define SDMA0_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC7_DUMMY_REG +#define SDMA0_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC7_RB_AQL_CNTL +#define SDMA0_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC7_MINOR_PTR_UPDATE +#define SDMA0_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC7_MIDCMD_DATA0 +#define SDMA0_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA1 +#define SDMA0_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA2 +#define SDMA0_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA3 +#define SDMA0_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA4 +#define SDMA0_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA5 +#define SDMA0_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA6 +#define SDMA0_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA7 +#define SDMA0_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA8 +#define SDMA0_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_CNTL +#define SDMA0_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h new file mode 100644 index 
000000000000..db24d5eb16c9 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h @@ -0,0 +1,1039 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _sdma1_4_2_0_OFFSET_HEADER +#define _sdma1_4_2_0_OFFSET_HEADER + + + +// addressBlock: sdma1_sdma1dec +// base address: 0x6180 +#define mmSDMA1_UCODE_ADDR 0x0000 +#define mmSDMA1_UCODE_ADDR_BASE_IDX 0 +#define mmSDMA1_UCODE_DATA 0x0001 +#define mmSDMA1_UCODE_DATA_BASE_IDX 0 +#define mmSDMA1_VM_CNTL 0x0004 +#define mmSDMA1_VM_CNTL_BASE_IDX 0 +#define mmSDMA1_VM_CTX_LO 0x0005 +#define mmSDMA1_VM_CTX_LO_BASE_IDX 0 +#define mmSDMA1_VM_CTX_HI 0x0006 +#define mmSDMA1_VM_CTX_HI_BASE_IDX 0 +#define mmSDMA1_ACTIVE_FCN_ID 0x0007 +#define mmSDMA1_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmSDMA1_VM_CTX_CNTL 0x0008 +#define mmSDMA1_VM_CTX_CNTL_BASE_IDX 0 +#define mmSDMA1_VIRT_RESET_REQ 0x0009 +#define mmSDMA1_VIRT_RESET_REQ_BASE_IDX 0 +#define mmSDMA1_VF_ENABLE 0x000a +#define mmSDMA1_VF_ENABLE_BASE_IDX 0 +#define mmSDMA1_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 0 +#define mmSDMA1_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 0 +#define mmSDMA1_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 0 +#define mmSDMA1_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA1_CONTEXT_REG_TYPE3_BASE_IDX 0 +#define mmSDMA1_PUB_REG_TYPE0 0x000f +#define mmSDMA1_PUB_REG_TYPE0_BASE_IDX 0 +#define mmSDMA1_PUB_REG_TYPE1 0x0010 +#define mmSDMA1_PUB_REG_TYPE1_BASE_IDX 0 +#define mmSDMA1_PUB_REG_TYPE2 0x0011 +#define mmSDMA1_PUB_REG_TYPE2_BASE_IDX 0 +#define mmSDMA1_PUB_REG_TYPE3 0x0012 +#define mmSDMA1_PUB_REG_TYPE3_BASE_IDX 0 +#define mmSDMA1_MMHUB_CNTL 0x0013 +#define mmSDMA1_MMHUB_CNTL_BASE_IDX 0 +#define mmSDMA1_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA1_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0 +#define mmSDMA1_POWER_CNTL 0x001a +#define mmSDMA1_POWER_CNTL_BASE_IDX 0 +#define mmSDMA1_CLK_CTRL 0x001b +#define mmSDMA1_CLK_CTRL_BASE_IDX 0 +#define mmSDMA1_CNTL 0x001c +#define mmSDMA1_CNTL_BASE_IDX 0 +#define mmSDMA1_CHICKEN_BITS 0x001d +#define mmSDMA1_CHICKEN_BITS_BASE_IDX 0 +#define mmSDMA1_GB_ADDR_CONFIG 0x001e +#define mmSDMA1_GB_ADDR_CONFIG_BASE_IDX 0 +#define mmSDMA1_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0 +#define mmSDMA1_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0 +#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define 
mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0 +#define mmSDMA1_RB_RPTR_FETCH 0x0022 +#define mmSDMA1_RB_RPTR_FETCH_BASE_IDX 0 +#define mmSDMA1_IB_OFFSET_FETCH 0x0023 +#define mmSDMA1_IB_OFFSET_FETCH_BASE_IDX 0 +#define mmSDMA1_PROGRAM 0x0024 +#define mmSDMA1_PROGRAM_BASE_IDX 0 +#define mmSDMA1_STATUS_REG 0x0025 +#define mmSDMA1_STATUS_REG_BASE_IDX 0 +#define mmSDMA1_STATUS1_REG 0x0026 +#define mmSDMA1_STATUS1_REG_BASE_IDX 0 +#define mmSDMA1_RD_BURST_CNTL 0x0027 +#define mmSDMA1_RD_BURST_CNTL_BASE_IDX 0 +#define mmSDMA1_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0 +#define mmSDMA1_UCODE_CHECKSUM 0x0029 +#define mmSDMA1_UCODE_CHECKSUM_BASE_IDX 0 +#define mmSDMA1_F32_CNTL 0x002a +#define mmSDMA1_F32_CNTL_BASE_IDX 0 +#define mmSDMA1_FREEZE 0x002b +#define mmSDMA1_FREEZE_BASE_IDX 0 +#define mmSDMA1_PHASE0_QUANTUM 0x002c +#define mmSDMA1_PHASE0_QUANTUM_BASE_IDX 0 +#define mmSDMA1_PHASE1_QUANTUM 0x002d +#define mmSDMA1_PHASE1_QUANTUM_BASE_IDX 0 +#define mmSDMA1_EDC_CONFIG 0x0032 +#define mmSDMA1_EDC_CONFIG_BASE_IDX 0 +#define mmSDMA1_BA_THRESHOLD 0x0033 +#define mmSDMA1_BA_THRESHOLD_BASE_IDX 0 +#define mmSDMA1_ID 0x0034 +#define mmSDMA1_ID_BASE_IDX 0 +#define mmSDMA1_VERSION 0x0035 +#define mmSDMA1_VERSION_BASE_IDX 0 +#define mmSDMA1_EDC_COUNTER 0x0036 +#define mmSDMA1_EDC_COUNTER_BASE_IDX 0 +#define mmSDMA1_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0 +#define mmSDMA1_STATUS2_REG 0x0038 +#define mmSDMA1_STATUS2_REG_BASE_IDX 0 +#define mmSDMA1_ATOMIC_CNTL 0x0039 +#define mmSDMA1_ATOMIC_CNTL_BASE_IDX 0 +#define mmSDMA1_ATOMIC_PREOP_LO 0x003a +#define mmSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0 +#define mmSDMA1_ATOMIC_PREOP_HI 0x003b +#define mmSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0 +#define mmSDMA1_UTCL1_CNTL 0x003c +#define mmSDMA1_UTCL1_CNTL_BASE_IDX 0 +#define mmSDMA1_UTCL1_WATERMK 0x003d +#define mmSDMA1_UTCL1_WATERMK_BASE_IDX 0 +#define mmSDMA1_UTCL1_RD_STATUS 0x003e +#define mmSDMA1_UTCL1_RD_STATUS_BASE_IDX 0 +#define mmSDMA1_UTCL1_WR_STATUS 0x003f +#define mmSDMA1_UTCL1_WR_STATUS_BASE_IDX 0 +#define mmSDMA1_UTCL1_INV0 0x0040 +#define mmSDMA1_UTCL1_INV0_BASE_IDX 0 +#define mmSDMA1_UTCL1_INV1 0x0041 +#define mmSDMA1_UTCL1_INV1_BASE_IDX 0 +#define mmSDMA1_UTCL1_INV2 0x0042 +#define mmSDMA1_UTCL1_INV2_BASE_IDX 0 +#define mmSDMA1_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0 +#define mmSDMA1_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0 +#define mmSDMA1_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0 +#define mmSDMA1_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0 +#define mmSDMA1_UTCL1_TIMEOUT 0x0047 +#define mmSDMA1_UTCL1_TIMEOUT_BASE_IDX 0 +#define mmSDMA1_UTCL1_PAGE 0x0048 +#define mmSDMA1_UTCL1_PAGE_BASE_IDX 0 +#define mmSDMA1_POWER_CNTL_IDLE 0x0049 +#define mmSDMA1_POWER_CNTL_IDLE_BASE_IDX 0 +#define mmSDMA1_RELAX_ORDERING_LUT 0x004a +#define mmSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0 +#define mmSDMA1_CHICKEN_BITS_2 0x004b +#define mmSDMA1_CHICKEN_BITS_2_BASE_IDX 0 +#define mmSDMA1_STATUS3_REG 0x004c +#define mmSDMA1_STATUS3_REG_BASE_IDX 0 +#define mmSDMA1_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_PHASE2_QUANTUM 0x004f +#define mmSDMA1_PHASE2_QUANTUM_BASE_IDX 0 +#define mmSDMA1_ERROR_LOG 0x0050 +#define mmSDMA1_ERROR_LOG_BASE_IDX 0 +#define mmSDMA1_PUB_DUMMY_REG0 0x0051 +#define mmSDMA1_PUB_DUMMY_REG0_BASE_IDX 0 +#define mmSDMA1_PUB_DUMMY_REG1 0x0052 
+#define mmSDMA1_PUB_DUMMY_REG1_BASE_IDX 0 +#define mmSDMA1_PUB_DUMMY_REG2 0x0053 +#define mmSDMA1_PUB_DUMMY_REG2_BASE_IDX 0 +#define mmSDMA1_PUB_DUMMY_REG3 0x0054 +#define mmSDMA1_PUB_DUMMY_REG3_BASE_IDX 0 +#define mmSDMA1_F32_COUNTER 0x0055 +#define mmSDMA1_F32_COUNTER_BASE_IDX 0 +#define mmSDMA1_PERFMON_CNTL 0x0057 +#define mmSDMA1_PERFMON_CNTL_BASE_IDX 0 +#define mmSDMA1_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA1_PERFCOUNTER0_RESULT_BASE_IDX 0 +#define mmSDMA1_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA1_PERFCOUNTER1_RESULT_BASE_IDX 0 +#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0 +#define mmSDMA1_CRD_CNTL 0x005b +#define mmSDMA1_CRD_CNTL_BASE_IDX 0 +#define mmSDMA1_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0 +#define mmSDMA1_ULV_CNTL 0x005e +#define mmSDMA1_ULV_CNTL_BASE_IDX 0 +#define mmSDMA1_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0 +#define mmSDMA1_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0 +#define mmSDMA1_GFX_RB_CNTL 0x0080 +#define mmSDMA1_GFX_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_RB_BASE 0x0081 +#define mmSDMA1_GFX_RB_BASE_BASE_IDX 0 +#define mmSDMA1_GFX_RB_BASE_HI 0x0082 +#define mmSDMA1_GFX_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_RPTR 0x0083 +#define mmSDMA1_GFX_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA1_GFX_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR 0x0085 +#define mmSDMA1_GFX_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA1_GFX_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA1_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA1_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_GFX_IB_CNTL 0x008a +#define mmSDMA1_GFX_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_IB_RPTR 0x008b +#define mmSDMA1_GFX_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_GFX_IB_OFFSET 0x008c +#define mmSDMA1_GFX_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_GFX_IB_BASE_LO 0x008d +#define mmSDMA1_GFX_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_GFX_IB_BASE_HI 0x008e +#define mmSDMA1_GFX_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_GFX_IB_SIZE 0x008f +#define mmSDMA1_GFX_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_GFX_SKIP_CNTL 0x0090 +#define mmSDMA1_GFX_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA1_GFX_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_GFX_DOORBELL 0x0092 +#define mmSDMA1_GFX_DOORBELL_BASE_IDX 0 +#define mmSDMA1_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA1_GFX_CONTEXT_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_STATUS 0x00a8 +#define mmSDMA1_GFX_STATUS_BASE_IDX 0 +#define mmSDMA1_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA1_GFX_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_GFX_WATERMARK 0x00aa +#define mmSDMA1_GFX_WATERMARK_BASE_IDX 0 +#define mmSDMA1_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA1_GFX_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA1_GFX_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA1_GFX_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA1_GFX_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_GFX_PREEMPT 0x00b0 +#define mmSDMA1_GFX_PREEMPT_BASE_IDX 0 +#define mmSDMA1_GFX_DUMMY_REG 0x00b1 +#define mmSDMA1_GFX_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 
+#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA1_GFX_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA1_GFX_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA1_GFX_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA1_GFX_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA1_GFX_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA1_GFX_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA1_GFX_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA1_GFX_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA1_GFX_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA1_GFX_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA1_GFX_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_CNTL 0x00c9 +#define mmSDMA1_GFX_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_CNTL 0x00e0 +#define mmSDMA1_PAGE_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_BASE 0x00e1 +#define mmSDMA1_PAGE_RB_BASE_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_BASE_HI 0x00e2 +#define mmSDMA1_PAGE_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_RPTR 0x00e3 +#define mmSDMA1_PAGE_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_RPTR_HI 0x00e4 +#define mmSDMA1_PAGE_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR 0x00e5 +#define mmSDMA1_PAGE_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR_HI 0x00e6 +#define mmSDMA1_PAGE_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL 0x00e7 +#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI 0x00e8 +#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO 0x00e9 +#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_CNTL 0x00ea +#define mmSDMA1_PAGE_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_RPTR 0x00eb +#define mmSDMA1_PAGE_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_OFFSET 0x00ec +#define mmSDMA1_PAGE_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_BASE_LO 0x00ed +#define mmSDMA1_PAGE_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_BASE_HI 0x00ee +#define mmSDMA1_PAGE_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_SIZE 0x00ef +#define mmSDMA1_PAGE_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_PAGE_SKIP_CNTL 0x00f0 +#define mmSDMA1_PAGE_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_CONTEXT_STATUS 0x00f1 +#define mmSDMA1_PAGE_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_PAGE_DOORBELL 0x00f2 +#define mmSDMA1_PAGE_DOORBELL_BASE_IDX 0 +#define mmSDMA1_PAGE_STATUS 0x0108 +#define mmSDMA1_PAGE_STATUS_BASE_IDX 0 +#define mmSDMA1_PAGE_DOORBELL_LOG 0x0109 +#define mmSDMA1_PAGE_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_PAGE_WATERMARK 0x010a +#define mmSDMA1_PAGE_WATERMARK_BASE_IDX 0 +#define mmSDMA1_PAGE_DOORBELL_OFFSET 0x010b +#define mmSDMA1_PAGE_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_PAGE_CSA_ADDR_LO 0x010c +#define mmSDMA1_PAGE_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_PAGE_CSA_ADDR_HI 0x010d +#define mmSDMA1_PAGE_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_SUB_REMAIN 0x010f +#define mmSDMA1_PAGE_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_PAGE_PREEMPT 0x0110 +#define mmSDMA1_PAGE_PREEMPT_BASE_IDX 0 +#define mmSDMA1_PAGE_DUMMY_REG 0x0111 +#define mmSDMA1_PAGE_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI 0x0112 +#define 
mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO 0x0113 +#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_AQL_CNTL 0x0114 +#define mmSDMA1_PAGE_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_MINOR_PTR_UPDATE 0x0115 +#define mmSDMA1_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA0 0x0120 +#define mmSDMA1_PAGE_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA1 0x0121 +#define mmSDMA1_PAGE_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA2 0x0122 +#define mmSDMA1_PAGE_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA3 0x0123 +#define mmSDMA1_PAGE_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA4 0x0124 +#define mmSDMA1_PAGE_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA5 0x0125 +#define mmSDMA1_PAGE_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA6 0x0126 +#define mmSDMA1_PAGE_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA7 0x0127 +#define mmSDMA1_PAGE_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA8 0x0128 +#define mmSDMA1_PAGE_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_CNTL 0x0129 +#define mmSDMA1_PAGE_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_CNTL 0x0140 +#define mmSDMA1_RLC0_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_BASE 0x0141 +#define mmSDMA1_RLC0_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_BASE_HI 0x0142 +#define mmSDMA1_RLC0_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_RPTR 0x0143 +#define mmSDMA1_RLC0_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_RPTR_HI 0x0144 +#define mmSDMA1_RLC0_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR 0x0145 +#define mmSDMA1_RLC0_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR_HI 0x0146 +#define mmSDMA1_RLC0_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL 0x0147 +#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI 0x0148 +#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO 0x0149 +#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_CNTL 0x014a +#define mmSDMA1_RLC0_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_RPTR 0x014b +#define mmSDMA1_RLC0_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_OFFSET 0x014c +#define mmSDMA1_RLC0_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_BASE_LO 0x014d +#define mmSDMA1_RLC0_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_BASE_HI 0x014e +#define mmSDMA1_RLC0_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_SIZE 0x014f +#define mmSDMA1_RLC0_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC0_SKIP_CNTL 0x0150 +#define mmSDMA1_RLC0_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_CONTEXT_STATUS 0x0151 +#define mmSDMA1_RLC0_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC0_DOORBELL 0x0152 +#define mmSDMA1_RLC0_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC0_STATUS 0x0168 +#define mmSDMA1_RLC0_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC0_DOORBELL_LOG 0x0169 +#define mmSDMA1_RLC0_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC0_WATERMARK 0x016a +#define mmSDMA1_RLC0_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC0_DOORBELL_OFFSET 0x016b +#define mmSDMA1_RLC0_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC0_CSA_ADDR_LO 0x016c +#define mmSDMA1_RLC0_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC0_CSA_ADDR_HI 0x016d +#define mmSDMA1_RLC0_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_SUB_REMAIN 0x016f +#define mmSDMA1_RLC0_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC0_PREEMPT 0x0170 +#define mmSDMA1_RLC0_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC0_DUMMY_REG 0x0171 +#define 
mmSDMA1_RLC0_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI 0x0172 +#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO 0x0173 +#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_AQL_CNTL 0x0174 +#define mmSDMA1_RLC0_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_MINOR_PTR_UPDATE 0x0175 +#define mmSDMA1_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA0 0x0180 +#define mmSDMA1_RLC0_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA1 0x0181 +#define mmSDMA1_RLC0_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA2 0x0182 +#define mmSDMA1_RLC0_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA3 0x0183 +#define mmSDMA1_RLC0_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA4 0x0184 +#define mmSDMA1_RLC0_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA5 0x0185 +#define mmSDMA1_RLC0_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA6 0x0186 +#define mmSDMA1_RLC0_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA7 0x0187 +#define mmSDMA1_RLC0_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA8 0x0188 +#define mmSDMA1_RLC0_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_CNTL 0x0189 +#define mmSDMA1_RLC0_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_CNTL 0x01a0 +#define mmSDMA1_RLC1_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_BASE 0x01a1 +#define mmSDMA1_RLC1_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_BASE_HI 0x01a2 +#define mmSDMA1_RLC1_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_RPTR 0x01a3 +#define mmSDMA1_RLC1_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_RPTR_HI 0x01a4 +#define mmSDMA1_RLC1_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR 0x01a5 +#define mmSDMA1_RLC1_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR_HI 0x01a6 +#define mmSDMA1_RLC1_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL 0x01a7 +#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI 0x01a8 +#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO 0x01a9 +#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_CNTL 0x01aa +#define mmSDMA1_RLC1_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_RPTR 0x01ab +#define mmSDMA1_RLC1_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_OFFSET 0x01ac +#define mmSDMA1_RLC1_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_BASE_LO 0x01ad +#define mmSDMA1_RLC1_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_BASE_HI 0x01ae +#define mmSDMA1_RLC1_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_SIZE 0x01af +#define mmSDMA1_RLC1_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC1_SKIP_CNTL 0x01b0 +#define mmSDMA1_RLC1_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_CONTEXT_STATUS 0x01b1 +#define mmSDMA1_RLC1_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC1_DOORBELL 0x01b2 +#define mmSDMA1_RLC1_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC1_STATUS 0x01c8 +#define mmSDMA1_RLC1_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC1_DOORBELL_LOG 0x01c9 +#define mmSDMA1_RLC1_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC1_WATERMARK 0x01ca +#define mmSDMA1_RLC1_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC1_DOORBELL_OFFSET 0x01cb +#define mmSDMA1_RLC1_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC1_CSA_ADDR_LO 0x01cc +#define mmSDMA1_RLC1_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC1_CSA_ADDR_HI 0x01cd +#define mmSDMA1_RLC1_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_SUB_REMAIN 0x01cf +#define mmSDMA1_RLC1_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC1_PREEMPT 0x01d0 +#define 
mmSDMA1_RLC1_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC1_DUMMY_REG 0x01d1 +#define mmSDMA1_RLC1_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI 0x01d2 +#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO 0x01d3 +#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_AQL_CNTL 0x01d4 +#define mmSDMA1_RLC1_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_MINOR_PTR_UPDATE 0x01d5 +#define mmSDMA1_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA0 0x01e0 +#define mmSDMA1_RLC1_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA1 0x01e1 +#define mmSDMA1_RLC1_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA2 0x01e2 +#define mmSDMA1_RLC1_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA3 0x01e3 +#define mmSDMA1_RLC1_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA4 0x01e4 +#define mmSDMA1_RLC1_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA5 0x01e5 +#define mmSDMA1_RLC1_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA6 0x01e6 +#define mmSDMA1_RLC1_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA7 0x01e7 +#define mmSDMA1_RLC1_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA8 0x01e8 +#define mmSDMA1_RLC1_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_CNTL 0x01e9 +#define mmSDMA1_RLC1_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_CNTL 0x0200 +#define mmSDMA1_RLC2_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_BASE 0x0201 +#define mmSDMA1_RLC2_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_BASE_HI 0x0202 +#define mmSDMA1_RLC2_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_RPTR 0x0203 +#define mmSDMA1_RLC2_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_RPTR_HI 0x0204 +#define mmSDMA1_RLC2_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR 0x0205 +#define mmSDMA1_RLC2_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR_HI 0x0206 +#define mmSDMA1_RLC2_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR_POLL_CNTL 0x0207 +#define mmSDMA1_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_RPTR_ADDR_HI 0x0208 +#define mmSDMA1_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_RPTR_ADDR_LO 0x0209 +#define mmSDMA1_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_CNTL 0x020a +#define mmSDMA1_RLC2_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_RPTR 0x020b +#define mmSDMA1_RLC2_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_OFFSET 0x020c +#define mmSDMA1_RLC2_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_BASE_LO 0x020d +#define mmSDMA1_RLC2_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_BASE_HI 0x020e +#define mmSDMA1_RLC2_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_SIZE 0x020f +#define mmSDMA1_RLC2_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC2_SKIP_CNTL 0x0210 +#define mmSDMA1_RLC2_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_CONTEXT_STATUS 0x0211 +#define mmSDMA1_RLC2_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC2_DOORBELL 0x0212 +#define mmSDMA1_RLC2_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC2_STATUS 0x0228 +#define mmSDMA1_RLC2_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC2_DOORBELL_LOG 0x0229 +#define mmSDMA1_RLC2_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC2_WATERMARK 0x022a +#define mmSDMA1_RLC2_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC2_DOORBELL_OFFSET 0x022b +#define mmSDMA1_RLC2_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC2_CSA_ADDR_LO 0x022c +#define mmSDMA1_RLC2_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC2_CSA_ADDR_HI 0x022d +#define mmSDMA1_RLC2_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_SUB_REMAIN 0x022f +#define 
mmSDMA1_RLC2_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC2_PREEMPT 0x0230 +#define mmSDMA1_RLC2_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC2_DUMMY_REG 0x0231 +#define mmSDMA1_RLC2_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_HI 0x0232 +#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_LO 0x0233 +#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_AQL_CNTL 0x0234 +#define mmSDMA1_RLC2_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_MINOR_PTR_UPDATE 0x0235 +#define mmSDMA1_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA0 0x0240 +#define mmSDMA1_RLC2_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA1 0x0241 +#define mmSDMA1_RLC2_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA2 0x0242 +#define mmSDMA1_RLC2_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA3 0x0243 +#define mmSDMA1_RLC2_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA4 0x0244 +#define mmSDMA1_RLC2_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA5 0x0245 +#define mmSDMA1_RLC2_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA6 0x0246 +#define mmSDMA1_RLC2_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA7 0x0247 +#define mmSDMA1_RLC2_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA8 0x0248 +#define mmSDMA1_RLC2_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_CNTL 0x0249 +#define mmSDMA1_RLC2_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_CNTL 0x0260 +#define mmSDMA1_RLC3_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_BASE 0x0261 +#define mmSDMA1_RLC3_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_BASE_HI 0x0262 +#define mmSDMA1_RLC3_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_RPTR 0x0263 +#define mmSDMA1_RLC3_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_RPTR_HI 0x0264 +#define mmSDMA1_RLC3_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR 0x0265 +#define mmSDMA1_RLC3_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR_HI 0x0266 +#define mmSDMA1_RLC3_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR_POLL_CNTL 0x0267 +#define mmSDMA1_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_RPTR_ADDR_HI 0x0268 +#define mmSDMA1_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_RPTR_ADDR_LO 0x0269 +#define mmSDMA1_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_CNTL 0x026a +#define mmSDMA1_RLC3_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_RPTR 0x026b +#define mmSDMA1_RLC3_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_OFFSET 0x026c +#define mmSDMA1_RLC3_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_BASE_LO 0x026d +#define mmSDMA1_RLC3_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_BASE_HI 0x026e +#define mmSDMA1_RLC3_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_SIZE 0x026f +#define mmSDMA1_RLC3_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC3_SKIP_CNTL 0x0270 +#define mmSDMA1_RLC3_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_CONTEXT_STATUS 0x0271 +#define mmSDMA1_RLC3_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC3_DOORBELL 0x0272 +#define mmSDMA1_RLC3_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC3_STATUS 0x0288 +#define mmSDMA1_RLC3_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC3_DOORBELL_LOG 0x0289 +#define mmSDMA1_RLC3_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC3_WATERMARK 0x028a +#define mmSDMA1_RLC3_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC3_DOORBELL_OFFSET 0x028b +#define mmSDMA1_RLC3_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC3_CSA_ADDR_LO 0x028c +#define mmSDMA1_RLC3_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC3_CSA_ADDR_HI 0x028d +#define 
mmSDMA1_RLC3_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_SUB_REMAIN 0x028f +#define mmSDMA1_RLC3_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC3_PREEMPT 0x0290 +#define mmSDMA1_RLC3_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC3_DUMMY_REG 0x0291 +#define mmSDMA1_RLC3_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_HI 0x0292 +#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_LO 0x0293 +#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_AQL_CNTL 0x0294 +#define mmSDMA1_RLC3_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_MINOR_PTR_UPDATE 0x0295 +#define mmSDMA1_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA0 0x02a0 +#define mmSDMA1_RLC3_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA1 0x02a1 +#define mmSDMA1_RLC3_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA2 0x02a2 +#define mmSDMA1_RLC3_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA3 0x02a3 +#define mmSDMA1_RLC3_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA4 0x02a4 +#define mmSDMA1_RLC3_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA5 0x02a5 +#define mmSDMA1_RLC3_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA6 0x02a6 +#define mmSDMA1_RLC3_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA7 0x02a7 +#define mmSDMA1_RLC3_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA8 0x02a8 +#define mmSDMA1_RLC3_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_CNTL 0x02a9 +#define mmSDMA1_RLC3_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_CNTL 0x02c0 +#define mmSDMA1_RLC4_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_BASE 0x02c1 +#define mmSDMA1_RLC4_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_BASE_HI 0x02c2 +#define mmSDMA1_RLC4_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_RPTR 0x02c3 +#define mmSDMA1_RLC4_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_RPTR_HI 0x02c4 +#define mmSDMA1_RLC4_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR 0x02c5 +#define mmSDMA1_RLC4_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR_HI 0x02c6 +#define mmSDMA1_RLC4_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR_POLL_CNTL 0x02c7 +#define mmSDMA1_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_RPTR_ADDR_HI 0x02c8 +#define mmSDMA1_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_RPTR_ADDR_LO 0x02c9 +#define mmSDMA1_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_CNTL 0x02ca +#define mmSDMA1_RLC4_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_RPTR 0x02cb +#define mmSDMA1_RLC4_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_OFFSET 0x02cc +#define mmSDMA1_RLC4_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_BASE_LO 0x02cd +#define mmSDMA1_RLC4_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_BASE_HI 0x02ce +#define mmSDMA1_RLC4_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_SIZE 0x02cf +#define mmSDMA1_RLC4_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC4_SKIP_CNTL 0x02d0 +#define mmSDMA1_RLC4_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_CONTEXT_STATUS 0x02d1 +#define mmSDMA1_RLC4_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC4_DOORBELL 0x02d2 +#define mmSDMA1_RLC4_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC4_STATUS 0x02e8 +#define mmSDMA1_RLC4_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC4_DOORBELL_LOG 0x02e9 +#define mmSDMA1_RLC4_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC4_WATERMARK 0x02ea +#define mmSDMA1_RLC4_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC4_DOORBELL_OFFSET 0x02eb +#define mmSDMA1_RLC4_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC4_CSA_ADDR_LO 0x02ec +#define 
mmSDMA1_RLC4_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC4_CSA_ADDR_HI 0x02ed +#define mmSDMA1_RLC4_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_SUB_REMAIN 0x02ef +#define mmSDMA1_RLC4_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC4_PREEMPT 0x02f0 +#define mmSDMA1_RLC4_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC4_DUMMY_REG 0x02f1 +#define mmSDMA1_RLC4_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_HI 0x02f2 +#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_LO 0x02f3 +#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_AQL_CNTL 0x02f4 +#define mmSDMA1_RLC4_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_MINOR_PTR_UPDATE 0x02f5 +#define mmSDMA1_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA0 0x0300 +#define mmSDMA1_RLC4_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA1 0x0301 +#define mmSDMA1_RLC4_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA2 0x0302 +#define mmSDMA1_RLC4_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA3 0x0303 +#define mmSDMA1_RLC4_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA4 0x0304 +#define mmSDMA1_RLC4_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA5 0x0305 +#define mmSDMA1_RLC4_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA6 0x0306 +#define mmSDMA1_RLC4_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA7 0x0307 +#define mmSDMA1_RLC4_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA8 0x0308 +#define mmSDMA1_RLC4_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_CNTL 0x0309 +#define mmSDMA1_RLC4_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_CNTL 0x0320 +#define mmSDMA1_RLC5_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_BASE 0x0321 +#define mmSDMA1_RLC5_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_BASE_HI 0x0322 +#define mmSDMA1_RLC5_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_RPTR 0x0323 +#define mmSDMA1_RLC5_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_RPTR_HI 0x0324 +#define mmSDMA1_RLC5_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR 0x0325 +#define mmSDMA1_RLC5_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR_HI 0x0326 +#define mmSDMA1_RLC5_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR_POLL_CNTL 0x0327 +#define mmSDMA1_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_RPTR_ADDR_HI 0x0328 +#define mmSDMA1_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_RPTR_ADDR_LO 0x0329 +#define mmSDMA1_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_CNTL 0x032a +#define mmSDMA1_RLC5_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_RPTR 0x032b +#define mmSDMA1_RLC5_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_OFFSET 0x032c +#define mmSDMA1_RLC5_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_BASE_LO 0x032d +#define mmSDMA1_RLC5_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_BASE_HI 0x032e +#define mmSDMA1_RLC5_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_SIZE 0x032f +#define mmSDMA1_RLC5_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC5_SKIP_CNTL 0x0330 +#define mmSDMA1_RLC5_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_CONTEXT_STATUS 0x0331 +#define mmSDMA1_RLC5_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC5_DOORBELL 0x0332 +#define mmSDMA1_RLC5_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC5_STATUS 0x0348 +#define mmSDMA1_RLC5_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC5_DOORBELL_LOG 0x0349 +#define mmSDMA1_RLC5_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC5_WATERMARK 0x034a +#define mmSDMA1_RLC5_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC5_DOORBELL_OFFSET 0x034b +#define 
mmSDMA1_RLC5_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC5_CSA_ADDR_LO 0x034c +#define mmSDMA1_RLC5_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC5_CSA_ADDR_HI 0x034d +#define mmSDMA1_RLC5_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_SUB_REMAIN 0x034f +#define mmSDMA1_RLC5_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC5_PREEMPT 0x0350 +#define mmSDMA1_RLC5_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC5_DUMMY_REG 0x0351 +#define mmSDMA1_RLC5_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_HI 0x0352 +#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_LO 0x0353 +#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_AQL_CNTL 0x0354 +#define mmSDMA1_RLC5_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_MINOR_PTR_UPDATE 0x0355 +#define mmSDMA1_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA0 0x0360 +#define mmSDMA1_RLC5_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA1 0x0361 +#define mmSDMA1_RLC5_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA2 0x0362 +#define mmSDMA1_RLC5_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA3 0x0363 +#define mmSDMA1_RLC5_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA4 0x0364 +#define mmSDMA1_RLC5_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA5 0x0365 +#define mmSDMA1_RLC5_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA6 0x0366 +#define mmSDMA1_RLC5_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA7 0x0367 +#define mmSDMA1_RLC5_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA8 0x0368 +#define mmSDMA1_RLC5_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_CNTL 0x0369 +#define mmSDMA1_RLC5_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_CNTL 0x0380 +#define mmSDMA1_RLC6_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_BASE 0x0381 +#define mmSDMA1_RLC6_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_BASE_HI 0x0382 +#define mmSDMA1_RLC6_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_RPTR 0x0383 +#define mmSDMA1_RLC6_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_RPTR_HI 0x0384 +#define mmSDMA1_RLC6_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR 0x0385 +#define mmSDMA1_RLC6_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR_HI 0x0386 +#define mmSDMA1_RLC6_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR_POLL_CNTL 0x0387 +#define mmSDMA1_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_RPTR_ADDR_HI 0x0388 +#define mmSDMA1_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_RPTR_ADDR_LO 0x0389 +#define mmSDMA1_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_CNTL 0x038a +#define mmSDMA1_RLC6_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_RPTR 0x038b +#define mmSDMA1_RLC6_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_OFFSET 0x038c +#define mmSDMA1_RLC6_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_BASE_LO 0x038d +#define mmSDMA1_RLC6_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_BASE_HI 0x038e +#define mmSDMA1_RLC6_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_SIZE 0x038f +#define mmSDMA1_RLC6_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC6_SKIP_CNTL 0x0390 +#define mmSDMA1_RLC6_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_CONTEXT_STATUS 0x0391 +#define mmSDMA1_RLC6_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC6_DOORBELL 0x0392 +#define mmSDMA1_RLC6_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC6_STATUS 0x03a8 +#define mmSDMA1_RLC6_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC6_DOORBELL_LOG 0x03a9 +#define mmSDMA1_RLC6_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC6_WATERMARK 0x03aa +#define 
mmSDMA1_RLC6_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC6_DOORBELL_OFFSET 0x03ab +#define mmSDMA1_RLC6_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC6_CSA_ADDR_LO 0x03ac +#define mmSDMA1_RLC6_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC6_CSA_ADDR_HI 0x03ad +#define mmSDMA1_RLC6_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_SUB_REMAIN 0x03af +#define mmSDMA1_RLC6_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC6_PREEMPT 0x03b0 +#define mmSDMA1_RLC6_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC6_DUMMY_REG 0x03b1 +#define mmSDMA1_RLC6_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_HI 0x03b2 +#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_LO 0x03b3 +#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_AQL_CNTL 0x03b4 +#define mmSDMA1_RLC6_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_MINOR_PTR_UPDATE 0x03b5 +#define mmSDMA1_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA0 0x03c0 +#define mmSDMA1_RLC6_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA1 0x03c1 +#define mmSDMA1_RLC6_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA2 0x03c2 +#define mmSDMA1_RLC6_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA3 0x03c3 +#define mmSDMA1_RLC6_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA4 0x03c4 +#define mmSDMA1_RLC6_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA5 0x03c5 +#define mmSDMA1_RLC6_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA6 0x03c6 +#define mmSDMA1_RLC6_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA7 0x03c7 +#define mmSDMA1_RLC6_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA8 0x03c8 +#define mmSDMA1_RLC6_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_CNTL 0x03c9 +#define mmSDMA1_RLC6_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_CNTL 0x03e0 +#define mmSDMA1_RLC7_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_BASE 0x03e1 +#define mmSDMA1_RLC7_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_BASE_HI 0x03e2 +#define mmSDMA1_RLC7_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_RPTR 0x03e3 +#define mmSDMA1_RLC7_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_RPTR_HI 0x03e4 +#define mmSDMA1_RLC7_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR 0x03e5 +#define mmSDMA1_RLC7_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR_HI 0x03e6 +#define mmSDMA1_RLC7_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR_POLL_CNTL 0x03e7 +#define mmSDMA1_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_RPTR_ADDR_HI 0x03e8 +#define mmSDMA1_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_RPTR_ADDR_LO 0x03e9 +#define mmSDMA1_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_CNTL 0x03ea +#define mmSDMA1_RLC7_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_RPTR 0x03eb +#define mmSDMA1_RLC7_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_OFFSET 0x03ec +#define mmSDMA1_RLC7_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_BASE_LO 0x03ed +#define mmSDMA1_RLC7_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_BASE_HI 0x03ee +#define mmSDMA1_RLC7_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_SIZE 0x03ef +#define mmSDMA1_RLC7_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC7_SKIP_CNTL 0x03f0 +#define mmSDMA1_RLC7_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_CONTEXT_STATUS 0x03f1 +#define mmSDMA1_RLC7_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC7_DOORBELL 0x03f2 +#define mmSDMA1_RLC7_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC7_STATUS 0x0408 +#define mmSDMA1_RLC7_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC7_DOORBELL_LOG 0x0409 +#define 
mmSDMA1_RLC7_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC7_WATERMARK 0x040a +#define mmSDMA1_RLC7_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC7_DOORBELL_OFFSET 0x040b +#define mmSDMA1_RLC7_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC7_CSA_ADDR_LO 0x040c +#define mmSDMA1_RLC7_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC7_CSA_ADDR_HI 0x040d +#define mmSDMA1_RLC7_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_SUB_REMAIN 0x040f +#define mmSDMA1_RLC7_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC7_PREEMPT 0x0410 +#define mmSDMA1_RLC7_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC7_DUMMY_REG 0x0411 +#define mmSDMA1_RLC7_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_HI 0x0412 +#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_LO 0x0413 +#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_AQL_CNTL 0x0414 +#define mmSDMA1_RLC7_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_MINOR_PTR_UPDATE 0x0415 +#define mmSDMA1_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA0 0x0420 +#define mmSDMA1_RLC7_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA1 0x0421 +#define mmSDMA1_RLC7_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA2 0x0422 +#define mmSDMA1_RLC7_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA3 0x0423 +#define mmSDMA1_RLC7_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA4 0x0424 +#define mmSDMA1_RLC7_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA5 0x0425 +#define mmSDMA1_RLC7_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA6 0x0426 +#define mmSDMA1_RLC7_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA7 0x0427 +#define mmSDMA1_RLC7_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA8 0x0428 +#define mmSDMA1_RLC7_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_CNTL 0x0429 +#define mmSDMA1_RLC7_MIDCMD_CNTL_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h new file mode 100644 index 000000000000..0420ca583099 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h @@ -0,0 +1,2948 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma1_4_2_0_SH_MASK_HEADER +#define _sdma1_4_2_0_SH_MASK_HEADER + + +// addressBlock: sdma1_sdma1dec +//SDMA1_UCODE_ADDR +#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA1_UCODE_DATA +#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA1_VM_CNTL +#define SDMA1_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA1_VM_CTX_LO +#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_VM_CTX_HI +#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_ACTIVE_FCN_ID +#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA1_VM_CTX_CNTL +#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA1_VIRT_RESET_REQ +#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA1_VF_ENABLE +#define SDMA1_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA1_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA1_CONTEXT_REG_TYPE0 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE__SHIFT 0x1 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL__SHIFT 0xa +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR__SHIFT 0xb +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE__SHIFT 0xf +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL__SHIFT 0x12 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_MASK 0x00000002L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL_MASK 0x00040000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA1_CONTEXT_REG_TYPE1 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS__SHIFT 0x8 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK__SHIFT 0xa +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT__SHIFT 0x10 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS_MASK 0x00000100L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK_MASK 0x00000400L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT_MASK 0x00010000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA1_CONTEXT_REG_TYPE2 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA1_CONTEXT_REG_TYPE3 +#define SDMA1_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA1_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA1_PUB_REG_TYPE0 +#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR__SHIFT 0x0 +#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA__SHIFT 0x1 +#define SDMA1_PUB_REG_TYPE0__SDMA1_REGISTER_SECURITY_CNTL__SHIFT 0x2 +#define SDMA1_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL__SHIFT 0x4 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO__SHIFT 0x5 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI__SHIFT 0x6 +#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA1_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL__SHIFT 0x13 +#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x14 +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a +#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL__SHIFT 0x1b +#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c +#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d +#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR_MASK 0x00000001L +#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA_MASK 0x00000002L +#define SDMA1_PUB_REG_TYPE0__SDMA1_REGISTER_SECURITY_CNTL_MASK 0x00000004L +#define SDMA1_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL_MASK 0x00000010L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO_MASK 0x00000020L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI_MASK 0x00000040L +#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA1_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0_MASK 
0x00000800L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2_MASK 0x00002000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL_MASK 0x00080000L +#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01F00000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL_MASK 0x08000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA1_PUB_REG_TYPE1 +#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4 +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5 +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6 +#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL__SHIFT 0xa +#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb +#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12 +#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13 +#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14 +#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15 +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16 +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18 +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L +#define 
SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL_MASK 0x00000400L +#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L +#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA1_PUB_REG_TYPE2 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x0 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x1 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x2 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE__SHIFT 0x8 +#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL__SHIFT 0x17 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b +#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL__SHIFT 0x1e +#define SDMA1_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define 
SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000001L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000002L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000004L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE_MASK 0x00000100L +#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL_MASK 0x00800000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL_MASK 0x40000000L +#define SDMA1_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA1_PUB_REG_TYPE3 +#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x2 +#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFFCL +//SDMA1_MMHUB_CNTL +#define SDMA1_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA1_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA1_CONTEXT_GROUP_BOUNDARY +#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA1_POWER_CNTL +#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA1_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA1_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +//SDMA1_CLK_CTRL +#define SDMA1_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA1_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA1_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 
0x19 +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA1_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA1_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA1_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA1_CNTL +#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA1_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA1_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA1_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA1_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA1_CHICKEN_BITS +#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L +#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define 
SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA1_GB_ADDR_CONFIG +#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA1_GB_ADDR_CONFIG_READ +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA1_RB_RPTR_FETCH_HI +#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA1_RB_RPTR_FETCH +#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA1_IB_OFFSET_FETCH +#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA1_PROGRAM +#define SDMA1_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA1_STATUS_REG +#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define 
SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA1_STATUS1_REG +#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA1_STATUS1_REG__EX_START_MASK 0x00008000L +#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA1_RD_BURST_CNTL +#define SDMA1_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define 
SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA1_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA1_HBM_PAGE_CONFIG +#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L +//SDMA1_UCODE_CHECKSUM +#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA1_F32_CNTL +#define SDMA1_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA1_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA1_F32_CNTL__STEP_MASK 0x00000002L +//SDMA1_FREEZE +#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA1_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA1_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA1_PHASE0_QUANTUM +#define SDMA1_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA1_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA1_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA1_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA1_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA1_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA1_PHASE1_QUANTUM +#define SDMA1_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA1_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA1_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA1_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA1_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA1_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA1_EDC_CONFIG +#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA1_BA_THRESHOLD +#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA1_ID +#define SDMA1_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA1_VERSION +#define SDMA1_VERSION__MINVER__SHIFT 0x0 +#define SDMA1_VERSION__MAJVER__SHIFT 0x8 +#define SDMA1_VERSION__REV__SHIFT 0x10 +#define SDMA1_VERSION__MINVER_MASK 0x0000007FL +#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA1_VERSION__REV_MASK 0x003F0000L +//SDMA1_EDC_COUNTER +#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define 
SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA1_EDC_COUNTER_CLEAR +#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA1_STATUS2_REG +#define SDMA1_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA1_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x2 +#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA1_STATUS2_REG__ID_MASK 0x00000003L +#define SDMA1_STATUS2_REG__F32_INSTR_PTR_MASK 0x00000FFCL +#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA1_ATOMIC_CNTL +#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA1_ATOMIC_PREOP_LO +#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA1_ATOMIC_PREOP_HI +#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_CNTL +#define SDMA1_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define SDMA1_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA1_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define 
SDMA1_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA1_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA1_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA1_UTCL1_WATERMK +#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA1_UTCL1_RD_STATUS +#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define 
SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA1_UTCL1_WR_STATUS +#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define 
SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA1_UTCL1_INV0 +#define SDMA1_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA1_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA1_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA1_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA1_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA1_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA1_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA1_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA1_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA1_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA1_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA1_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA1_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA1_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA1_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA1_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA1_UTCL1_INV1 +#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_INV2 +#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_RD_XNACK0 +#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_RD_XNACK1 +#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA1_UTCL1_WR_XNACK0 +#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define 
SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_WR_XNACK1 +#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA1_UTCL1_TIMEOUT +#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA1_UTCL1_PAGE +#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA1_POWER_CNTL_IDLE +#define SDMA1_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA1_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA1_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA1_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA1_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA1_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA1_RELAX_ORDERING_LUT +#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 
0x08000000L +#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA1_CHICKEN_BITS_2 +#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA1_STATUS3_REG +#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA1_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA1_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA1_PHYSICAL_ADDR_LO +#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA1_PHYSICAL_ADDR_HI +#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA1_PHASE2_QUANTUM +#define SDMA1_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA1_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA1_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA1_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA1_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA1_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA1_ERROR_LOG +#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA1_PUB_DUMMY_REG0 +#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA1_PUB_DUMMY_REG1 +#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA1_PUB_DUMMY_REG2 +#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA1_PUB_DUMMY_REG3 +#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA1_F32_COUNTER +#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA1_PERFMON_CNTL +#define SDMA1_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA1_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA1_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA1_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA1_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA1_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA1_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA1_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define SDMA1_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA1_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA1_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA1_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA1_PERFCOUNTER0_RESULT +#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define 
SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA1_PERFCOUNTER1_RESULT +#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA1_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA1_CRD_CNTL +#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA1_GPU_IOV_VIOLATION_LOG +#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x12 +#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x13 +#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x14 +#define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID__SHIFT 0x18 +#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x0003FFFCL +#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00040000L +#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00080000L +#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x00F00000L +#define SDMA1_GPU_IOV_VIOLATION_LOG__INITIATOR_ID_MASK 0xFF000000L +//SDMA1_ULV_CNTL +#define SDMA1_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA1_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA1_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA1_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA1_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA1_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA1_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA1_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA1_EA_DBIT_ADDR_DATA +#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA1_EA_DBIT_ADDR_INDEX +#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA1_GFX_RB_CNTL +#define SDMA1_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define 
SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_GFX_RB_BASE +#define SDMA1_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_BASE_HI +#define SDMA1_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_GFX_RB_RPTR +#define SDMA1_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_RPTR_HI +#define SDMA1_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR +#define SDMA1_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR_HI +#define SDMA1_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR_POLL_CNTL +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_GFX_RB_RPTR_ADDR_HI +#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_RPTR_ADDR_LO +#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_GFX_IB_CNTL +#define SDMA1_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_GFX_IB_RPTR +#define SDMA1_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_GFX_IB_OFFSET +#define SDMA1_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_GFX_IB_BASE_LO +#define SDMA1_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_GFX_IB_BASE_HI +#define SDMA1_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_IB_SIZE +#define SDMA1_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_GFX_SKIP_CNTL +#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_GFX_CONTEXT_STATUS +#define SDMA1_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define 
SDMA1_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_GFX_DOORBELL +#define SDMA1_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_GFX_CONTEXT_CNTL +#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA1_GFX_STATUS +#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_GFX_DOORBELL_LOG +#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_GFX_WATERMARK +#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_GFX_DOORBELL_OFFSET +#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_GFX_CSA_ADDR_LO +#define SDMA1_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_GFX_CSA_ADDR_HI +#define SDMA1_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_IB_SUB_REMAIN +#define SDMA1_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_GFX_PREEMPT +#define SDMA1_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_GFX_DUMMY_REG +#define SDMA1_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_GFX_RB_AQL_CNTL +#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_GFX_MINOR_PTR_UPDATE +#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_GFX_MIDCMD_DATA0 +#define SDMA1_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA1 +#define 
SDMA1_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA2 +#define SDMA1_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA3 +#define SDMA1_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA4 +#define SDMA1_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA5 +#define SDMA1_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA6 +#define SDMA1_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA7 +#define SDMA1_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA8 +#define SDMA1_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_CNTL +#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_PAGE_RB_CNTL +#define SDMA1_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_PAGE_RB_BASE +#define SDMA1_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_BASE_HI +#define SDMA1_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_PAGE_RB_RPTR +#define SDMA1_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_RPTR_HI +#define SDMA1_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR +#define SDMA1_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR_HI +#define SDMA1_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR_POLL_CNTL +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 
+#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_PAGE_RB_RPTR_ADDR_HI +#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_RPTR_ADDR_LO +#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_PAGE_IB_CNTL +#define SDMA1_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_PAGE_IB_RPTR +#define SDMA1_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_PAGE_IB_OFFSET +#define SDMA1_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_PAGE_IB_BASE_LO +#define SDMA1_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_PAGE_IB_BASE_HI +#define SDMA1_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_IB_SIZE +#define SDMA1_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_PAGE_SKIP_CNTL +#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_PAGE_CONTEXT_STATUS +#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_PAGE_DOORBELL +#define SDMA1_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_PAGE_STATUS +#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_PAGE_DOORBELL_LOG +#define 
SDMA1_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_PAGE_WATERMARK +#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_PAGE_DOORBELL_OFFSET +#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_PAGE_CSA_ADDR_LO +#define SDMA1_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_PAGE_CSA_ADDR_HI +#define SDMA1_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_IB_SUB_REMAIN +#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_PAGE_PREEMPT +#define SDMA1_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_PAGE_DUMMY_REG +#define SDMA1_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_PAGE_RB_AQL_CNTL +#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_PAGE_MINOR_PTR_UPDATE +#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_PAGE_MIDCMD_DATA0 +#define SDMA1_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA1 +#define SDMA1_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA2 +#define SDMA1_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA3 +#define SDMA1_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA4 +#define SDMA1_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA5 +#define SDMA1_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA6 +#define SDMA1_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA7 +#define SDMA1_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA8 +#define SDMA1_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_CNTL +#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define 
SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC0_RB_CNTL +#define SDMA1_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC0_RB_BASE +#define SDMA1_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_BASE_HI +#define SDMA1_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC0_RB_RPTR +#define SDMA1_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_RPTR_HI +#define SDMA1_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR +#define SDMA1_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR_HI +#define SDMA1_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR_POLL_CNTL +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC0_RB_RPTR_ADDR_HI +#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_RPTR_ADDR_LO +#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC0_IB_CNTL +#define SDMA1_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC0_IB_CNTL__CMD_VMID_MASK 
0x000F0000L +//SDMA1_RLC0_IB_RPTR +#define SDMA1_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC0_IB_OFFSET +#define SDMA1_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC0_IB_BASE_LO +#define SDMA1_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC0_IB_BASE_HI +#define SDMA1_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_IB_SIZE +#define SDMA1_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC0_SKIP_CNTL +#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC0_CONTEXT_STATUS +#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC0_DOORBELL +#define SDMA1_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC0_STATUS +#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC0_DOORBELL_LOG +#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC0_WATERMARK +#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC0_DOORBELL_OFFSET +#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC0_CSA_ADDR_LO +#define SDMA1_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC0_CSA_ADDR_HI +#define SDMA1_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_IB_SUB_REMAIN +#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC0_PREEMPT +#define SDMA1_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC0_DUMMY_REG +#define SDMA1_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define 
SDMA1_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC0_RB_AQL_CNTL +#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC0_MINOR_PTR_UPDATE +#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC0_MIDCMD_DATA0 +#define SDMA1_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA1 +#define SDMA1_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA2 +#define SDMA1_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA3 +#define SDMA1_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA4 +#define SDMA1_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA5 +#define SDMA1_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA6 +#define SDMA1_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA7 +#define SDMA1_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA8 +#define SDMA1_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_CNTL +#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC1_RB_CNTL +#define SDMA1_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC1_RB_CNTL__RB_VMID_MASK 
0x0F000000L +//SDMA1_RLC1_RB_BASE +#define SDMA1_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_BASE_HI +#define SDMA1_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC1_RB_RPTR +#define SDMA1_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_RPTR_HI +#define SDMA1_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR +#define SDMA1_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR_HI +#define SDMA1_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR_POLL_CNTL +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC1_RB_RPTR_ADDR_HI +#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_RPTR_ADDR_LO +#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC1_IB_CNTL +#define SDMA1_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC1_IB_RPTR +#define SDMA1_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC1_IB_OFFSET +#define SDMA1_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC1_IB_BASE_LO +#define SDMA1_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC1_IB_BASE_HI +#define SDMA1_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_IB_SIZE +#define SDMA1_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC1_SKIP_CNTL +#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC1_CONTEXT_STATUS +#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define 
SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC1_DOORBELL +#define SDMA1_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC1_STATUS +#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC1_DOORBELL_LOG +#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC1_WATERMARK +#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC1_DOORBELL_OFFSET +#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC1_CSA_ADDR_LO +#define SDMA1_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC1_CSA_ADDR_HI +#define SDMA1_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_IB_SUB_REMAIN +#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC1_PREEMPT +#define SDMA1_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC1_DUMMY_REG +#define SDMA1_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC1_RB_AQL_CNTL +#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC1_MINOR_PTR_UPDATE +#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC1_MIDCMD_DATA0 +#define SDMA1_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA1 +#define SDMA1_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA2 +#define 
SDMA1_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA3 +#define SDMA1_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA4 +#define SDMA1_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA5 +#define SDMA1_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA6 +#define SDMA1_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA7 +#define SDMA1_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA8 +#define SDMA1_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_CNTL +#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC2_RB_CNTL +#define SDMA1_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC2_RB_BASE +#define SDMA1_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_BASE_HI +#define SDMA1_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC2_RB_RPTR +#define SDMA1_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_RPTR_HI +#define SDMA1_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR +#define SDMA1_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR_HI +#define SDMA1_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR_POLL_CNTL +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define 
SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC2_RB_RPTR_ADDR_HI +#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_RPTR_ADDR_LO +#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC2_IB_CNTL +#define SDMA1_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC2_IB_RPTR +#define SDMA1_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC2_IB_OFFSET +#define SDMA1_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC2_IB_BASE_LO +#define SDMA1_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC2_IB_BASE_HI +#define SDMA1_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_IB_SIZE +#define SDMA1_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC2_SKIP_CNTL +#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC2_CONTEXT_STATUS +#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC2_DOORBELL +#define SDMA1_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC2_STATUS +#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC2_DOORBELL_LOG +#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define 
SDMA1_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC2_WATERMARK +#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC2_DOORBELL_OFFSET +#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC2_CSA_ADDR_LO +#define SDMA1_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC2_CSA_ADDR_HI +#define SDMA1_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_IB_SUB_REMAIN +#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC2_PREEMPT +#define SDMA1_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC2_DUMMY_REG +#define SDMA1_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC2_RB_AQL_CNTL +#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC2_MINOR_PTR_UPDATE +#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC2_MIDCMD_DATA0 +#define SDMA1_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA1 +#define SDMA1_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA2 +#define SDMA1_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA3 +#define SDMA1_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA4 +#define SDMA1_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA5 +#define SDMA1_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA6 +#define SDMA1_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA7 +#define SDMA1_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA8 +#define SDMA1_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_CNTL +#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define 
SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC3_RB_CNTL +#define SDMA1_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC3_RB_BASE +#define SDMA1_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_BASE_HI +#define SDMA1_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC3_RB_RPTR +#define SDMA1_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_RPTR_HI +#define SDMA1_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR +#define SDMA1_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR_HI +#define SDMA1_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR_POLL_CNTL +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC3_RB_RPTR_ADDR_HI +#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_RPTR_ADDR_LO +#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC3_IB_CNTL +#define SDMA1_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC3_IB_RPTR +#define SDMA1_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC3_IB_RPTR__OFFSET_MASK 
0x003FFFFCL +//SDMA1_RLC3_IB_OFFSET +#define SDMA1_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC3_IB_BASE_LO +#define SDMA1_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC3_IB_BASE_HI +#define SDMA1_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_IB_SIZE +#define SDMA1_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC3_SKIP_CNTL +#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC3_CONTEXT_STATUS +#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC3_DOORBELL +#define SDMA1_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC3_STATUS +#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC3_DOORBELL_LOG +#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC3_WATERMARK +#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC3_DOORBELL_OFFSET +#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC3_CSA_ADDR_LO +#define SDMA1_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC3_CSA_ADDR_HI +#define SDMA1_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_IB_SUB_REMAIN +#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC3_PREEMPT +#define SDMA1_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC3_DUMMY_REG +#define SDMA1_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 
+#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC3_RB_AQL_CNTL +#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC3_MINOR_PTR_UPDATE +#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC3_MIDCMD_DATA0 +#define SDMA1_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA1 +#define SDMA1_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA2 +#define SDMA1_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA3 +#define SDMA1_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA4 +#define SDMA1_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA5 +#define SDMA1_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA6 +#define SDMA1_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA7 +#define SDMA1_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA8 +#define SDMA1_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_CNTL +#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC4_RB_CNTL +#define SDMA1_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC4_RB_BASE +#define SDMA1_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL 
+//SDMA1_RLC4_RB_BASE_HI +#define SDMA1_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC4_RB_RPTR +#define SDMA1_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_RPTR_HI +#define SDMA1_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR +#define SDMA1_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR_HI +#define SDMA1_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR_POLL_CNTL +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC4_RB_RPTR_ADDR_HI +#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_RPTR_ADDR_LO +#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC4_IB_CNTL +#define SDMA1_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC4_IB_RPTR +#define SDMA1_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC4_IB_OFFSET +#define SDMA1_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC4_IB_BASE_LO +#define SDMA1_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC4_IB_BASE_HI +#define SDMA1_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_IB_SIZE +#define SDMA1_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC4_SKIP_CNTL +#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC4_CONTEXT_STATUS +#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC4_CONTEXT_STATUS__IDLE_MASK 
0x00000004L +#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC4_DOORBELL +#define SDMA1_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC4_STATUS +#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC4_DOORBELL_LOG +#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC4_WATERMARK +#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC4_DOORBELL_OFFSET +#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC4_CSA_ADDR_LO +#define SDMA1_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC4_CSA_ADDR_HI +#define SDMA1_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_IB_SUB_REMAIN +#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC4_PREEMPT +#define SDMA1_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC4_DUMMY_REG +#define SDMA1_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC4_RB_AQL_CNTL +#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC4_MINOR_PTR_UPDATE +#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC4_MIDCMD_DATA0 +#define SDMA1_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA1 +#define SDMA1_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA2 +#define SDMA1_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA3 +#define SDMA1_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 
+#define SDMA1_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA4 +#define SDMA1_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA5 +#define SDMA1_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA6 +#define SDMA1_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA7 +#define SDMA1_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA8 +#define SDMA1_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_CNTL +#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC5_RB_CNTL +#define SDMA1_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC5_RB_BASE +#define SDMA1_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_BASE_HI +#define SDMA1_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC5_RB_RPTR +#define SDMA1_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_RPTR_HI +#define SDMA1_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR +#define SDMA1_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR_HI +#define SDMA1_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR_POLL_CNTL +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define 
SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC5_RB_RPTR_ADDR_HI +#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_RPTR_ADDR_LO +#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC5_IB_CNTL +#define SDMA1_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC5_IB_RPTR +#define SDMA1_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC5_IB_OFFSET +#define SDMA1_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC5_IB_BASE_LO +#define SDMA1_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC5_IB_BASE_HI +#define SDMA1_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_IB_SIZE +#define SDMA1_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC5_SKIP_CNTL +#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC5_CONTEXT_STATUS +#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC5_DOORBELL +#define SDMA1_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC5_STATUS +#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC5_DOORBELL_LOG +#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC5_WATERMARK +#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define 
SDMA1_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC5_DOORBELL_OFFSET +#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC5_CSA_ADDR_LO +#define SDMA1_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC5_CSA_ADDR_HI +#define SDMA1_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_IB_SUB_REMAIN +#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC5_PREEMPT +#define SDMA1_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC5_DUMMY_REG +#define SDMA1_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC5_RB_AQL_CNTL +#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC5_MINOR_PTR_UPDATE +#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC5_MIDCMD_DATA0 +#define SDMA1_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA1 +#define SDMA1_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA2 +#define SDMA1_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA3 +#define SDMA1_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA4 +#define SDMA1_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA5 +#define SDMA1_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA6 +#define SDMA1_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA7 +#define SDMA1_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA8 +#define SDMA1_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_CNTL +#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC6_RB_CNTL 
+#define SDMA1_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC6_RB_BASE +#define SDMA1_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_BASE_HI +#define SDMA1_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC6_RB_RPTR +#define SDMA1_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_RPTR_HI +#define SDMA1_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR +#define SDMA1_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR_HI +#define SDMA1_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR_POLL_CNTL +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC6_RB_RPTR_ADDR_HI +#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_RPTR_ADDR_LO +#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC6_IB_CNTL +#define SDMA1_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC6_IB_RPTR +#define SDMA1_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC6_IB_OFFSET +#define SDMA1_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC6_IB_BASE_LO +#define 
SDMA1_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC6_IB_BASE_HI +#define SDMA1_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_IB_SIZE +#define SDMA1_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC6_SKIP_CNTL +#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC6_CONTEXT_STATUS +#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC6_DOORBELL +#define SDMA1_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC6_STATUS +#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC6_DOORBELL_LOG +#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC6_WATERMARK +#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC6_DOORBELL_OFFSET +#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC6_CSA_ADDR_LO +#define SDMA1_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC6_CSA_ADDR_HI +#define SDMA1_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_IB_SUB_REMAIN +#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC6_PREEMPT +#define SDMA1_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC6_DUMMY_REG +#define SDMA1_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define 
SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC6_RB_AQL_CNTL +#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC6_MINOR_PTR_UPDATE +#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC6_MIDCMD_DATA0 +#define SDMA1_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA1 +#define SDMA1_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA2 +#define SDMA1_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA3 +#define SDMA1_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA4 +#define SDMA1_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA5 +#define SDMA1_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA6 +#define SDMA1_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA7 +#define SDMA1_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA8 +#define SDMA1_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_CNTL +#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC7_RB_CNTL +#define SDMA1_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC7_RB_BASE +#define SDMA1_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_BASE_HI +#define SDMA1_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC7_RB_RPTR +#define 
SDMA1_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_RPTR_HI +#define SDMA1_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR +#define SDMA1_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR_HI +#define SDMA1_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR_POLL_CNTL +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC7_RB_RPTR_ADDR_HI +#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_RPTR_ADDR_LO +#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC7_IB_CNTL +#define SDMA1_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC7_IB_RPTR +#define SDMA1_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC7_IB_OFFSET +#define SDMA1_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC7_IB_BASE_LO +#define SDMA1_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC7_IB_BASE_HI +#define SDMA1_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_IB_SIZE +#define SDMA1_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC7_SKIP_CNTL +#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC7_CONTEXT_STATUS +#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define 
SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC7_DOORBELL +#define SDMA1_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC7_STATUS +#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC7_DOORBELL_LOG +#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC7_WATERMARK +#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC7_DOORBELL_OFFSET +#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC7_CSA_ADDR_LO +#define SDMA1_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC7_CSA_ADDR_HI +#define SDMA1_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_IB_SUB_REMAIN +#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC7_PREEMPT +#define SDMA1_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC7_DUMMY_REG +#define SDMA1_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC7_RB_AQL_CNTL +#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC7_MINOR_PTR_UPDATE +#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC7_MIDCMD_DATA0 +#define SDMA1_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA1 +#define SDMA1_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA2 +#define SDMA1_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA3 +#define SDMA1_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA4 +#define SDMA1_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define 
SDMA1_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA5 +#define SDMA1_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA6 +#define SDMA1_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA7 +#define SDMA1_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA8 +#define SDMA1_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_CNTL +#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif -- GitLab From e6af616a7822294dac294d92a04772a467ef9fd7 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 26 Mar 2018 15:29:48 +0800 Subject: [PATCH 0395/1692] drm/amdgpu/include: add thm 11.0.2 headers Headers for thermal controller. Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- .../include/asic_reg/thm/thm_11_0_2_offset.h | 37 ++++++++ .../include/asic_reg/thm/thm_11_0_2_sh_mask.h | 86 +++++++++++++++++++ 2 files changed, 123 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h new file mode 100644 index 000000000000..510ec3c70626 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _thm_11_0_2_OFFSET_HEADER +#define _thm_11_0_2_OFFSET_HEADER + + +#define mmCG_MULT_THERMAL_STATUS 0x005f +#define mmCG_MULT_THERMAL_STATUS_BASE_IDX 0 + +#define mmTHM_THERMAL_INT_ENA 0x000a +#define mmTHM_THERMAL_INT_ENA_BASE_IDX 0 +#define mmTHM_THERMAL_INT_CTRL 0x000b +#define mmTHM_THERMAL_INT_CTRL_BASE_IDX 0 + +#define mmTHM_TCON_THERM_TRIP 0x0002 +#define mmTHM_TCON_THERM_TRIP_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h new file mode 100644 index 000000000000..f69533fa6abf --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _thm_11_0_2_SH_MASK_HEADER +#define _thm_11_0_2_SH_MASK_HEADER + + +//CG_MULT_THERMAL_STATUS +#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0 +#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9 +#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL +#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003FE00L + +//THM_THERMAL_INT_ENA +#define THM_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0 +#define THM_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1 +#define THM_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2 +#define THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3 +#define THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4 +#define THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5 +#define THM_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x00000001L +#define THM_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x00000002L +#define THM_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x00000004L +#define THM_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x00000008L +#define THM_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x00000010L +#define THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x00000020L +//THM_THERMAL_INT_CTRL +#define THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0 +#define THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8 +#define THM_THERMAL_INT_CTRL__TEMP_THRESHOLD__SHIFT 0x10 +#define THM_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18 +#define THM_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19 +#define THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a +#define THM_THERMAL_INT_CTRL__THERM_PROCHOT_MASK__SHIFT 0x1b +#define THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT 0x1c +#define THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT 0x1d +#define THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0x000000FFL +#define THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0x0000FF00L +#define THM_THERMAL_INT_CTRL__TEMP_THRESHOLD_MASK 0x00FF0000L +#define THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x01000000L +#define THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x02000000L +#define THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x04000000L +#define THM_THERMAL_INT_CTRL__THERM_PROCHOT_MASK_MASK 0x08000000L +#define THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK 0x10000000L +#define THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK 0xE0000000L + +//THM_TCON_THERM_TRIP +#define THM_TCON_THERM_TRIP__CTF_PAD_POLARITY__SHIFT 0x0 +#define THM_TCON_THERM_TRIP__THERM_TP__SHIFT 0x1 +#define THM_TCON_THERM_TRIP__CTF_THRESHOLD_EXCEEDED__SHIFT 0x2 +#define THM_TCON_THERM_TRIP__THERM_TP_SENSE__SHIFT 0x3 +#define THM_TCON_THERM_TRIP__RSVD2__SHIFT 0x4 +#define THM_TCON_THERM_TRIP__THERM_TP_EN__SHIFT 0x5 +#define THM_TCON_THERM_TRIP__THERM_TP_LMT__SHIFT 0x6 +#define THM_TCON_THERM_TRIP__RSVD3__SHIFT 0xe +#define THM_TCON_THERM_TRIP__SW_THERM_TP__SHIFT 0x1f +#define THM_TCON_THERM_TRIP__CTF_PAD_POLARITY_MASK 0x00000001L +#define THM_TCON_THERM_TRIP__THERM_TP_MASK 0x00000002L +#define THM_TCON_THERM_TRIP__CTF_THRESHOLD_EXCEEDED_MASK 0x00000004L +#define THM_TCON_THERM_TRIP__THERM_TP_SENSE_MASK 0x00000008L +#define THM_TCON_THERM_TRIP__RSVD2_MASK 0x00000010L +#define THM_TCON_THERM_TRIP__THERM_TP_EN_MASK 0x00000020L +#define THM_TCON_THERM_TRIP__THERM_TP_LMT_MASK 0x00003FC0L +#define THM_TCON_THERM_TRIP__RSVD3_MASK 0x7FFFC000L +#define THM_TCON_THERM_TRIP__SW_THERM_TP_MASK 0x80000000L + +#endif + -- GitLab From e9126d09eeb514d349771c3b90c94535396ffe0a Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 10 May 2018 21:23:58 +0800 Subject: [PATCH 0396/1692] drm/amdgpu/include: Add mp 11.0 header files. (v2) Add the system management controller v11.0 header files. 
v2: cleanup Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../amd/include/asic_reg/mp/mp_11_0_offset.h | 358 ++++++++++++ .../amd/include/asic_reg/mp/mp_11_0_sh_mask.h | 534 ++++++++++++++++++ 2 files changed, 892 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h new file mode 100644 index 000000000000..6d0052ce6bed --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h @@ -0,0 +1,358 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _mp_11_0_2_OFFSET_HEADER +#define _mp_11_0_2_OFFSET_HEADER + + +// addressBlock: mp_SmuMp0_SmnDec +// base address: 0x0 +#define mmMP0_SMN_C2PMSG_32 0x0060 +#define mmMP0_SMN_C2PMSG_32_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_33 0x0061 +#define mmMP0_SMN_C2PMSG_33_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_34 0x0062 +#define mmMP0_SMN_C2PMSG_34_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_35 0x0063 +#define mmMP0_SMN_C2PMSG_35_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_36 0x0064 +#define mmMP0_SMN_C2PMSG_36_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_37 0x0065 +#define mmMP0_SMN_C2PMSG_37_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_38 0x0066 +#define mmMP0_SMN_C2PMSG_38_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_39 0x0067 +#define mmMP0_SMN_C2PMSG_39_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_40 0x0068 +#define mmMP0_SMN_C2PMSG_40_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_41 0x0069 +#define mmMP0_SMN_C2PMSG_41_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_42 0x006a +#define mmMP0_SMN_C2PMSG_42_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_43 0x006b +#define mmMP0_SMN_C2PMSG_43_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_44 0x006c +#define mmMP0_SMN_C2PMSG_44_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_45 0x006d +#define mmMP0_SMN_C2PMSG_45_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_46 0x006e +#define mmMP0_SMN_C2PMSG_46_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_47 0x006f +#define mmMP0_SMN_C2PMSG_47_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_48 0x0070 +#define mmMP0_SMN_C2PMSG_48_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_49 0x0071 +#define mmMP0_SMN_C2PMSG_49_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_50 0x0072 +#define mmMP0_SMN_C2PMSG_50_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_51 0x0073 +#define mmMP0_SMN_C2PMSG_51_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_52 0x0074 +#define mmMP0_SMN_C2PMSG_52_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_53 0x0075 +#define mmMP0_SMN_C2PMSG_53_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_54 0x0076 +#define mmMP0_SMN_C2PMSG_54_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_55 0x0077 +#define mmMP0_SMN_C2PMSG_55_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_56 0x0078 +#define mmMP0_SMN_C2PMSG_56_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_57 0x0079 +#define mmMP0_SMN_C2PMSG_57_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_58 0x007a +#define mmMP0_SMN_C2PMSG_58_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_59 0x007b +#define mmMP0_SMN_C2PMSG_59_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_60 0x007c +#define mmMP0_SMN_C2PMSG_60_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_61 0x007d +#define mmMP0_SMN_C2PMSG_61_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_62 0x007e +#define mmMP0_SMN_C2PMSG_62_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_63 0x007f +#define mmMP0_SMN_C2PMSG_63_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_64 0x0080 +#define mmMP0_SMN_C2PMSG_64_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_65 0x0081 +#define mmMP0_SMN_C2PMSG_65_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_66 0x0082 +#define mmMP0_SMN_C2PMSG_66_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_67 0x0083 +#define mmMP0_SMN_C2PMSG_67_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_68 0x0084 +#define mmMP0_SMN_C2PMSG_68_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_69 0x0085 +#define mmMP0_SMN_C2PMSG_69_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_70 0x0086 +#define mmMP0_SMN_C2PMSG_70_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_71 0x0087 +#define mmMP0_SMN_C2PMSG_71_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_72 0x0088 +#define mmMP0_SMN_C2PMSG_72_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_73 0x0089 +#define mmMP0_SMN_C2PMSG_73_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_74 0x008a +#define mmMP0_SMN_C2PMSG_74_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_75 0x008b +#define mmMP0_SMN_C2PMSG_75_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_76 0x008c +#define mmMP0_SMN_C2PMSG_76_BASE_IDX 
0 +#define mmMP0_SMN_C2PMSG_77 0x008d +#define mmMP0_SMN_C2PMSG_77_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_78 0x008e +#define mmMP0_SMN_C2PMSG_78_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_79 0x008f +#define mmMP0_SMN_C2PMSG_79_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_80 0x0090 +#define mmMP0_SMN_C2PMSG_80_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_81 0x0091 +#define mmMP0_SMN_C2PMSG_81_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_82 0x0092 +#define mmMP0_SMN_C2PMSG_82_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_83 0x0093 +#define mmMP0_SMN_C2PMSG_83_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_84 0x0094 +#define mmMP0_SMN_C2PMSG_84_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_85 0x0095 +#define mmMP0_SMN_C2PMSG_85_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_86 0x0096 +#define mmMP0_SMN_C2PMSG_86_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_87 0x0097 +#define mmMP0_SMN_C2PMSG_87_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_88 0x0098 +#define mmMP0_SMN_C2PMSG_88_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_89 0x0099 +#define mmMP0_SMN_C2PMSG_89_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_90 0x009a +#define mmMP0_SMN_C2PMSG_90_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_91 0x009b +#define mmMP0_SMN_C2PMSG_91_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_92 0x009c +#define mmMP0_SMN_C2PMSG_92_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_93 0x009d +#define mmMP0_SMN_C2PMSG_93_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_94 0x009e +#define mmMP0_SMN_C2PMSG_94_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_95 0x009f +#define mmMP0_SMN_C2PMSG_95_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_96 0x00a0 +#define mmMP0_SMN_C2PMSG_96_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_97 0x00a1 +#define mmMP0_SMN_C2PMSG_97_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_98 0x00a2 +#define mmMP0_SMN_C2PMSG_98_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_99 0x00a3 +#define mmMP0_SMN_C2PMSG_99_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_100 0x00a4 +#define mmMP0_SMN_C2PMSG_100_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_101 0x00a5 +#define mmMP0_SMN_C2PMSG_101_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_102 0x00a6 +#define mmMP0_SMN_C2PMSG_102_BASE_IDX 0 +#define mmMP0_SMN_C2PMSG_103 0x00a7 +#define mmMP0_SMN_C2PMSG_103_BASE_IDX 0 +#define mmMP0_SMN_ACTIVE_FCN_ID 0x00c0 +#define mmMP0_SMN_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmMP0_SMN_IH_CREDIT 0x00c1 +#define mmMP0_SMN_IH_CREDIT_BASE_IDX 0 +#define mmMP0_SMN_IH_SW_INT 0x00c2 +#define mmMP0_SMN_IH_SW_INT_BASE_IDX 0 +#define mmMP0_SMN_IH_SW_INT_CTRL 0x00c3 +#define mmMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0 + + +// addressBlock: mp_SmuMp1_SmnDec +// base address: 0x0 +#define mmMP1_SMN_C2PMSG_32 0x0260 +#define mmMP1_SMN_C2PMSG_32_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_33 0x0261 +#define mmMP1_SMN_C2PMSG_33_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_34 0x0262 +#define mmMP1_SMN_C2PMSG_34_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_35 0x0263 +#define mmMP1_SMN_C2PMSG_35_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_36 0x0264 +#define mmMP1_SMN_C2PMSG_36_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_37 0x0265 +#define mmMP1_SMN_C2PMSG_37_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_38 0x0266 +#define mmMP1_SMN_C2PMSG_38_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_39 0x0267 +#define mmMP1_SMN_C2PMSG_39_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_40 0x0268 +#define mmMP1_SMN_C2PMSG_40_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_41 0x0269 +#define mmMP1_SMN_C2PMSG_41_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_42 0x026a +#define mmMP1_SMN_C2PMSG_42_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_43 0x026b +#define mmMP1_SMN_C2PMSG_43_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_44 0x026c +#define mmMP1_SMN_C2PMSG_44_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_45 0x026d +#define mmMP1_SMN_C2PMSG_45_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_46 0x026e +#define 
mmMP1_SMN_C2PMSG_46_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_47 0x026f +#define mmMP1_SMN_C2PMSG_47_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_48 0x0270 +#define mmMP1_SMN_C2PMSG_48_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_49 0x0271 +#define mmMP1_SMN_C2PMSG_49_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_50 0x0272 +#define mmMP1_SMN_C2PMSG_50_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_51 0x0273 +#define mmMP1_SMN_C2PMSG_51_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_52 0x0274 +#define mmMP1_SMN_C2PMSG_52_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_53 0x0275 +#define mmMP1_SMN_C2PMSG_53_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_54 0x0276 +#define mmMP1_SMN_C2PMSG_54_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_55 0x0277 +#define mmMP1_SMN_C2PMSG_55_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_56 0x0278 +#define mmMP1_SMN_C2PMSG_56_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_57 0x0279 +#define mmMP1_SMN_C2PMSG_57_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_58 0x027a +#define mmMP1_SMN_C2PMSG_58_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_59 0x027b +#define mmMP1_SMN_C2PMSG_59_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_60 0x027c +#define mmMP1_SMN_C2PMSG_60_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_61 0x027d +#define mmMP1_SMN_C2PMSG_61_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_62 0x027e +#define mmMP1_SMN_C2PMSG_62_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_63 0x027f +#define mmMP1_SMN_C2PMSG_63_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_64 0x0280 +#define mmMP1_SMN_C2PMSG_64_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_65 0x0281 +#define mmMP1_SMN_C2PMSG_65_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_66 0x0282 +#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_67 0x0283 +#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_68 0x0284 +#define mmMP1_SMN_C2PMSG_68_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_69 0x0285 +#define mmMP1_SMN_C2PMSG_69_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_70 0x0286 +#define mmMP1_SMN_C2PMSG_70_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_71 0x0287 +#define mmMP1_SMN_C2PMSG_71_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_72 0x0288 +#define mmMP1_SMN_C2PMSG_72_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_73 0x0289 +#define mmMP1_SMN_C2PMSG_73_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_74 0x028a +#define mmMP1_SMN_C2PMSG_74_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_75 0x028b +#define mmMP1_SMN_C2PMSG_75_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_76 0x028c +#define mmMP1_SMN_C2PMSG_76_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_77 0x028d +#define mmMP1_SMN_C2PMSG_77_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_78 0x028e +#define mmMP1_SMN_C2PMSG_78_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_79 0x028f +#define mmMP1_SMN_C2PMSG_79_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_80 0x0290 +#define mmMP1_SMN_C2PMSG_80_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_81 0x0291 +#define mmMP1_SMN_C2PMSG_81_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_82 0x0292 +#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_83 0x0293 +#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_84 0x0294 +#define mmMP1_SMN_C2PMSG_84_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_85 0x0295 +#define mmMP1_SMN_C2PMSG_85_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_86 0x0296 +#define mmMP1_SMN_C2PMSG_86_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_87 0x0297 +#define mmMP1_SMN_C2PMSG_87_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_88 0x0298 +#define mmMP1_SMN_C2PMSG_88_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_89 0x0299 +#define mmMP1_SMN_C2PMSG_89_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_90 0x029a +#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_91 0x029b +#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_92 0x029c +#define mmMP1_SMN_C2PMSG_92_BASE_IDX 0 +#define 
mmMP1_SMN_C2PMSG_93 0x029d +#define mmMP1_SMN_C2PMSG_93_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_94 0x029e +#define mmMP1_SMN_C2PMSG_94_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_95 0x029f +#define mmMP1_SMN_C2PMSG_95_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_96 0x02a0 +#define mmMP1_SMN_C2PMSG_96_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_97 0x02a1 +#define mmMP1_SMN_C2PMSG_97_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_98 0x02a2 +#define mmMP1_SMN_C2PMSG_98_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_99 0x02a3 +#define mmMP1_SMN_C2PMSG_99_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_100 0x02a4 +#define mmMP1_SMN_C2PMSG_100_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_101 0x02a5 +#define mmMP1_SMN_C2PMSG_101_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_102 0x02a6 +#define mmMP1_SMN_C2PMSG_102_BASE_IDX 0 +#define mmMP1_SMN_C2PMSG_103 0x02a7 +#define mmMP1_SMN_C2PMSG_103_BASE_IDX 0 +#define mmMP1_SMN_ACTIVE_FCN_ID 0x02c0 +#define mmMP1_SMN_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmMP1_SMN_IH_CREDIT 0x02c1 +#define mmMP1_SMN_IH_CREDIT_BASE_IDX 0 +#define mmMP1_SMN_IH_SW_INT 0x02c2 +#define mmMP1_SMN_IH_SW_INT_BASE_IDX 0 +#define mmMP1_SMN_IH_SW_INT_CTRL 0x02c3 +#define mmMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0 +#define mmMP1_SMN_FPS_CNT 0x02c4 +#define mmMP1_SMN_FPS_CNT_BASE_IDX 0 +#define mmMP1_SMN_PUB_CTRL 0x02c5 +#define mmMP1_SMN_PUB_CTRL_BASE_IDX 0 +#define mmMP1_SMN_EXT_SCRATCH0 0x03c0 +#define mmMP1_SMN_EXT_SCRATCH0_BASE_IDX 0 +#define mmMP1_SMN_EXT_SCRATCH1 0x03c1 +#define mmMP1_SMN_EXT_SCRATCH1_BASE_IDX 0 +#define mmMP1_SMN_EXT_SCRATCH2 0x03c2 +#define mmMP1_SMN_EXT_SCRATCH2_BASE_IDX 0 +#define mmMP1_SMN_EXT_SCRATCH3 0x03c3 +#define mmMP1_SMN_EXT_SCRATCH3_BASE_IDX 0 +#define mmMP1_SMN_EXT_SCRATCH4 0x03c4 +#define mmMP1_SMN_EXT_SCRATCH4_BASE_IDX 0 +#define mmMP1_SMN_EXT_SCRATCH5 0x03c5 +#define mmMP1_SMN_EXT_SCRATCH5_BASE_IDX 0 +#define mmMP1_SMN_EXT_SCRATCH6 0x03c6 +#define mmMP1_SMN_EXT_SCRATCH6_BASE_IDX 0 +#define mmMP1_SMN_EXT_SCRATCH7 0x03c7 +#define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0 + + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h new file mode 100644 index 000000000000..1ac8895c29a9 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h @@ -0,0 +1,534 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _mp_11_0_2_SH_MASK_HEADER +#define _mp_11_0_2_SH_MASK_HEADER + + +// addressBlock: mp_SmuMp0_SmnDec +//MP0_SMN_C2PMSG_32 +#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_33 +#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_34 +#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_35 +#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_36 +#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_37 +#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_38 +#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_39 +#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_40 +#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_41 +#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_42 +#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_43 +#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_44 +#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_45 +#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_46 +#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_47 +#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_48 +#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_49 +#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_50 +#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_51 +#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_52 +#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_53 +#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_54 +#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_55 +#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_56 +#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_57 +#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_58 +#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_59 +#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_60 +#define 
MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_61 +#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_62 +#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_63 +#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_64 +#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_65 +#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_66 +#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_67 +#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_68 +#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_69 +#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_70 +#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_71 +#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_72 +#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_73 +#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_74 +#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_75 +#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_76 +#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_77 +#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_78 +#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_79 +#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_80 +#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_81 +#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_82 +#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_83 +#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_84 +#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_85 +#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_86 +#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_87 +#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_88 +#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_89 +#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_90 +#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_91 +#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_92 +#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_93 +#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_94 +#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_95 +#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_96 +#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_97 +#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_98 +#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_99 +#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_100 +#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_101 +#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_102 +#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_C2PMSG_103 +#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL +//MP0_SMN_ACTIVE_FCN_ID +#define MP0_SMN_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define MP0_SMN_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define MP0_SMN_ACTIVE_FCN_ID__VFID_MASK 0x0000001FL +#define MP0_SMN_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//MP0_SMN_IH_CREDIT +#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 +#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 +#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L +#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L +//MP0_SMN_IH_SW_INT +#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0 +#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8 +#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL +#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L +//MP0_SMN_IH_SW_INT_CTRL +#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 +#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 +#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L +#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L + + +// addressBlock: mp_SmuMp1_SmnDec +//MP1_SMN_C2PMSG_32 +#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_33 +#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_34 +#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_35 +#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_36 +#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_37 +#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_38 +#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_38__CONTENT_MASK 
0xFFFFFFFFL +//MP1_SMN_C2PMSG_39 +#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_40 +#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_41 +#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_42 +#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_43 +#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_44 +#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_45 +#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_46 +#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_47 +#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_48 +#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_49 +#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_50 +#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_51 +#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_52 +#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_53 +#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_54 +#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_55 +#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_56 +#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_57 +#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_58 +#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_59 +#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_60 +#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_61 +#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_62 +#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_63 +#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_64 +#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_65 +#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_66 +#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_67 +#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_68 +#define 
MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_69 +#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_70 +#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_71 +#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_72 +#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_73 +#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_74 +#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_75 +#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_76 +#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_77 +#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_78 +#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_79 +#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_80 +#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_81 +#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_82 +#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_83 +#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_84 +#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_85 +#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_86 +#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_87 +#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_88 +#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_89 +#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_90 +#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_91 +#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_92 +#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_93 +#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_94 +#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_95 +#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_96 +#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_97 +#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_98 +#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_99 +#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_100 +#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_101 +#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_102 +#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_C2PMSG_103 +#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL +//MP1_SMN_ACTIVE_FCN_ID +#define MP1_SMN_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define MP1_SMN_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define MP1_SMN_ACTIVE_FCN_ID__VFID_MASK 0x0000001FL +#define MP1_SMN_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//MP1_SMN_IH_CREDIT +#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 +#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 +#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L +#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L +//MP1_SMN_IH_SW_INT +#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0 +#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8 +#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL +#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L +//MP1_SMN_IH_SW_INT_CTRL +#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 +#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 +#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L +#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L +//MP1_SMN_FPS_CNT +#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0 +#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL +//MP1_SMN_PUB_CTRL +#define MP1_SMN_PUB_CTRL__RESET__SHIFT 0x0 +#define MP1_SMN_PUB_CTRL__RESET_MASK 0x00000001L +//MP1_SMN_EXT_SCRATCH0 +#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH1 +#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH2 +#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH3 +#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH4 +#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH5 +#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH6 +#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL +//MP1_SMN_EXT_SCRATCH7 +#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL + + +#endif -- GitLab From 25eaa565c47b229962d6c6f2212b224db188aef9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 3 Apr 2018 15:49:56 -0500 Subject: [PATCH 0397/1692] Revert "drm/amdgpu: Add nbio support for vega20 (v2)" Revert this to add proper nbio 7.4 support. This reverts commit f5b2e1fa321eff20a9418ebd497d8a466f024a85. 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 18 +----------------- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 -- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 365517c0121e..df34dc79d444 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -34,19 +34,10 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 -/* vega20 */ -#define mmRCC_DEV0_EPF0_STRAP0_VG20 0x0011 -#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX 2 - static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); - if (adev->asic_type == CHIP_VEGA20) - tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20); - else - tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); - tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -84,14 +75,10 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); u32 doorbell_range = RREG32(reg); - u32 range = 2; - - if (adev->asic_type == CHIP_VEGA20) - range = 8; if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); - doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, range); + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2); } else doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); @@ -146,9 +133,6 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade { uint32_t def, data; - if (adev->asic_type == CHIP_VEGA20) - return; - /* NBIF_MGCG_CTRL_LCLK */ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 83f2717fcf81..6bd80369685e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -497,8 +497,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) adev->nbio_funcs = &nbio_v7_0_funcs; - else if (adev->asic_type == CHIP_VEGA20) - adev->nbio_funcs = &nbio_v7_0_funcs; else adev->nbio_funcs = &nbio_v6_1_funcs; -- GitLab From fe3c948918e7e3d18eed85571d32a2f7c4b63a84 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 23 Mar 2018 14:44:28 -0500 Subject: [PATCH 0398/1692] drm/amdgpu: Add nbio 7.4 support for vega20 (v3) Some register offset in nbio v7.4 are different with v7.0. We need a seperate nbio_v7_4.c for vega20. 
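As context (not part of the patch): common soc15 code never touches NBIO registers directly, it calls through the adev->nbio_funcs table, so supporting vega20 only means plugging in the nbio_v7_4_funcs callbacks added below. A rough sketch of the caller side, which is assumed here rather than taken from this series:

    /* Sketch: SDMA ring bring-up stays NBIO-version-agnostic because it goes
     * through the callback table selected in soc15_set_ip_blocks().  On vega20
     * this resolves to nbio_v7_4_sdma_doorbell_range() below, which programs
     * BIF_SDMA*_DOORBELL_RANGE with SIZE = 8 rather than the SIZE = 2 used by
     * nbio v7.0. */
    adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
                                          ring->doorbell_index);
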
v2: fix doorbell range for sdma (Alex) v3: squash in static fix (kbuild test robot) Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 237 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h | 31 ++++ drivers/gpu/drm/amd/amdgpu/soc15.c | 2 + drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + 5 files changed, 272 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c create mode 100644 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index e610656015b9..dd6d70a05e7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -62,7 +62,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ - vega20_reg_init.o + vega20_reg_init.o nbio_v7_4.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c new file mode 100644 index 000000000000..89ea92075b6b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -0,0 +1,237 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "nbio_v7_4.h" + +#include "nbio/nbio_7_4_offset.h" +#include "nbio/nbio_7_4_sh_mask.h" + +#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c + +#define smnCPM_CONTROL 0x11180460 +#define smnPCIE_CNTL2 0x11180070 + +static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + + tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; + tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; + + return tmp; +} + +static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) + WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, + BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); + else + WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0); +} + +static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + if (!ring || !ring->funcs->emit_wreg) + WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); + else + amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( + NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0); +} + +static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev) +{ + return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE); +} + +static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index) +{ + u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) : + SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); + + u32 doorbell_range = RREG32(reg); + + if (use_doorbell) { + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 8); + } else + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); + + WREG32(reg, doorbell_range); +} + +static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev, + bool enable) +{ + WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 
1 : 0); +} + +static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, + bool enable) +{ + +} + +static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, + bool use_doorbell, int doorbell_index) +{ + u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE); + + if (use_doorbell) { + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index); + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2); + } else + ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0); + + WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range); +} + + +static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + //TODO: Add support for v7.4 +} + +static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_CNTL2); + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { + data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + } else { + data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + } + + if (def != data) + WREG32_PCIE(smnPCIE_CNTL2, data); +} + +static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + int data; + + /* AMD_CG_SUPPORT_BIF_MGCG */ + data = RREG32_PCIE(smnCPM_CONTROL); + if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_BIF_MGCG; + + /* AMD_CG_SUPPORT_BIF_LS */ + data = RREG32_PCIE(smnPCIE_CNTL2); + if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_BIF_LS; +} + +static void nbio_v7_4_ih_control(struct amdgpu_device *adev) +{ + u32 interrupt_cntl; + + /* setup interrupt control */ + WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8); + interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL); + /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi + * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN + */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0); + /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ + interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0); + WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl); +} + +static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ); +} + +static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE); +} + +static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2); +} + +static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); +} + +static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { + .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = 
GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK, + .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, +}; + +static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) +{ + if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; +} + +static void nbio_v7_4_init_registers(struct amdgpu_device *adev) +{ + +} + +const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { + .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg, + .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset, + .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset, + .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset, + .get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset, + .get_rev_id = nbio_v7_4_get_rev_id, + .mc_access_enable = nbio_v7_4_mc_access_enable, + .hdp_flush = nbio_v7_4_hdp_flush, + .get_memsize = nbio_v7_4_get_memsize, + .sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range, + .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture, + .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture, + .ih_doorbell_range = nbio_v7_4_ih_doorbell_range, + .update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating, + .update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep, + .get_clockgating_state = nbio_v7_4_get_clockgating_state, + .ih_control = nbio_v7_4_ih_control, + .init_registers = nbio_v7_4_init_registers, + .detect_hw_virt = nbio_v7_4_detect_hw_virt, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h new file mode 100644 index 000000000000..c442865bac4f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h @@ -0,0 +1,31 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __NBIO_V7_4_H__ +#define __NBIO_V7_4_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 6bd80369685e..73c85a0282d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -497,6 +497,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) adev->nbio_funcs = &nbio_v7_0_funcs; + else if (adev->asic_type == CHIP_VEGA20) + adev->nbio_funcs = &nbio_v7_4_funcs; else adev->nbio_funcs = &nbio_v6_1_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 1f714b7af520..f8ad7804dc40 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -26,6 +26,7 @@ #include "nbio_v6_1.h" #include "nbio_v7_0.h" +#include "nbio_v7_4.h" #define SOC15_FLUSH_GPU_TLB_NUM_WREG 4 #define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1 -- GitLab From 7a0d7089c700fbdc1bd3da957c26c0b142536cf6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 2 May 2018 15:50:10 +0800 Subject: [PATCH 0399/1692] drm/amdgpu: update atomfirmware.h Add struct atom_smc_dpm_info_v4_3 Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atomfirmware.h | 86 ++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 4bc118df3bc4..6109a45d7a63 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1446,6 +1446,92 @@ struct atom_smc_dpm_info_v4_1 uint32_t boardreserved[9]; }; +/* + *************************************************************************** + Data Table smc_dpm_info structure + *************************************************************************** + */ +struct atom_smc_dpm_info_v4_3 +{ + struct atom_common_table_header table_header; + uint8_t liquid1_i2c_address; + uint8_t liquid2_i2c_address; + uint8_t vr_i2c_address; + uint8_t plx_i2c_address; + + uint8_t liquid_i2c_linescl; + uint8_t liquid_i2c_linesda; + uint8_t vr_i2c_linescl; + uint8_t vr_i2c_linesda; + + uint8_t plx_i2c_linescl; + uint8_t plx_i2c_linesda; + uint8_t vrsensorpresent; + uint8_t liquidsensorpresent; + + uint16_t maxvoltagestepgfx; + uint16_t maxvoltagestepsoc; + + uint8_t vddgfxvrmapping; + uint8_t vddsocvrmapping; + uint8_t vddmem0vrmapping; + uint8_t vddmem1vrmapping; + + uint8_t gfxulvphasesheddingmask; + uint8_t soculvphasesheddingmask; + uint8_t externalsensorpresent; + uint8_t padding8_v; + + uint16_t gfxmaxcurrent; + uint8_t gfxoffset; + uint8_t padding_telemetrygfx; + + uint16_t socmaxcurrent; + uint8_t socoffset; + uint8_t padding_telemetrysoc; + + uint16_t mem0maxcurrent; + uint8_t mem0offset; + uint8_t padding_telemetrymem0; + + uint16_t mem1maxcurrent; + uint8_t mem1offset; + uint8_t padding_telemetrymem1; + + uint8_t acdcgpio; + uint8_t acdcpolarity; + uint8_t vr0hotgpio; + uint8_t vr0hotpolarity; + + uint8_t vr1hotgpio; + uint8_t vr1hotpolarity; + uint8_t padding1; + uint8_t padding2; + + uint8_t ledpin0; + uint8_t ledpin1; + uint8_t ledpin2; + uint8_t padding8_4; + + uint8_t pllgfxclkspreadenabled; + uint8_t pllgfxclkspreadpercent; + uint16_t pllgfxclkspreadfreq; + + uint8_t uclkspreadenabled; + uint8_t uclkspreadpercent; + uint16_t uclkspreadfreq; + + uint8_t fclkspreadenabled; + uint8_t fclkspreadpercent; + uint16_t fclkspreadfreq; + + uint8_t 
fllgfxclkspreadenabled; + uint8_t fllgfxclkspreadpercent; + uint16_t fllgfxclkspreadfreq; + + uint32_t boardreserved[10]; +}; + /* *************************************************************************** Data Table asic_profiling_info structure -- GitLab From 5f51ab41e7a0ca2b56080fe5e15fd3e32c96aecb Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 21 Mar 2018 14:10:21 +0800 Subject: [PATCH 0400/1692] drm/amd/powerplay: add vega20_inc.h (v2) v2: use thm 11.0.2 headers Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/vega20_inc.h | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h new file mode 100644 index 000000000000..6738bad53602 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h @@ -0,0 +1,35 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef VEGA20_INC_H +#define VEGA20_INC_H + +#include "asic_reg/thm/thm_11_0_2_offset.h" +#include "asic_reg/thm/thm_11_0_2_sh_mask.h" + +#include "asic_reg/mp/mp_9_0_offset.h" +#include "asic_reg/mp/mp_9_0_sh_mask.h" + +#include "asic_reg/nbio/nbio_7_4_offset.h" + +#endif -- GitLab From 5fef5b1e7d9b10847559bc1fcc8cc01bb84097b8 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 21 Mar 2018 16:16:41 +0800 Subject: [PATCH 0401/1692] drm/amd/powerplay: add smu11_driver_if.h (v4) v2: cleanup v3: fit the latest 40.6 smc fw v4: update to latest. Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/inc/smu11_driver_if.h | 831 ++++++++++++++++++ 1 file changed, 831 insertions(+) create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h new file mode 100644 index 000000000000..0a39a4c564d2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -0,0 +1,831 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU11_DRIVER_IF_H +#define SMU11_DRIVER_IF_H + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +#define SMU11_DRIVER_IF_VERSION 0x11 + +#define PPTABLE_V20_SMU_VERSION 2 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_ECLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_DCEFCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_PIXCLK_DPM_LEVELS 8 +#define NUM_PHYCLK_DPM_LEVELS 8 +#define NUM_LINK_LEVELS 2 +#define NUM_XGMI_LEVELS 2 + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) +#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) +#define MAX_ECLK_DPM_LEVEL (NUM_ECLK_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) +#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) +#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) +#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) +#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) +#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) +#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1) + +#define PPSMC_GeminiModeNone 0 +#define PPSMC_GeminiModeMaster 1 +#define PPSMC_GeminiModeSlave 2 + + +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_UCLK_BIT 2 +#define FEATURE_DPM_SOCCLK_BIT 3 +#define FEATURE_DPM_UVD_BIT 4 +#define FEATURE_DPM_VCE_BIT 5 +#define FEATURE_ULV_BIT 6 +#define FEATURE_DPM_MP0CLK_BIT 7 +#define FEATURE_DPM_LINK_BIT 8 +#define FEATURE_DPM_DCEFCLK_BIT 9 +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_LCLK_BIT 12 +#define FEATURE_PPT_BIT 13 +#define FEATURE_TDC_BIT 14 +#define FEATURE_THERMAL_BIT 15 +#define FEATURE_GFX_PER_CU_CG_BIT 16 +#define FEATURE_RM_BIT 17 +#define FEATURE_DS_DCEFCLK_BIT 18 +#define FEATURE_ACDC_BIT 19 +#define FEATURE_VR0HOT_BIT 20 +#define FEATURE_VR1HOT_BIT 21 +#define FEATURE_FW_CTF_BIT 22 +#define FEATURE_LED_DISPLAY_BIT 23 +#define FEATURE_FAN_CONTROL_BIT 24 +#define FEATURE_GFX_EDC_BIT 25 +#define FEATURE_GFXOFF_BIT 
26 +#define FEATURE_CG_BIT 27 +#define FEATURE_DPM_FCLK_BIT 28 +#define FEATURE_DS_FCLK_BIT 29 +#define FEATURE_DS_MP1CLK_BIT 30 +#define FEATURE_DS_MP0CLK_BIT 31 +#define FEATURE_XGMI_BIT 32 +#define FEATURE_SPARE_33_BIT 33 +#define FEATURE_SPARE_34_BIT 34 +#define FEATURE_SPARE_35_BIT 35 +#define FEATURE_SPARE_36_BIT 36 +#define FEATURE_SPARE_37_BIT 37 +#define FEATURE_SPARE_38_BIT 38 +#define FEATURE_SPARE_39_BIT 39 +#define FEATURE_SPARE_40_BIT 40 +#define FEATURE_SPARE_41_BIT 41 +#define FEATURE_SPARE_42_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 + +#define NUM_FEATURES 64 + +#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) +#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) +#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) +#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT ) +#define FEATURE_DPM_UVD_MASK (1 << FEATURE_DPM_UVD_BIT ) +#define FEATURE_DPM_VCE_MASK (1 << FEATURE_DPM_VCE_BIT ) +#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT ) +#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) +#define FEATURE_DPM_LINK_MASK (1 << FEATURE_DPM_LINK_BIT ) +#define FEATURE_DPM_DCEFCLK_MASK (1 << FEATURE_DPM_DCEFCLK_BIT ) +#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) +#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) +#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) +#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) +#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) +#define FEATURE_GFX_PER_CU_CG_MASK (1 << FEATURE_GFX_PER_CU_CG_BIT ) +#define FEATURE_RM_MASK (1 << FEATURE_RM_BIT ) +#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT ) +#define FEATURE_ACDC_MASK (1 << FEATURE_ACDC_BIT ) +#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) +#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) +#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) +#define FEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT ) +#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) +#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) +#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT ) +#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT ) +#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT ) +#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) +#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) +#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) + + +#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 +#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_SOCCLK 0x00000004 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_UCLK 0x00000008 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000010 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_UCLK 0x00000020 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000040 
+#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_UCLK 0x00000080 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_SOCCLK 0x00000100 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_UCLK 0x00000200 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_SOCCLK 0x00000400 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_UCLK 0x00000800 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00001000 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00002000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_GFXCLK_SWITCH 0x00004000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_SOCCLK_SWITCH 0x00008000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_FCLK_SWITCH 0x00020000 + +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + + +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + + +#define THROTTLER_STATUS_PADDING_BIT 0 +#define THROTTLER_STATUS_TEMP_EDGE_BIT 1 +#define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2 +#define THROTTLER_STATUS_TEMP_HBM_BIT 3 +#define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4 +#define THROTTLER_STATUS_TEMP_VR_MEM_BIT 5 +#define THROTTLER_STATUS_TEMP_LIQUID_BIT 6 +#define THROTTLER_STATUS_TEMP_PLX_BIT 7 +#define THROTTLER_STATUS_TEMP_SKIN_BIT 8 +#define THROTTLER_STATUS_TDC_GFX_BIT 9 +#define THROTTLER_STATUS_TDC_SOC_BIT 10 +#define THROTTLER_STATUS_PPT_BIT 11 +#define THROTTLER_STATUS_FIT_BIT 12 +#define THROTTLER_STATUS_PPM_BIT 13 + + +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF + + +#define WORKLOAD_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_COUNT 7 + + +#define XGMI_STATE_D0 1 +#define XGMI_STATE_D3 0 + +typedef struct { + uint32_t a; + uint32_t b; + uint32_t c; +} QuadraticInt_t; + +typedef struct { + uint32_t m; + uint32_t b; +} LinearInt_t; + +typedef struct { + uint32_t a; + uint32_t b; + uint32_t c; +} DroopInt_t; + +typedef enum { + PPCLK_GFXCLK, + PPCLK_VCLK, + PPCLK_DCLK, + PPCLK_ECLK, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_DCEFCLK, + PPCLK_DISPCLK, + PPCLK_PIXCLK, + PPCLK_PHYCLK, + PPCLK_FCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + VOLTAGE_MODE_AVFS = 0, + VOLTAGE_MODE_AVFS_SS, + VOLTAGE_MODE_SS, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + + +typedef struct { + uint8_t VoltageMode; + uint8_t SnapToDiscrete; + uint8_t NumDiscreteLevels; + uint8_t padding; + LinearInt_t ConversionToAvfsClk; + QuadraticInt_t SsCurve; +} DpmDescriptor_t; + +typedef struct { + uint32_t Version; + + + uint32_t FeaturesToRun[2]; + + + uint16_t SocketPowerLimitAc0; + uint16_t SocketPowerLimitAc0Tau; + uint16_t SocketPowerLimitAc1; + uint16_t SocketPowerLimitAc1Tau; + uint16_t SocketPowerLimitAc2; + uint16_t SocketPowerLimitAc2Tau; + uint16_t SocketPowerLimitAc3; + uint16_t SocketPowerLimitAc3Tau; + uint16_t SocketPowerLimitDc; + uint16_t SocketPowerLimitDcTau; + uint16_t TdcLimitSoc; + uint16_t TdcLimitSocTau; + uint16_t TdcLimitGfx; + uint16_t TdcLimitGfxTau; + + uint16_t 
TedgeLimit; + uint16_t ThotspotLimit; + uint16_t ThbmLimit; + uint16_t Tvr_gfxLimit; + uint16_t Tvr_memLimit; + uint16_t Tliquid1Limit; + uint16_t Tliquid2Limit; + uint16_t TplxLimit; + uint32_t FitLimit; + + uint16_t PpmPowerLimit; + uint16_t PpmTemperatureThreshold; + + uint8_t MemoryOnPackage; + uint8_t padding8_limits[3]; + + + uint16_t UlvVoltageOffsetSoc; + uint16_t UlvVoltageOffsetGfx; + + uint8_t UlvSmnclkDid; + uint8_t UlvMp1clkDid; + uint8_t UlvGfxclkBypass; + uint8_t Padding234; + + + uint16_t MinVoltageGfx; + uint16_t MinVoltageSoc; + uint16_t MaxVoltageGfx; + uint16_t MaxVoltageSoc; + + uint16_t LoadLineResistanceGfx; + uint16_t LoadLineResistanceSoc; + + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; + uint16_t FreqTableEclk [NUM_ECLK_DPM_LEVELS ]; + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; + uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; + uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; + uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; + uint16_t Padding8_Clks; + + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; + + + uint16_t GfxclkFidle; + uint16_t GfxclkSlewRate; + uint16_t CksEnableFreq; + uint16_t Padding789; + QuadraticInt_t CksVoltageOffset; + uint8_t Padding567[4]; + uint16_t GfxclkDsMaxFreq; + uint8_t GfxclkSource; + uint8_t Padding456; + + uint8_t LowestUclkReservedForUlv; + uint8_t Padding8_Uclk[3]; + + + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + + uint16_t EnableTdpm; + uint16_t TdpmHighHystTemperature; + uint16_t TdpmLowHystTemperature; + uint16_t GfxclkFreqHighTempLimit; + + + uint16_t FanStopTemp; + uint16_t FanStartTemp; + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + uint16_t FanGainLiquid; + uint16_t FanGainVrVddc; + uint16_t FanGainVrMvdd; + uint16_t FanGainPlx; + uint16_t FanGainHbm; + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + + + + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_Avfs[2]; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; + DroopInt_t dBtcGbGfxCksOn; + DroopInt_t dBtcGbGfxCksOff; + DroopInt_t dBtcGbGfxAfll; + DroopInt_t dBtcGbSoc; + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; + + + uint8_t XgmiLinkSpeed [NUM_XGMI_LEVELS]; + uint8_t XgmiLinkWidth [NUM_XGMI_LEVELS]; + uint16_t XgmiFclkFreq [NUM_XGMI_LEVELS]; + uint16_t XgmiUclkFreq [NUM_XGMI_LEVELS]; + uint16_t XgmiSocclkFreq [NUM_XGMI_LEVELS]; + uint16_t XgmiSocVoltage [NUM_XGMI_LEVELS]; + + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t 
ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; + + uint16_t MGpuFanBoostLimitRpm; + uint16_t padding16_Fan; + + uint32_t Reserved[13]; + + + + uint8_t Liquid1_I2C_address; + uint8_t Liquid2_I2C_address; + uint8_t Vr_I2C_address; + uint8_t Plx_I2C_address; + + uint8_t Liquid_I2C_LineSCL; + uint8_t Liquid_I2C_LineSDA; + uint8_t Vr_I2C_LineSCL; + uint8_t Vr_I2C_LineSDA; + + uint8_t Plx_I2C_LineSCL; + uint8_t Plx_I2C_LineSDA; + uint8_t VrSensorPresent; + uint8_t LiquidSensorPresent; + + uint16_t MaxVoltageStepGfx; + uint16_t MaxVoltageStepSoc; + + uint8_t VddGfxVrMapping; + uint8_t VddSocVrMapping; + uint8_t VddMem0VrMapping; + uint8_t VddMem1VrMapping; + + uint8_t GfxUlvPhaseSheddingMask; + uint8_t SocUlvPhaseSheddingMask; + uint8_t ExternalSensorPresent; + uint8_t Padding8_V; + + + uint16_t GfxMaxCurrent; + int8_t GfxOffset; + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; + int8_t SocOffset; + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; + int8_t Mem0Offset; + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; + int8_t Mem1Offset; + uint8_t Padding_TelemetryMem1; + + + uint8_t AcDcGpio; + uint8_t AcDcPolarity; + uint8_t VR0HotGpio; + uint8_t VR0HotPolarity; + + uint8_t VR1HotGpio; + uint8_t VR1HotPolarity; + uint8_t Padding1; + uint8_t Padding2; + + + + uint8_t LedPin0; + uint8_t LedPin1; + uint8_t LedPin2; + uint8_t padding8_4; + + + uint8_t PllGfxclkSpreadEnabled; + uint8_t PllGfxclkSpreadPercent; + uint16_t PllGfxclkSpreadFreq; + + uint8_t UclkSpreadEnabled; + uint8_t UclkSpreadPercent; + uint16_t UclkSpreadFreq; + + uint8_t FclkSpreadEnabled; + uint8_t FclkSpreadPercent; + uint16_t FclkSpreadFreq; + + uint8_t FllGfxclkSpreadEnabled; + uint8_t FllGfxclkSpreadPercent; + uint16_t FllGfxclkSpreadFreq; + + uint32_t BoardReserved[10]; + + + uint32_t MmHubPadding[8]; + +} PPTable_t; + +typedef struct { + + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + + + uint32_t MmHubPadding[8]; +} DriverSmuConfig_t; + +typedef struct { + + uint16_t GfxclkFmin; + uint16_t GfxclkFmax; + uint16_t GfxclkFreq1; + uint16_t GfxclkOffsetVolt1; + uint16_t GfxclkFreq2; + uint16_t GfxclkOffsetVolt2; + uint16_t GfxclkFreq3; + uint16_t GfxclkOffsetVolt3; + uint16_t UclkFmax; + int16_t OverDrivePct; + uint16_t FanMaximumRpm; + uint16_t FanMinimumPwm; + uint16_t FanTargetTemperature; + uint16_t MaxOpTemp; + uint16_t FanZeroRpmEnable; + uint16_t Padding; + +} OverDriveTable_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t CurrSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureHBM ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem ; + uint16_t TemperatureLiquid ; + uint16_t TemperaturePlx ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding[3]; + + + uint32_t MmHubPadding[7]; +} SmuMetrics_t; + +typedef struct { + uint16_t MinClock; + uint16_t MaxClock; + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Padding[3]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCEFCLK, + 
WM_COUNT_PP, +} WM_CLOCK_e; + +typedef struct { + + WatermarkRowGeneric_t WatermarkRow[WM_COUNT_PP][NUM_WM_RANGES]; + + uint32_t MmHubPadding[7]; +} Watermarks_t; + +typedef struct { + uint16_t avgPsmCount[45]; + uint16_t minPsmCount[45]; + float avgPsmVoltage[45]; + float minPsmVoltage[45]; + + uint16_t avgScsPsmCount; + uint16_t minScsPsmCount; + float avgScsPsmVoltage; + float minScsPsmVoltage; + + + uint32_t MmHubPadding[6]; +} AvfsDebugTable_t; + +typedef struct { + uint8_t AvfsVersion; + uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; + int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; + int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; + + int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; + int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; + int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; + + int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; + int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; + int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; + + int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; + int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; + int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; + + int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; + int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; + int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; + + uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; + + uint32_t VInversion[AVFS_VOLTAGE_COUNT]; + + + int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; + int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; + int32_t P2V_b[AVFS_VOLTAGE_COUNT]; + + uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + uint32_t EnabledAvfsModules; + + uint32_t MmHubPadding[7]; +} AvfsFuseOverride_t; + +typedef struct { + + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t Gfx_UseRlcBusy; + uint16_t Gfx_MinActiveFreq; + uint16_t Gfx_BoosterFreq; + uint16_t Gfx_PD_Data_time_constant; + uint32_t Gfx_PD_Data_limit_a; + uint32_t Gfx_PD_Data_limit_b; + uint32_t Gfx_PD_Data_limit_c; + uint32_t Gfx_PD_Data_error_coeff; + uint32_t Gfx_PD_Data_error_rate_coeff; + + uint8_t Soc_ActiveHystLimit; + uint8_t Soc_IdleHystLimit; + uint8_t Soc_FPS; + uint8_t Soc_MinActiveFreqType; + uint8_t Soc_BoosterFreqType; + uint8_t Soc_UseRlcBusy; + uint16_t Soc_MinActiveFreq; + uint16_t Soc_BoosterFreq; + uint16_t Soc_PD_Data_time_constant; + uint32_t Soc_PD_Data_limit_a; + uint32_t Soc_PD_Data_limit_b; + uint32_t Soc_PD_Data_limit_c; + uint32_t Soc_PD_Data_error_coeff; + uint32_t Soc_PD_Data_error_rate_coeff; + + uint8_t Mem_ActiveHystLimit; + uint8_t Mem_IdleHystLimit; + uint8_t Mem_FPS; + uint8_t Mem_MinActiveFreqType; + uint8_t Mem_BoosterFreqType; + uint8_t Mem_UseRlcBusy; + uint16_t Mem_MinActiveFreq; + uint16_t Mem_BoosterFreq; + uint16_t Mem_PD_Data_time_constant; + uint32_t Mem_PD_Data_limit_a; + uint32_t Mem_PD_Data_limit_b; + uint32_t Mem_PD_Data_limit_c; + uint32_t Mem_PD_Data_error_coeff; + uint32_t Mem_PD_Data_error_rate_coeff; + + uint8_t Fclk_ActiveHystLimit; + uint8_t Fclk_IdleHystLimit; + uint8_t Fclk_FPS; + uint8_t Fclk_MinActiveFreqType; + uint8_t Fclk_BoosterFreqType; + uint8_t Fclk_UseRlcBusy; + uint16_t Fclk_MinActiveFreq; + uint16_t Fclk_BoosterFreq; + uint16_t Fclk_PD_Data_time_constant; + uint32_t Fclk_PD_Data_limit_a; + uint32_t Fclk_PD_Data_limit_b; + 
uint32_t Fclk_PD_Data_limit_c; + uint32_t Fclk_PD_Data_error_coeff; + uint32_t Fclk_PD_Data_error_rate_coeff; + +} DpmActivityMonitorCoeffInt_t; + +#define TABLE_PPTABLE 0 +#define TABLE_WATERMARKS 1 +#define TABLE_AVFS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_AVFS_FUSE_OVERRIDE 4 +#define TABLE_PMSTATUSLOG 5 +#define TABLE_SMU_METRICS 6 +#define TABLE_DRIVER_SMU_CONFIG 7 +#define TABLE_ACTIVITY_MONITOR_COEFF 8 +#define TABLE_OVERDRIVE 9 +#define TABLE_COUNT 10 + + +#define UCLK_SWITCH_SLOW 0 +#define UCLK_SWITCH_FAST 1 + + +#define SQ_Enable_MASK 0x1 +#define SQ_IR_MASK 0x2 +#define SQ_PCC_MASK 0x4 +#define SQ_EDC_MASK 0x8 + +#define TCP_Enable_MASK 0x100 +#define TCP_IR_MASK 0x200 +#define TCP_PCC_MASK 0x400 +#define TCP_EDC_MASK 0x800 + +#define TD_Enable_MASK 0x10000 +#define TD_IR_MASK 0x20000 +#define TD_PCC_MASK 0x40000 +#define TD_EDC_MASK 0x80000 + +#define DB_Enable_MASK 0x1000000 +#define DB_IR_MASK 0x2000000 +#define DB_PCC_MASK 0x4000000 +#define DB_EDC_MASK 0x8000000 + +#define SQ_Enable_SHIFT 0 +#define SQ_IR_SHIFT 1 +#define SQ_PCC_SHIFT 2 +#define SQ_EDC_SHIFT 3 + +#define TCP_Enable_SHIFT 8 +#define TCP_IR_SHIFT 9 +#define TCP_PCC_SHIFT 10 +#define TCP_EDC_SHIFT 11 + +#define TD_Enable_SHIFT 16 +#define TD_IR_SHIFT 17 +#define TD_PCC_SHIFT 18 +#define TD_EDC_SHIFT 19 + +#define DB_Enable_SHIFT 24 +#define DB_IR_SHIFT 25 +#define DB_PCC_SHIFT 26 +#define DB_EDC_SHIFT 27 + +#define REMOVE_FMAX_MARGIN_BIT 0x0 +#define REMOVE_DCTOL_MARGIN_BIT 0x1 +#define REMOVE_PLATFORM_MARGIN_BIT 0x2 + +#endif -- GitLab From 79df9413f7c3bc5ae305b8c08dbd048334dd6092 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 21 Mar 2018 16:21:51 +0800 Subject: [PATCH 0402/1692] drm/amd/powerplay: add vega20_ppsmc.h (v2) v2: update to latest. Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h new file mode 100644 index 000000000000..165429f717c4 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h @@ -0,0 +1,127 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef VEGA20_PP_SMC_H +#define VEGA20_PP_SMC_H + +#pragma pack(push, 1) + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetWorkloadMask 0xE +#define PPSMC_MSG_SetPptLimit 0xF +#define PPSMC_MSG_SetDriverDramAddrHigh 0x10 +#define PPSMC_MSG_SetDriverDramAddrLow 0x11 +#define PPSMC_MSG_SetToolsDramAddrHigh 0x12 +#define PPSMC_MSG_SetToolsDramAddrLow 0x13 +#define PPSMC_MSG_TransferTableSmu2Dram 0x14 +#define PPSMC_MSG_TransferTableDram2Smu 0x15 +#define PPSMC_MSG_UseDefaultPPTable 0x16 +#define PPSMC_MSG_UseBackupPPTable 0x17 +#define PPSMC_MSG_RunBtc 0x18 +#define PPSMC_MSG_RequestI2CBus 0x19 +#define PPSMC_MSG_ReleaseI2CBus 0x1A +#define PPSMC_MSG_SetFloorSocVoltage 0x21 +#define PPSMC_MSG_SoftReset 0x22 +#define PPSMC_MSG_StartBacoMonitor 0x23 +#define PPSMC_MSG_CancelBacoMonitor 0x24 +#define PPSMC_MSG_EnterBaco 0x25 +#define PPSMC_MSG_SetSoftMinByFreq 0x26 +#define PPSMC_MSG_SetSoftMaxByFreq 0x27 +#define PPSMC_MSG_SetHardMinByFreq 0x28 +#define PPSMC_MSG_SetHardMaxByFreq 0x29 +#define PPSMC_MSG_GetMinDpmFreq 0x2A +#define PPSMC_MSG_GetMaxDpmFreq 0x2B +#define PPSMC_MSG_GetDpmFreqByIndex 0x2C +#define PPSMC_MSG_GetDpmClockFreq 0x2D +#define PPSMC_MSG_GetSsVoltageByDpm 0x2E +#define PPSMC_MSG_SetMemoryChannelConfig 0x2F +#define PPSMC_MSG_SetGeminiMode 0x30 +#define PPSMC_MSG_SetGeminiApertureHigh 0x31 +#define PPSMC_MSG_SetGeminiApertureLow 0x32 +#define PPSMC_MSG_SetMinLinkDpmByIndex 0x33 +#define PPSMC_MSG_OverridePcieParameters 0x34 +#define PPSMC_MSG_OverDriveSetPercentage 0x35 +#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x36 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x37 +#define PPSMC_MSG_NotifyPowerSource 0x38 +#define PPSMC_MSG_SetUclkFastSwitch 0x39 +#define PPSMC_MSG_SetUclkDownHyst 0x3A +//#define PPSMC_MSG_GfxDeviceDriverReset 0x3B +#define PPSMC_MSG_GetCurrentRpm 0x3C +#define PPSMC_MSG_SetVideoFps 0x3D +#define PPSMC_MSG_SetTjMax 0x3E +#define PPSMC_MSG_SetFanTemperatureTarget 0x3F +#define PPSMC_MSG_PrepareMp1ForUnload 0x40 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x41 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x42 +#define PPSMC_MSG_DramLogSetDramSize 0x43 +#define PPSMC_MSG_SetFanMaxRpm 0x44 +#define PPSMC_MSG_SetFanMinPwm 0x45 +#define PPSMC_MSG_ConfigureGfxDidt 0x46 +#define PPSMC_MSG_NumOfDisplays 0x47 +#define PPSMC_MSG_RemoveMargins 0x48 +#define PPSMC_MSG_ReadSerialNumTop32 0x49 +#define PPSMC_MSG_ReadSerialNumBottom32 0x4A +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C +#define PPSMC_MSG_WaflTest 0x4D +// Unused ID 0x4E to 0x50 +#define PPSMC_MSG_AllowGfxOff 0x51 +#define PPSMC_MSG_DisallowGfxOff 0x52 +#define PPSMC_MSG_GetPptLimit 0x53 +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x54 +#define PPSMC_MSG_GetDebugData 0x55 
+#define PPSMC_MSG_SetXgmiMode 0x56 +#define PPSMC_MSG_RunAfllBtc 0x57 +#define PPSMC_MSG_ExitBaco 0x58 +#define PPSMC_MSG_PrepareMp1ForReset 0x59 +#define PPSMC_MSG_PrepareMp1ForShutdown 0x5A +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D +#define PPSMC_Message_Count 0x5E + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_Msg; + +#pragma pack(pop) + +#endif -- GitLab From b9443b572c0614c54bc475abf5b3b7e6e88c1158 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 21 Mar 2018 16:36:08 +0800 Subject: [PATCH 0403/1692] drm/amd/powerplay: add vega20_pptable.h (v2) v2: squash in table size fixes Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_pptable.h | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h new file mode 100644 index 000000000000..b104f6af81a4 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h @@ -0,0 +1,140 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _VEGA20_PPTABLE_H_ +#define _VEGA20_PPTABLE_H_ + +#pragma pack(push, 1) + +#define ATOM_VEGA20_PP_THERMALCONTROLLER_NONE 0 +#define ATOM_VEGA20_PP_THERMALCONTROLLER_VEGA20 26 + +#define ATOM_VEGA20_PP_PLATFORM_CAP_POWERPLAY 0x1 +#define ATOM_VEGA20_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 +#define ATOM_VEGA20_PP_PLATFORM_CAP_HARDWAREDC 0x4 +#define ATOM_VEGA20_PP_PLATFORM_CAP_BACO 0x8 +#define ATOM_VEGA20_PP_PLATFORM_CAP_BAMACO 0x10 +#define ATOM_VEGA20_PP_PLATFORM_CAP_ENABLESHADOWPSTATE 0x20 + +#define ATOM_VEGA20_TABLE_REVISION_VEGA20 11 +#define ATOM_VEGA20_ODFEATURE_MAX_COUNT 32 +#define ATOM_VEGA20_ODSETTING_MAX_COUNT 32 +#define ATOM_VEGA20_PPCLOCK_MAX_COUNT 16 + +enum ATOM_VEGA20_ODFEATURE_ID { + ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS = 0, + ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE, + ATOM_VEGA20_ODFEATURE_UCLK_MAX, + ATOM_VEGA20_ODFEATURE_POWER_LIMIT, + ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT, //FanMaximumRpm + ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN, //FanMinimumPwm + ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN, //FanTargetTemperature + ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM, //MaxOpTemp + ATOM_VEGA20_ODFEATURE_COUNT, +}; + +enum ATOM_VEGA20_ODSETTING_ID { + ATOM_VEGA20_ODSETTING_GFXCLKFMAX = 0, + ATOM_VEGA20_ODSETTING_GFXCLKFMIN, + ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1, + ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1, + ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2, + ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2, + ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3, + ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3, + ATOM_VEGA20_ODSETTING_UCLKFMAX, + ATOM_VEGA20_ODSETTING_POWERPERCENTAGE, + ATOM_VEGA20_ODSETTING_FANRPMMIN, + ATOM_VEGA20_ODSETTING_FANRPMACOUSTICLIMIT, + ATOM_VEGA20_ODSETTING_FANTARGETTEMPERATURE, + ATOM_VEGA20_ODSETTING_OPERATINGTEMPMAX, + ATOM_VEGA20_ODSETTING_COUNT, +}; +typedef enum ATOM_VEGA20_ODSETTING_ID ATOM_VEGA20_ODSETTING_ID; + +typedef struct _ATOM_VEGA20_OVERDRIVE8_RECORD +{ + UCHAR ucODTableRevision; + ULONG ODFeatureCount; + UCHAR ODFeatureCapabilities [ATOM_VEGA20_ODFEATURE_MAX_COUNT]; //OD feature support flags + ULONG ODSettingCount; + ULONG ODSettingsMax [ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Upper Limit for each OD Setting + ULONG ODSettingsMin [ATOM_VEGA20_ODSETTING_MAX_COUNT]; //Lower Limit for each OD Setting +} ATOM_VEGA20_OVERDRIVE8_RECORD; + +enum ATOM_VEGA20_PPCLOCK_ID { + ATOM_VEGA20_PPCLOCK_GFXCLK = 0, + ATOM_VEGA20_PPCLOCK_VCLK, + ATOM_VEGA20_PPCLOCK_DCLK, + ATOM_VEGA20_PPCLOCK_ECLK, + ATOM_VEGA20_PPCLOCK_SOCCLK, + ATOM_VEGA20_PPCLOCK_UCLK, + ATOM_VEGA20_PPCLOCK_FCLK, + ATOM_VEGA20_PPCLOCK_DCEFCLK, + ATOM_VEGA20_PPCLOCK_DISPCLK, + ATOM_VEGA20_PPCLOCK_PIXCLK, + ATOM_VEGA20_PPCLOCK_PHYCLK, + ATOM_VEGA20_PPCLOCK_COUNT, +}; +typedef enum ATOM_VEGA20_PPCLOCK_ID ATOM_VEGA20_PPCLOCK_ID; + +typedef struct _ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD +{ + UCHAR ucTableRevision; + ULONG PowerSavingClockCount; // Count of PowerSavingClock Mode + ULONG PowerSavingClockMax [ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Maximum array In MHz + ULONG PowerSavingClockMin [ATOM_VEGA20_PPCLOCK_MAX_COUNT]; // PowerSavingClock Mode Clock Minimum array In MHz +} ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD; + +typedef struct _ATOM_VEGA20_POWERPLAYTABLE +{ + struct atom_common_table_header sHeader; + UCHAR ucTableRevision; + USHORT usTableSize; + ULONG ulGoldenPPID; + ULONG ulGoldenRevision; + USHORT usFormatID; + + ULONG ulPlatformCaps; + + UCHAR ucThermalControllerType; + + USHORT usSmallPowerLimit1; + USHORT usSmallPowerLimit2; + USHORT usBoostPowerLimit; 
+ USHORT usODTurboPowerLimit; + USHORT usODPowerSavePowerLimit; + USHORT usSoftwareShutdownTemp; + + ATOM_VEGA20_POWER_SAVING_CLOCK_RECORD PowerSavingClockTable; //PowerSavingClock Mode Clock Min/Max array + + ATOM_VEGA20_OVERDRIVE8_RECORD OverDrive8Table; //OverDrive8 Feature capabilities and Settings Range (Max and Min) + + USHORT usReserve[5]; + + PPTable_t smcPPTable; + +} ATOM_Vega20_POWERPLAYTABLE; + +#pragma pack(pop) + +#endif -- GitLab From f4eac80add11572fe36800c045a1ed1fd9132ec0 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 2 Aug 2018 15:52:41 -0500 Subject: [PATCH 0404/1692] drm/amd/powerplay: add the smu manager for vega20 (v2) The SMU manager handles the driver interaction with the SMU which handles clock and voltage controls. v2: switch to SOC15 register access macros reserve space for ActivityMonitor table enable SMU fw loading Drop dead code from bringup Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +- .../drm/amd/powerplay/smumgr/vega20_smumgr.c | 530 ++++++++++++++++++ .../drm/amd/powerplay/smumgr/vega20_smumgr.h | 61 ++ 3 files changed, 592 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 8d557accaef2..6c59c61a0d81 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -26,7 +26,7 @@ SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \ polaris10_smumgr.o iceland_smumgr.o \ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \ - vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o + vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o vega20_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c new file mode 100644 index 000000000000..41a2a5df679b --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -0,0 +1,530 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "smumgr.h" +#include "vega20_inc.h" +#include "soc15_common.h" +#include "vega20_smumgr.h" +#include "vega20_ppsmc.h" +#include "smu11_driver_if.h" +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "smu_ucode_xfer_vi.h" +#include "smu7_smumgr.h" +#include "vega20_hwmgr.h" + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +/* address block */ +#define smnMP1_FIRMWARE_FLAGS 0x3010024 +#define smnMP0_FW_INTF 0x30101c0 +#define smnMP1_PUB_CTRL 0x3010b14 + +static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t mp1_fw_flags; + + WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, + (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); + + mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + + if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> + MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) + return true; + + return false; +} + +/* + * Check if SMC has responded to previous message. + * + * @param smumgr the address of the powerplay hardware manager. + * @return TRUE SMC has responded, FALSE otherwise. + */ +static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t reg; + + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); + + phm_wait_for_register_unequal(hwmgr, reg, + 0, MP1_C2PMSG_90__CONTENT_MASK); + + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); +} + +/* + * Send a message to the SMC, and do not wait for its response. + * @param smumgr the address of the powerplay hardware manager. + * @param msg the message to send. + * @return Always return 0. + */ +static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, + uint16_t msg) +{ + struct amdgpu_device *adev = hwmgr->adev; + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); + + return 0; +} + +/* + * Send a message to the SMC, and wait for its response. + * @param hwmgr the address of the powerplay hardware manager. + * @param msg the message to send. + * @return Always return 0. + */ +static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) +{ + struct amdgpu_device *adev = hwmgr->adev; + int ret = 0; + + vega20_wait_for_response(hwmgr); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + + vega20_send_msg_to_smc_without_waiting(hwmgr, msg); + + ret = vega20_wait_for_response(hwmgr); + if (ret != PPSMC_Result_OK) + pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret); + + return (ret == PPSMC_Result_OK) ? 0 : -EIO; +} + +/* + * Send a message to the SMC with parameter + * @param hwmgr: the address of the powerplay hardware manager. + * @param msg: the message to send. + * @param parameter: the parameter to send + * @return Always return 0. + */ +static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter) +{ + struct amdgpu_device *adev = hwmgr->adev; + int ret = 0; + + vega20_wait_for_response(hwmgr); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); + + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); + + vega20_send_msg_to_smc_without_waiting(hwmgr, msg); + + ret = vega20_wait_for_response(hwmgr); + if (ret != PPSMC_Result_OK) + pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret); + + return (ret == PPSMC_Result_OK) ? 0 : -EIO; +} + +/* + * Retrieve an argument from SMC. + * @param hwmgr the address of the powerplay hardware manager. 
+ * @param arg pointer to store the argument from SMC. + * @return Always return 0. + */ +int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) +{ + struct amdgpu_device *adev = hwmgr->adev; + + *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); + + return 0; +} + +/* + * Copy table from SMC into driver FB + * @param hwmgr the address of the HW manager + * @param table_id the driver's table ID to copy from + */ +int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, + uint8_t *table, int16_t table_id) +{ + struct vega20_smumgr *priv = + (struct vega20_smumgr *)(hwmgr->smu_backend); + int ret = 0; + + PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT, + "Invalid SMU Table ID!", return -EINVAL); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, + "Invalid SMU Table version!", return -EINVAL); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, + "Invalid SMU Table Length!", return -EINVAL); + + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetDriverDramAddrHigh, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, + "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetDriverDramAddrLow, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, + "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0, + "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", + return ret); + + memcpy(table, priv->smu_tables.entry[table_id].table, + priv->smu_tables.entry[table_id].size); + + return 0; +} + +/* + * Copy table from Driver FB into SMC + * @param hwmgr the address of the HW manager + * @param table_id the table to copy from + */ +int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr, + uint8_t *table, int16_t table_id) +{ + struct vega20_smumgr *priv = + (struct vega20_smumgr *)(hwmgr->smu_backend); + int ret = 0; + + PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT, + "Invalid SMU Table ID!", return -EINVAL); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, + "Invalid SMU Table version!", return -EINVAL); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, + "Invalid SMU Table Length!", return -EINVAL); + + memcpy(priv->smu_tables.entry[table_id].table, table, + priv->smu_tables.entry[table_id].size); + + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetDriverDramAddrHigh, + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, + "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetDriverDramAddrLow, + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, + "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0, + "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", + return ret); + + return 0; +} + +int vega20_enable_smc_features(struct pp_hwmgr *hwmgr, + bool enable, uint64_t feature_mask) +{ + uint32_t smu_features_low, smu_features_high; + int ret = 0; + + smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT); + smu_features_high = (uint32_t)((feature_mask & 
SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT); + + if (enable) { + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0, + "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0, + "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", + return ret); + } else { + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0, + "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0, + "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", + return ret); + } + + return 0; +} + +int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr, + uint64_t *features_enabled) +{ + uint32_t smc_features_low, smc_features_high; + int ret = 0; + + if (features_enabled == NULL) + return -EINVAL; + + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0, + "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr, + &smc_features_low)) == 0, + "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0, + "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr, + &smc_features_high)) == 0, + "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!", + return ret); + + *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) | + (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK)); + + return 0; +} + +static int vega20_set_tools_address(struct pp_hwmgr *hwmgr) +{ + struct vega20_smumgr *priv = + (struct vega20_smumgr *)(hwmgr->smu_backend); + int ret = 0; + + if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) { + ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetToolsDramAddrHigh, + upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); + if (!ret) + ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetToolsDramAddrLow, + lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); + } + + return ret; +} + +static int vega20_smu_init(struct pp_hwmgr *hwmgr) +{ + struct vega20_smumgr *priv; + unsigned long tools_size = 0x19000; + int ret = 0; + + struct cgs_firmware_info info = {0}; + + ret = cgs_get_firmware_info(hwmgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), + &info); + if (ret || !info.kptr) + return -EINVAL; + + priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + hwmgr->smu_backend = priv; + + /* allocate space for pptable */ + ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, + sizeof(PPTable_t), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &priv->smu_tables.entry[TABLE_PPTABLE].handle, + &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr, + &priv->smu_tables.entry[TABLE_PPTABLE].table); + if (ret) + goto free_backend; + + 
priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01; + priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t); + + /* allocate space for watermarks table */ + ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, + sizeof(Watermarks_t), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &priv->smu_tables.entry[TABLE_WATERMARKS].handle, + &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr, + &priv->smu_tables.entry[TABLE_WATERMARKS].table); + if (ret) + goto err0; + + priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01; + priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t); + + /* allocate space for pmstatuslog table */ + ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, + tools_size, + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); + if (ret) + goto err1; + + priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01; + priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size; + + /* allocate space for OverDrive table */ + ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, + sizeof(OverDriveTable_t), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &priv->smu_tables.entry[TABLE_OVERDRIVE].handle, + &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr, + &priv->smu_tables.entry[TABLE_OVERDRIVE].table); + if (ret) + goto err2; + + priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01; + priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t); + + /* allocate space for SmuMetrics table */ + ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, + sizeof(SmuMetrics_t), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &priv->smu_tables.entry[TABLE_SMU_METRICS].handle, + &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr, + &priv->smu_tables.entry[TABLE_SMU_METRICS].table); + if (ret) + goto err3; + + priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01; + priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t); + + /* allocate space for ActivityMonitor table */ + ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, + sizeof(DpmActivityMonitorCoeffInt_t), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle, + &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr, + &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table); + if (ret) + goto err4; + + priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01; + priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t); + + return 0; + +err4: + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle, + &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr, + &priv->smu_tables.entry[TABLE_SMU_METRICS].table); +err3: + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle, + &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr, + &priv->smu_tables.entry[TABLE_OVERDRIVE].table); +err2: + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); +err1: + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle, + &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr, + &priv->smu_tables.entry[TABLE_WATERMARKS].table); +err0: + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle, + 
&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr, + &priv->smu_tables.entry[TABLE_PPTABLE].table); +free_backend: + kfree(hwmgr->smu_backend); + + return -EINVAL; +} + +static int vega20_smu_fini(struct pp_hwmgr *hwmgr) +{ + struct vega20_smumgr *priv = + (struct vega20_smumgr *)(hwmgr->smu_backend); + + if (priv) { + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle, + &priv->smu_tables.entry[TABLE_PPTABLE].mc_addr, + &priv->smu_tables.entry[TABLE_PPTABLE].table); + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle, + &priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr, + &priv->smu_tables.entry[TABLE_WATERMARKS].table); + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr, + &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table); + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle, + &priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr, + &priv->smu_tables.entry[TABLE_OVERDRIVE].table); + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle, + &priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr, + &priv->smu_tables.entry[TABLE_SMU_METRICS].table); + amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle, + &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr, + &priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table); + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; + } + return 0; +} + +static int vega20_start_smu(struct pp_hwmgr *hwmgr) +{ + int ret; + + ret = vega20_is_smc_ram_running(hwmgr); + PP_ASSERT_WITH_CODE(ret, + "[Vega20StartSmu] SMC is not running!", + return -EINVAL); + + ret = vega20_set_tools_address(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "[Vega20StartSmu] Failed to set tools address!", + return ret); + + return 0; +} + +static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + uint64_t features_enabled = 0; + + vega20_get_enabled_smc_features(hwmgr, &features_enabled); + + if (features_enabled & SMC_DPM_FEATURES) + return true; + else + return false; +} + +const struct pp_smumgr_func vega20_smu_funcs = { + .smu_init = &vega20_smu_init, + .smu_fini = &vega20_smu_fini, + .start_smu = &vega20_start_smu, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = &vega20_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, + .is_dpm_running = vega20_is_dpm_running, +}; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h new file mode 100644 index 000000000000..71da82266e7f --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h @@ -0,0 +1,61 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _VEGA20_SMUMANAGER_H_ +#define _VEGA20_SMUMANAGER_H_ + +#include "hwmgr.h" +#include "smu11_driver_if.h" + +struct smu_table_entry { + uint32_t version; + uint32_t size; + uint64_t mc_addr; + void *table; + struct amdgpu_bo *handle; +}; + +struct smu_table_array { + struct smu_table_entry entry[TABLE_COUNT]; +}; + +struct vega20_smumgr { + struct smu_table_array smu_tables; +}; + +#define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF +#define SMU_FEATURES_LOW_SHIFT 0 +#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 +#define SMU_FEATURES_HIGH_SHIFT 32 + +int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg); +int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, + uint8_t *table, int16_t table_id); +int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr, + uint8_t *table, int16_t table_id); +int vega20_enable_smc_features(struct pp_hwmgr *hwmgr, + bool enable, uint64_t feature_mask); +int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr, + uint64_t *features_enabled); + +#endif + -- GitLab From f0e7e5e2a8e4cad91b24ae5dbcd308cd2c05382a Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 11 May 2018 10:56:25 +0800 Subject: [PATCH 0405/1692] drm/amd/powerplay: new interfaces for ActivityMonitor table with SMU Vega20 has a new activity monitor table that is stored in memory. Add API to get and set the new table. 
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/smumgr/vega20_smumgr.c | 57 +++++++++++++++++++ .../drm/amd/powerplay/smumgr/vega20_smumgr.h | 4 ++ 2 files changed, 61 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index 41a2a5df679b..fe7f71079e0e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -243,6 +243,63 @@ int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr, return 0; } +int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr, + uint8_t *table, uint16_t workload_type) +{ + struct vega20_smumgr *priv = + (struct vega20_smumgr *)(hwmgr->smu_backend); + int ret = 0; + + memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table, + priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); + + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetDriverDramAddrHigh, + upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, + "[SetActivityMonitor] Attempt to Set Dram Addr High Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetDriverDramAddrLow, + lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, + "[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0, + "[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!", + return ret); + + return 0; +} + +int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, + uint8_t *table, uint16_t workload_type) +{ + struct vega20_smumgr *priv = + (struct vega20_smumgr *)(hwmgr->smu_backend); + int ret = 0; + + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetDriverDramAddrHigh, + upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, + "[GetActivityMonitor] Attempt to Set Dram Addr High Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetDriverDramAddrLow, + lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, + "[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_TransferTableSmu2Dram, + TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0, + "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!", + return ret); + + memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, + priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); + + return 0; +} + int vega20_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint64_t feature_mask) { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h index 71da82266e7f..505eb0d82e3b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h @@ -56,6 +56,10 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint64_t feature_mask); int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr, uint64_t *features_enabled); +int vega20_set_activity_monitor_coeff(struct 
pp_hwmgr *hwmgr, + uint8_t *table, uint16_t workload_type); +int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, + uint8_t *table, uint16_t workload_type); #endif -- GitLab From da958630d530250c72bc54394ce77488acf2144c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 2 Aug 2018 15:55:33 -0500 Subject: [PATCH 0406/1692] drm/amd/powerplay: add the hw manager for vega20 (v3) hwmgr is the interface for the driver to setup state structures which are used by the smu for managing the power state. v2: squash in fixes: - update set_watermarks_for_clocks_ranges to use common code - drop unsupported apis - correct MAX_REGULAR_DPM_NUMBER value - multimonitor fixes - add check for vbios pptable version - revise dpm table setup - init fclk dpm state - Remove unused definition in vega20_hwmgr - support power limit setup - enable vega20 to honour DAL clock limits - comment out dump_table debugging v3: switch to SOC15 register access macros Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 4 +- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 8 +- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2099 +++++++++++++++++ .../drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 519 ++++ .../amd/powerplay/hwmgr/vega20_powertune.c | 70 + .../amd/powerplay/hwmgr/vega20_powertune.h | 32 + .../powerplay/hwmgr/vega20_processpptables.c | 919 ++++++++ .../powerplay/hwmgr/vega20_processpptables.h | 31 + .../drm/amd/powerplay/hwmgr/vega20_thermal.c | 212 ++ .../drm/amd/powerplay/hwmgr/vega20_thermal.h | 64 + 10 files changed, 3956 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index 210fb3ecd213..ade8973b6f4d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -33,7 +33,9 @@ HARDWARE_MGR = hwmgr.o processpptables.o \ vega10_thermal.o smu10_hwmgr.o pp_psm.o\ vega12_processpptables.o vega12_hwmgr.o \ vega12_thermal.o \ - pp_overdriver.o smu_helper.o + pp_overdriver.o smu_helper.o \ + vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \ + vega20_thermal.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 8994aa5c8cf8..7500a3e61dba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -44,11 +44,13 @@ extern const struct pp_smumgr_func vegam_smu_funcs; extern const struct pp_smumgr_func vega10_smu_funcs; extern const struct pp_smumgr_func vega12_smu_funcs; extern const struct pp_smumgr_func smu10_smu_funcs; +extern const struct pp_smumgr_func vega20_smu_funcs; extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr); extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr); extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr); 
extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int vega20_hwmgr_init(struct pp_hwmgr *hwmgr); extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr); static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); @@ -149,7 +151,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case AMDGPU_FAMILY_AI: switch (hwmgr->chip_id) { case CHIP_VEGA10: - case CHIP_VEGA20: hwmgr->feature_mask &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &vega10_smu_funcs; vega10_hwmgr_init(hwmgr); @@ -158,6 +159,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) hwmgr->smumgr_funcs = &vega12_smu_funcs; vega12_hwmgr_init(hwmgr); break; + case CHIP_VEGA20: + hwmgr->feature_mask &= ~PP_GFXOFF_MASK; + hwmgr->smumgr_funcs = &vega20_smu_funcs; + vega20_hwmgr_init(hwmgr); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c new file mode 100644 index 000000000000..40f07177b046 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -0,0 +1,2099 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include + +#include "hwmgr.h" +#include "amd_powerplay.h" +#include "vega20_smumgr.h" +#include "hardwaremanager.h" +#include "ppatomfwctrl.h" +#include "atomfirmware.h" +#include "cgs_common.h" +#include "vega20_powertune.h" +#include "vega20_inc.h" +#include "pppcielanes.h" +#include "vega20_hwmgr.h" +#include "vega20_processpptables.h" +#include "vega20_pptable.h" +#include "vega20_thermal.h" +#include "vega20_ppsmc.h" +#include "pp_debug.h" +#include "amd_pcie_helpers.h" +#include "ppinterrupt.h" +#include "pp_overdriver.h" +#include "pp_thermal.h" + +static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + + data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT; + data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT; + data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT; + data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT; + data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT; + + data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT; + data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; + + data->registry_data.disallowed_features = 0x0; + data->registry_data.od_state_in_dc_support = 0; + data->registry_data.thermal_support = 1; + data->registry_data.skip_baco_hardware = 0; + + data->registry_data.log_avfs_param = 0; + data->registry_data.sclk_throttle_low_notification = 1; + data->registry_data.force_dpm_high = 0; + data->registry_data.stable_pstate_sclk_dpm_percentage = 75; + + data->registry_data.didt_support = 0; + if (data->registry_data.didt_support) { + data->registry_data.didt_mode = 6; + data->registry_data.sq_ramping_support = 1; + data->registry_data.db_ramping_support = 0; + data->registry_data.td_ramping_support = 0; + data->registry_data.tcp_ramping_support = 0; + data->registry_data.dbr_ramping_support = 0; + data->registry_data.edc_didt_support = 1; + data->registry_data.gc_didt_support = 0; + data->registry_data.psm_didt_support = 0; + } + + data->registry_data.pcie_lane_override = 0xff; + data->registry_data.pcie_speed_override = 0xff; + data->registry_data.pcie_clock_override = 0xffffffff; + data->registry_data.regulator_hot_gpio_support = 1; + data->registry_data.ac_dc_switch_gpio_support = 0; + data->registry_data.quick_transition_support = 0; + data->registry_data.zrpm_start_temp = 0xffff; + data->registry_data.zrpm_stop_temp = 0xffff; + data->registry_data.odn_feature_enable = 1; + data->registry_data.disable_water_mark = 0; + data->registry_data.disable_pp_tuning = 0; + data->registry_data.disable_xlpp_tuning = 0; + 
data->registry_data.disable_workload_policy = 0; + data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F; + data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919; + data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A; + data->registry_data.force_workload_policy_mask = 0; + data->registry_data.disable_3d_fs_detection = 0; + data->registry_data.fps_support = 1; + data->registry_data.disable_auto_wattman = 1; + data->registry_data.auto_wattman_debug = 0; + data->registry_data.auto_wattman_sample_period = 100; + data->registry_data.auto_wattman_threshold = 50; + data->registry_data.gfxoff_controlled_by_driver = 1; + data->gfxoff_allowed = false; + data->counter_gfxoff = 0; +} + +static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct amdgpu_device *adev = hwmgr->adev; + + if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + + if (adev->pg_flags & AMD_PG_SUPPORT_UVD) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + + if (adev->pg_flags & AMD_PG_SUPPORT_VCE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UnTabledHardwareInterface); + + if (data->registry_data.odn_feature_enable) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinACSupport); + else { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6inACSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport); + } + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ActivityReporting); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM); + + if (data->registry_data.od_state_in_dc_support) { + if (data->registry_data.odn_feature_enable) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinDCSupport); + else { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6inDCSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinDCSupport); + } + } + + if (data->registry_data.thermal_support && + data->registry_data.fuzzy_fan_control_support && + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODFuzzyFanControlSupport); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPowerManagement); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMC); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalPolicyDelay); + + if (data->registry_data.force_dpm_high) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ExclusiveModeAlwaysHigh); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicUVDState); + + if (data->registry_data.sclk_throttle_low_notification) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification); + + /* power tune caps */ + /* assume disabled */ 
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DiDtSupport); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DiDtEDCEnable); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GCEDC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PSM); + + if (data->registry_data.didt_support) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DiDtSupport); + if (data->registry_data.sq_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + if (data->registry_data.db_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + if (data->registry_data.td_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + if (data->registry_data.tcp_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + if (data->registry_data.dbr_ramping_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRRamping); + if (data->registry_data.edc_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DiDtEDCEnable); + if (data->registry_data.gc_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GCEDC); + if (data->registry_data.psm_didt_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PSM); + } + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + + if (data->registry_data.ac_dc_switch_gpio_support) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); + } + + if (data->registry_data.quick_transition_support) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition); + } + + if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_LowestUclkReservedForUlv); + if (data->lowest_uclk_reserved_for_ulv == 1) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_LowestUclkReservedForUlv); + } + + if (data->registry_data.custom_fan_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CustomFanControlSupport); + + return 0; +} + +static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + int i; + + data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = + FEATURE_DPM_PREFETCHER_BIT; + 
data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id = + FEATURE_DPM_GFXCLK_BIT; + data->smu_features[GNLD_DPM_UCLK].smu_feature_id = + FEATURE_DPM_UCLK_BIT; + data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id = + FEATURE_DPM_SOCCLK_BIT; + data->smu_features[GNLD_DPM_UVD].smu_feature_id = + FEATURE_DPM_UVD_BIT; + data->smu_features[GNLD_DPM_VCE].smu_feature_id = + FEATURE_DPM_VCE_BIT; + data->smu_features[GNLD_ULV].smu_feature_id = + FEATURE_ULV_BIT; + data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id = + FEATURE_DPM_MP0CLK_BIT; + data->smu_features[GNLD_DPM_LINK].smu_feature_id = + FEATURE_DPM_LINK_BIT; + data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id = + FEATURE_DPM_DCEFCLK_BIT; + data->smu_features[GNLD_DS_GFXCLK].smu_feature_id = + FEATURE_DS_GFXCLK_BIT; + data->smu_features[GNLD_DS_SOCCLK].smu_feature_id = + FEATURE_DS_SOCCLK_BIT; + data->smu_features[GNLD_DS_LCLK].smu_feature_id = + FEATURE_DS_LCLK_BIT; + data->smu_features[GNLD_PPT].smu_feature_id = + FEATURE_PPT_BIT; + data->smu_features[GNLD_TDC].smu_feature_id = + FEATURE_TDC_BIT; + data->smu_features[GNLD_THERMAL].smu_feature_id = + FEATURE_THERMAL_BIT; + data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id = + FEATURE_GFX_PER_CU_CG_BIT; + data->smu_features[GNLD_RM].smu_feature_id = + FEATURE_RM_BIT; + data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id = + FEATURE_DS_DCEFCLK_BIT; + data->smu_features[GNLD_ACDC].smu_feature_id = + FEATURE_ACDC_BIT; + data->smu_features[GNLD_VR0HOT].smu_feature_id = + FEATURE_VR0HOT_BIT; + data->smu_features[GNLD_VR1HOT].smu_feature_id = + FEATURE_VR1HOT_BIT; + data->smu_features[GNLD_FW_CTF].smu_feature_id = + FEATURE_FW_CTF_BIT; + data->smu_features[GNLD_LED_DISPLAY].smu_feature_id = + FEATURE_LED_DISPLAY_BIT; + data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = + FEATURE_FAN_CONTROL_BIT; + data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT; + data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT; + data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT; + data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT; + data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT; + data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; + data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; + data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + data->smu_features[i].smu_feature_bitmap = + (uint64_t)(1ULL << data->smu_features[i].smu_feature_id); + data->smu_features[i].allowed = + ((data->registry_data.disallowed_features >> i) & 1) ? 
+ false : true; + } +} + +static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + kfree(hwmgr->backend); + hwmgr->backend = NULL; + + return 0; +} + +static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data; + struct amdgpu_device *adev = hwmgr->adev; + + data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + + vega20_set_default_registry_data(hwmgr); + + data->disable_dpm_mask = 0xff; + data->workload_mask = 0xff; + + /* need to set voltage control types before EVV patching */ + data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE; + data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE; + data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE; + + data->water_marks_bitmap = 0; + data->avfs_exist = false; + + vega20_set_features_platform_caps(hwmgr); + + vega20_init_dpm_defaults(hwmgr); + + /* Parse pptable data read from VBIOS */ + vega20_set_private_data_based_on_pptable(hwmgr); + + data->is_tlu_enabled = false; + + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + VEGA20_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ + /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ + hwmgr->platform_descriptor.clockStep.engineClock = 500; + hwmgr->platform_descriptor.clockStep.memoryClock = 500; + + data->total_active_cus = adev->gfx.cu_info.number; + + return 0; +} + +static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + + data->low_sclk_interrupt_threshold = 0; + + return 0; +} + +static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + ret = vega20_init_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to init sclk threshold!", + return ret); + + return 0; +} + +/* + * @fn vega20_init_dpm_state + * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff. + * + * @param dpm_state - the address of the DPM Table to initiailize. + * @return None. 
+ */ +static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state) +{ + dpm_state->soft_min_level = 0x0; + dpm_state->soft_max_level = 0xffff; + dpm_state->hard_min_level = 0x0; + dpm_state->hard_max_level = 0xffff; +} + +static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, + PPCLK_e clk_id, uint32_t *num_of_levels) +{ + int ret = 0; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmFreqByIndex, + (clk_id << 16 | 0xFF)); + PP_ASSERT_WITH_CODE(!ret, + "[GetNumOfDpmLevel] failed to get dpm levels!", + return ret); + + vega20_read_arg_from_smc(hwmgr, num_of_levels); + PP_ASSERT_WITH_CODE(*num_of_levels > 0, + "[GetNumOfDpmLevel] number of clk levels is invalid!", + return -EINVAL); + + return ret; +} + +static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, + PPCLK_e clk_id, uint32_t index, uint32_t *clk) +{ + int ret = 0; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmFreqByIndex, + (clk_id << 16 | index)); + PP_ASSERT_WITH_CODE(!ret, + "[GetDpmFreqByIndex] failed to get dpm freq by index!", + return ret); + + vega20_read_arg_from_smc(hwmgr, clk); + PP_ASSERT_WITH_CODE(*clk, + "[GetDpmFreqByIndex] clk value is invalid!", + return -EINVAL); + + return ret; +} + +static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr, + struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id) +{ + int ret = 0; + uint32_t i, num_of_levels, clk; + + ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels); + PP_ASSERT_WITH_CODE(!ret, + "[SetupSingleDpmTable] failed to get clk levels!", + return ret); + + dpm_table->count = num_of_levels; + + for (i = 0; i < num_of_levels; i++) { + ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk); + PP_ASSERT_WITH_CODE(!ret, + "[SetupSingleDpmTable] failed to get clk of specific level!", + return ret); + dpm_table->dpm_levels[i].value = clk; + dpm_table->dpm_levels[i].enabled = true; + } + + return ret; +} + + +/* + * This function is to initialize all DPM state tables + * for SMU based on the dependency table. + * Dynamic state patching function will then trim these + * state tables to the allowed range based + * on the power policy or external client requests, + * such as UVD request, etc. 
+ */ +static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table; + int ret = 0; + + memset(&data->dpm_table, 0, sizeof(data->dpm_table)); + + /* socclk */ + dpm_table = &(data->dpm_table.soc_table); + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get socclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100; + } + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* gfxclk */ + dpm_table = &(data->dpm_table.gfx_table); + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100; + } + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* memclk */ + dpm_table = &(data->dpm_table.mem_table); + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get memclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100; + } + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* eclk */ + dpm_table = &(data->dpm_table.eclk_table); + if (data->smu_features[GNLD_DPM_VCE].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get eclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100; + } + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* vclk */ + dpm_table = &(data->dpm_table.vclk_table); + if (data->smu_features[GNLD_DPM_UVD].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get vclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100; + } + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* dclk */ + dpm_table = &(data->dpm_table.dclk_table); + if (data->smu_features[GNLD_DPM_UVD].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100; + } + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* dcefclk */ + dpm_table = &(data->dpm_table.dcef_table); + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100; + } + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* pixclk */ + dpm_table = &(data->dpm_table.pixel_table); + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = 
vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get pixclk dpm levels!", + return ret); + } else + dpm_table->count = 0; + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* dispclk */ + dpm_table = &(data->dpm_table.display_table); + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get dispclk dpm levels!", + return ret); + } else + dpm_table->count = 0; + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* phyclk */ + dpm_table = &(data->dpm_table.phy_table); + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get phyclk dpm levels!", + return ret); + } else + dpm_table->count = 0; + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* fclk */ + dpm_table = &(data->dpm_table.fclk_table); + if (data->smu_features[GNLD_DPM_FCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get fclk dpm levels!", + return ret); + } else + dpm_table->count = 0; + vega20_init_dpm_state(&(dpm_table->dpm_state)); + + /* save a copy of the default DPM table */ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), + sizeof(struct vega20_dpm_table)); + + return 0; +} + +/** +* Initializes the SMC table and uploads it +* +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data (PowerState) +* @return always 0 +*/ +static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct pp_atomfwctrl_bios_boot_up_values boot_up_values; + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + + result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values); + PP_ASSERT_WITH_CODE(!result, + "[InitSMCTable] Failed to get vbios bootup values!", + return result); + + data->vbios_boot_state.vddc = boot_up_values.usVddc; + data->vbios_boot_state.vddci = boot_up_values.usVddci; + data->vbios_boot_state.mvddc = boot_up_values.usMvddc; + data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; + data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; + data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; + data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; + data->vbios_boot_state.eclock = boot_up_values.ulEClk; + data->vbios_boot_state.vclock = boot_up_values.ulVClk; + data->vbios_boot_state.dclock = boot_up_values.ulDClk; + data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; + if (0 != boot_up_values.usVddc) { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetFloorSocVoltage, + (boot_up_values.usVddc * 4)); + data->vbios_boot_state.bsoc_vddc_lock = true; + } else { + data->vbios_boot_state.bsoc_vddc_lock = false; + } + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetMinDeepSleepDcefclk, + (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); + + memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t)); + + result = vega20_copy_table_to_smc(hwmgr, + (uint8_t *)pp_table, TABLE_PPTABLE); + PP_ASSERT_WITH_CODE(!result, + "[InitSMCTable] Failed to upload 
PPtable!", + return result); + + return 0; +} + +static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t allowed_features_low = 0, allowed_features_high = 0; + int i; + int ret = 0; + + for (i = 0; i < GNLD_FEATURES_MAX; i++) + if (data->smu_features[i].allowed) + data->smu_features[i].smu_feature_id > 31 ? + (allowed_features_high |= + ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) + & 0xFFFFFFFF)) : + (allowed_features_low |= + ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) + & 0xFFFFFFFF)); + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high); + PP_ASSERT_WITH_CODE(!ret, + "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!", + return ret); + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low); + PP_ASSERT_WITH_CODE(!ret, + "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!", + return ret); + + return 0; +} + +static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint64_t features_enabled; + int i; + bool enabled; + int ret = 0; + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_EnableAllSmuFeatures)) == 0, + "[EnableAllSMUFeatures] Failed to enable all smu features!", + return ret); + + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); + PP_ASSERT_WITH_CODE(!ret, + "[EnableAllSmuFeatures] Failed to get enabled smc features!", + return ret); + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? + true : false; + data->smu_features[i].enabled = enabled; + data->smu_features[i].supported = enabled; + +#if 0 + if (data->smu_features[i].allowed && !enabled) + pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i); + else if (!data->smu_features[i].allowed && enabled) + pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i); +#endif + } + + return 0; +} + +static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint64_t features_enabled; + int i; + bool enabled; + int ret = 0; + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_DisableAllSmuFeatures)) == 0, + "[DisableAllSMUFeatures] Failed to disable all smu features!", + return ret); + + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); + PP_ASSERT_WITH_CODE(!ret, + "[DisableAllSMUFeatures] Failed to get enabled smc features!", + return ret); + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 
+ true : false; + data->smu_features[i].enabled = enabled; + data->smu_features[i].supported = enabled; + } + + return 0; +} + +static int vega20_odn_initialize_default_settings( + struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, + PP_Clock *clock, PPCLK_e clock_select) +{ + int ret = 0; + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDcModeMaxDpmFreq, + (clock_select << 16))) == 0, + "[GetMaxSustainableClock] Failed to get max DC clock from SMC!", + return ret); + vega20_read_arg_from_smc(hwmgr, clock); + + /* if DC limit is zero, return AC limit */ + if (*clock == 0) { + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetMaxDpmFreq, + (clock_select << 16))) == 0, + "[GetMaxSustainableClock] failed to get max AC clock from SMC!", + return ret); + vega20_read_arg_from_smc(hwmgr, clock); + } + + return 0; +} + +static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_max_sustainable_clocks *max_sustainable_clocks = + &(data->max_sustainable_clocks); + int ret = 0; + + max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100; + max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100; + max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100; + max_sustainable_clocks->display_clock = 0xFFFFFFFF; + max_sustainable_clocks->phy_clock = 0xFFFFFFFF; + max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; + + if (data->smu_features[GNLD_DPM_UCLK].enabled) + PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, + &(max_sustainable_clocks->uclock), + PPCLK_UCLK)) == 0, + "[InitMaxSustainableClocks] failed to get max UCLK from SMC!", + return ret); + + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) + PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, + &(max_sustainable_clocks->soc_clock), + PPCLK_SOCCLK)) == 0, + "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!", + return ret); + + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, + &(max_sustainable_clocks->dcef_clock), + PPCLK_DCEFCLK)) == 0, + "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, + &(max_sustainable_clocks->display_clock), + PPCLK_DISPCLK)) == 0, + "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, + &(max_sustainable_clocks->phy_clock), + PPCLK_PHYCLK)) == 0, + "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr, + &(max_sustainable_clocks->pixel_clock), + PPCLK_PIXCLK)) == 0, + "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!", + return ret); + } + + if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) + max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; + + if (max_sustainable_clocks->uclock < max_sustainable_clocks->dcef_clock) + max_sustainable_clocks->dcef_clock = max_sustainable_clocks->uclock; + + return 0; +} + +static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int result = 0; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_NumOfDisplays, 0); + + result = 
vega20_set_allowed_featuresmask(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to set allowed featuresmask!\n", + return result); + + result = vega20_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to initialize SMC table!", + return result); + + result = vega20_enable_all_smu_features(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to enable all smu features!", + return result); + + result = vega20_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to setup default DPM tables!", + return result); + + result = vega20_init_max_sustainable_clocks(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to get maximum sustainable clocks!", + return result); + + result = vega20_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to power control set level!", + return result); + + result = vega20_odn_initialize_default_settings(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to initialize odn settings!", + return result); + + return result; +} + +static uint32_t vega20_find_lowest_dpm_level( + struct vega20_single_dpm_table *table) +{ + uint32_t i; + + for (i = 0; i < table->count; i++) { + if (table->dpm_levels[i].enabled) + break; + } + if (i >= table->count) { + i = 0; + table->dpm_levels[i].enabled = true; + } + + return i; +} + +static uint32_t vega20_find_highest_dpm_level( + struct vega20_single_dpm_table *table) +{ + uint32_t i = 0; + + PP_ASSERT_WITH_CODE(table != NULL, + "[FindHighestDPMLevel] DPM Table does not exist!", + return 0); + PP_ASSERT_WITH_CODE(table->count > 0, + "[FindHighestDPMLevel] DPM Table has no entry!", + return 0); + PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER, + "[FindHighestDPMLevel] DPM Table has too many entries!", + return MAX_REGULAR_DPM_NUMBER - 1); + + for (i = table->count - 1; i >= 0; i--) { + if (table->dpm_levels[i].enabled) + break; + } + if (i < 0) { + i = 0; + table->dpm_levels[i].enabled = true; + } + + return i; +} + +static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + PPCLK_GFXCLK << 16 | + data->dpm_table.gfx_table.dpm_state.soft_min_level)), + "Failed to set soft min gfxclk !", + return ret); + + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + PPCLK_UCLK << 16 | + data->dpm_table.mem_table.dpm_state.soft_min_level)), + "Failed to set soft min memclk !", + return ret); + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetHardMinByFreq, + PPCLK_UCLK << 16 | + data->dpm_table.mem_table.dpm_state.hard_min_level)), + "Failed to set hard min memclk !", + return ret); + } + + return ret; +} + +static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + PPCLK_GFXCLK << 16 | + data->dpm_table.gfx_table.dpm_state.soft_max_level)), + "Failed to set soft max gfxclk!", + return ret); + + if 
(data->smu_features[GNLD_DPM_UCLK].enabled) + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + PPCLK_UCLK << 16 | + data->dpm_table.mem_table.dpm_state.soft_max_level)), + "Failed to set soft max memclk!", + return ret); + + return ret; +} + +int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->smu_features[GNLD_DPM_VCE].supported) { + if (data->smu_features[GNLD_DPM_VCE].enabled == enable) { + if (enable) + PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n"); + else + PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n"); + } + + ret = vega20_enable_smc_features(hwmgr, + enable, + data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to Enable/Disable DPM VCE Failed!", + return ret); + data->smu_features[GNLD_DPM_VCE].enabled = enable; + } + + return 0; +} + +static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr, + uint32_t *clock, + PPCLK_e clock_select, + bool max) +{ + int ret; + *clock = 0; + + if (max) { + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0, + "[GetClockRanges] Failed to get max clock from SMC!", + return ret); + vega20_read_arg_from_smc(hwmgr, clock); + } else { + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetMinDpmFreq, + (clock_select << 16))) == 0, + "[GetClockRanges] Failed to get min clock from SMC!", + return ret); + vega20_read_arg_from_smc(hwmgr, clock); + } + + return 0; +} + +static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t gfx_clk; + int ret = 0; + + PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, + "[GetSclks]: gfxclk dpm not enabled!\n", + return -EPERM); + + if (low) { + ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false); + PP_ASSERT_WITH_CODE(!ret, + "[GetSclks]: fail to get min PPCLK_GFXCLK\n", + return ret); + } else { + ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true); + PP_ASSERT_WITH_CODE(!ret, + "[GetSclks]: fail to get max PPCLK_GFXCLK\n", + return ret); + } + + return (gfx_clk * 100); +} + +static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t mem_clk; + int ret = 0; + + PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, + "[MemMclks]: memclk dpm not enabled!\n", + return -EPERM); + + if (low) { + ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false); + PP_ASSERT_WITH_CODE(!ret, + "[GetMclks]: fail to get min PPCLK_UCLK\n", + return ret); + } else { + ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true); + PP_ASSERT_WITH_CODE(!ret, + "[GetMclks]: fail to get max PPCLK_UCLK\n", + return ret); + } + + return (mem_clk * 100); +} + +static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, + uint32_t *query) +{ + int ret = 0; + SmuMetrics_t metrics_table; + + ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS); + PP_ASSERT_WITH_CODE(!ret, + "Failed to export SMU METRICS table!", + return ret); + + *query = metrics_table.CurrSocketPower << 8; + + return ret; +} + +static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq) +{ + 
uint32_t gfx_clk = 0; + int ret = 0; + + *gfx_freq = 0; + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0, + "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", + return ret); + vega20_read_arg_from_smc(hwmgr, &gfx_clk); + + *gfx_freq = gfx_clk * 100; + + return 0; +} + +static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq) +{ + uint32_t mem_clk = 0; + int ret = 0; + + *mclk_freq = 0; + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0, + "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", + return ret); + vega20_read_arg_from_smc(hwmgr, &mem_clk); + + *mclk_freq = mem_clk * 100; + + return 0; +} + +static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr, + uint32_t *activity_percent) +{ + int ret = 0; + SmuMetrics_t metrics_table; + + ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS); + PP_ASSERT_WITH_CODE(!ret, + "Failed to export SMU METRICS table!", + return ret); + + *activity_percent = metrics_table.AverageGfxActivity; + + return ret; +} + +static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, + void *value, int *size) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = vega20_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value); + if (!ret) + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = vega20_get_current_mclk_freq(hwmgr, (uint32_t *)value); + if (!ret) + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = vega20_get_current_activity_percent(hwmgr, (uint32_t *)value); + if (!ret) + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr); + *size = 4; + break; + case AMDGPU_PP_SENSOR_UVD_POWER: + *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VCE_POWER: + *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + *size = 16; + ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr, + bool has_disp) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DPM_UCLK].enabled) + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetUclkFastSwitch, + has_disp ? 
0 : 1); + + return 0; +} + +int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, + struct pp_display_clock_request *clock_req) +{ + int result = 0; + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + enum amd_pp_clock_type clk_type = clock_req->clock_type; + uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; + PPCLK_e clk_select = 0; + uint32_t clk_request = 0; + + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + switch (clk_type) { + case amd_pp_dcef_clock: + clk_freq = clock_req->clock_freq_in_khz / 100; + clk_select = PPCLK_DCEFCLK; + break; + case amd_pp_disp_clock: + clk_select = PPCLK_DISPCLK; + break; + case amd_pp_pixel_clock: + clk_select = PPCLK_PIXCLK; + break; + case amd_pp_phy_clock: + clk_select = PPCLK_PHYCLK; + break; + default: + pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); + result = -EINVAL; + break; + } + + if (!result) { + clk_request = (clk_select << 16) | clk_freq; + result = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinByFreq, + clk_request); + } + } + + return result; +} + +static int vega20_notify_smc_display_config_after_ps_adjustment( + struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct PP_Clocks min_clocks = {0}; + struct pp_display_clock_request clock_req; + int ret = 0; + + if ((hwmgr->display_config->num_display > 1) && + !hwmgr->display_config->multi_monitor_in_sync) + vega20_notify_smc_display_change(hwmgr, false); + else + vega20_notify_smc_display_change(hwmgr, true); + + min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; + min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; + min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; + + if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { + clock_req.clock_type = amd_pp_dcef_clock; + clock_req.clock_freq_in_khz = min_clocks.dcefClock; + if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) { + if (data->smu_features[GNLD_DS_DCEFCLK].supported) + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, + min_clocks.dcefClockInSR / 100)) == 0, + "Attempt to set divider for DCEFCLK Failed!", + return ret); + } else { + pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); + } + } + + return 0; +} + +static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + data->smc_state_table.gfx_boot_level = + data->smc_state_table.gfx_max_level = + vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + data->smc_state_table.mem_max_level = + vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + ret = vega20_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to highest!", + return ret); + + ret = vega20_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); + + return 0; +} + +static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + data->smc_state_table.gfx_boot_level = + data->smc_state_table.gfx_max_level = + vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + data->smc_state_table.mem_max_level = + vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + + ret = 
vega20_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to highest!", + return ret); + + ret = vega20_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); + + return 0; + +} + +static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + ret = vega20_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload DPM Bootup Levels!", + return ret); + + ret = vega20_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload DPM Max Levels!", + return ret); + + return 0; +} + +#if 0 +static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, + uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + if (table_info->vdd_dep_on_sclk->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && + table_info->vdd_dep_on_socclk->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL && + table_info->vdd_dep_on_mclk->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) { + *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL; + *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL; + *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL; + } + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + *sclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + *mclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; + *soc_mask = table_info->vdd_dep_on_socclk->count - 1; + *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; + } + return 0; +} +#endif + +static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + switch (type) { + case PP_SCLK: + data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0; + data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0; + + ret = vega20_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to lowest!", + return ret); + + ret = vega20_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); + break; + + case PP_MCLK: + data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0; + data->smc_state_table.mem_max_level = mask ? 
(fls(mask) - 1) : 0; + + ret = vega20_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to lowest!", + return ret); + + ret = vega20_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); + + break; + + case PP_PCIE: + break; + + default: + break; + } + + return 0; +} + +static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; +#if 0 + uint32_t sclk_mask = 0; + uint32_t mclk_mask = 0; + uint32_t soc_mask = 0; +#endif + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = vega20_force_dpm_highest(hwmgr); + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = vega20_force_dpm_lowest(hwmgr); + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = vega20_unforce_dpm_levels(hwmgr); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: +#if 0 + ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); + if (ret) + return ret; + vega20_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); + vega20_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); +#endif + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: + default: + break; + } + +#if 0 + if (!ret) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); + else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); + } +#endif + return ret; +} + +static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FAN_CONTROL].enabled == false) + return AMD_FAN_CTRL_MANUAL; + else + return AMD_FAN_CTRL_AUTO; +} + +static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_simple_clock_info *info) +{ +#if 0 + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_clock_and_voltage_limits *max_limits = + &table_info->max_clock_voltage_on_ac; + + info->engine_max_clock = max_limits->sclk; + info->memory_max_clock = max_limits->mclk; +#endif + return 0; +} + + +static int vega20_get_sclks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); + int i, count; + + PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, + "[GetSclks]: gfxclk dpm not enabled!\n", + return -EPERM); + + count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; + clocks->num_levels = count; + + for (i = 0; i < count; i++) { + clocks->data[i].clocks_in_khz = + dpm_table->dpm_levels[i].value * 100; + clocks->data[i].latency_in_us = 0; + } + + return 0; +} + +static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr, + uint32_t clock) +{ + return 25; +} + +static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); + int i, count; + + PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, + "[GetMclks]: uclk dpm not enabled!\n", + return -EPERM); + + count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; + clocks->num_levels = data->mclk_latency_table.count = count; + + for (i = 0; i < count; i++) { + clocks->data[i].clocks_in_khz = + data->mclk_latency_table.entries[i].frequency = + dpm_table->dpm_levels[i].value * 100; + clocks->data[i].latency_in_us = + data->mclk_latency_table.entries[i].latency = + vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); + } + + return 0; +} + +static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); + int i, count; + + PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled, + "[GetDcfclocks]: dcefclk dpm not enabled!\n", + return -EPERM); + + count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; + clocks->num_levels = count; + + for (i = 0; i < count; i++) { + clocks->data[i].clocks_in_khz = + dpm_table->dpm_levels[i].value * 100; + clocks->data[i].latency_in_us = 0; + } + + return 0; +} + +static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); + int i, count; + + PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled, + "[GetSocclks]: socclk dpm not enabled!\n", + return -EPERM); + + count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; + clocks->num_levels = count; + + for (i = 0; i < count; i++) { + clocks->data[i].clocks_in_khz = + dpm_table->dpm_levels[i].value * 100; + clocks->data[i].latency_in_us = 0; + } + + return 0; + +} + +static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks) +{ + int ret; + + switch (type) { + case amd_pp_sys_clock: + ret = vega20_get_sclks(hwmgr, clocks); + break; + case amd_pp_mem_clock: + ret = vega20_get_memclocks(hwmgr, clocks); + break; + case amd_pp_dcef_clock: + ret = vega20_get_dcefclocks(hwmgr, clocks); + break; + case amd_pp_soc_clock: + ret = vega20_get_socclocks(hwmgr, clocks); + break; + default: + return -EINVAL; + } + + return ret; +} + +static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + clocks->num_levels = 0; + + return 0; +} + +static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, + void *clock_ranges) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + Watermarks_t *table = &(data->smc_state_table.water_marks_table); + struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges; + + if (!data->registry_data.disable_water_mark && + data->smu_features[GNLD_DPM_DCEFCLK].supported && + data->smu_features[GNLD_DPM_SOCCLK].supported) { + smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); + data->water_marks_bitmap |= WaterMarksExist; + data->water_marks_bitmap &= ~WaterMarksLoaded; + } + + return 0; +} + +static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + int i, now, size = 0; + struct pp_clock_levels_with_latency clocks; + int ret = 0; + + switch (type) { + case PP_SCLK: + ret = vega20_get_current_gfx_clk_freq(hwmgr, &now); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get 
current gfx clk Failed!", + return ret); + + ret = vega20_get_sclks(hwmgr, &clocks); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get gfx clk levels Failed!", + return ret); + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 100, + (clocks.data[i].clocks_in_khz == now) ? "*" : ""); + break; + + case PP_MCLK: + ret = vega20_get_current_mclk_freq(hwmgr, &now); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get current mclk freq Failed!", + return ret); + + ret = vega20_get_memclocks(hwmgr, &clocks); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get memory clk levels Failed!", + return ret); + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 100, + (clocks.data[i].clocks_in_khz == now) ? "*" : ""); + break; + + case PP_PCIE: + break; + + default: + break; + } + return size; +} + +static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + int result = 0; + Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); + + if ((data->water_marks_bitmap & WaterMarksExist) && + !(data->water_marks_bitmap & WaterMarksLoaded)) { + result = vega20_copy_table_to_smc(hwmgr, + (uint8_t *)wm_table, TABLE_WATERMARKS); + PP_ASSERT_WITH_CODE(!result, + "Failed to update WMTABLE!", + return result); + data->water_marks_bitmap |= WaterMarksLoaded; + } + + if ((data->water_marks_bitmap & WaterMarksExist) && + data->smu_features[GNLD_DPM_DCEFCLK].supported && + data->smu_features[GNLD_DPM_SOCCLK].supported) { + result = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_NumOfDisplays, + hwmgr->display_config->num_display); + } + + return result; +} + +int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->smu_features[GNLD_DPM_UVD].supported) { + if (data->smu_features[GNLD_DPM_UVD].enabled == enable) { + if (enable) + PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n"); + else + PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n"); + } + + ret = vega20_enable_smc_features(hwmgr, + enable, + data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap); + PP_ASSERT_WITH_CODE(!ret, + "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!", + return ret); + data->smu_features[GNLD_DPM_UVD].enabled = enable; + } + + return 0; +} + +static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + + data->vce_power_gated = bgate; + vega20_enable_disable_vce_dpm(hwmgr, !bgate); +} + +static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = bgate; + vega20_enable_disable_uvd_dpm(hwmgr, !bgate); +} + +static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table; + bool vblank_too_short = false; + bool disable_mclk_switching; + uint32_t i, latency; + + disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && + !hwmgr->display_config->multi_monitor_in_sync) || + vblank_too_short; + latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; + + /* gfxclk */ + dpm_table = 
&(data->dpm_table.gfx_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + /* memclk */ + dpm_table = &(data->dpm_table.mem_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100)) + dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100; + + if (disable_mclk_switching) { + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + for (i = 0; i < data->mclk_latency_table.count - 1; i++) { + if (data->mclk_latency_table.entries[i].latency <= latency) { + if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) { + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value; + break; + } + } + } + } + + if (hwmgr->display_config->nb_pstate_switch_disable) + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + return 0; +} + +static bool +vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + + if (data->display_timing.num_existing_displays != + hwmgr->display_config->num_display) + is_update_required = true; + + if (data->registry_data.gfx_clk_deep_sleep_support && + (data->display_timing.min_clock_in_sr != + hwmgr->display_config->min_core_set_clock_in_sr)) + is_update_required = true; + + return is_update_required; +} + +static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + ret = vega20_disable_all_smu_features(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "[DisableDpmTasks] Failed to disable all smu features!", + return ret); + + return 0; +} + +static int vega20_power_off_asic(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + int result; + + result = vega20_disable_dpm_tasks(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "[PowerOffAsic] Failed to disable DPM!", + ); + data->water_marks_bitmap &= ~(WaterMarksLoaded); + + return result; +} + +static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, + uint32_t virtual_addr_low, + uint32_t virtual_addr_hi, + uint32_t mc_addr_low, + uint32_t mc_addr_hi, + uint32_t size) +{ + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSystemVirtualDramAddrHigh, + virtual_addr_hi); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSystemVirtualDramAddrLow, + virtual_addr_low); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DramLogSetDramAddrHigh, + mc_addr_hi); + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DramLogSetDramAddrLow, + mc_addr_low); + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_DramLogSetDramSize, + size); + return 0; +} + +static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *thermal_data) +{ + struct phm_ppt_v3_information 
*pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + + memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange)); + + thermal_data->max = pptable_information->us_software_shutdown_temp * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return 0; +} + +static const struct pp_hwmgr_func vega20_hwmgr_funcs = { + /* init/fini related */ + .backend_init = + vega20_hwmgr_backend_init, + .backend_fini = + vega20_hwmgr_backend_fini, + .asic_setup = + vega20_setup_asic_task, + .power_off_asic = + vega20_power_off_asic, + .dynamic_state_management_enable = + vega20_enable_dpm_tasks, + .dynamic_state_management_disable = + vega20_disable_dpm_tasks, + /* power state related */ + .apply_clocks_adjust_rules = + vega20_apply_clocks_adjust_rules, + .display_config_changed = + vega20_display_configuration_changed_task, + .check_smc_update_required_for_display_configuration = + vega20_check_smc_update_required_for_display_configuration, + .notify_smc_display_config_after_ps_adjustment = + vega20_notify_smc_display_config_after_ps_adjustment, + /* export to DAL */ + .get_sclk = + vega20_dpm_get_sclk, + .get_mclk = + vega20_dpm_get_mclk, + .get_dal_power_level = + vega20_get_dal_power_level, + .get_clock_by_type_with_latency = + vega20_get_clock_by_type_with_latency, + .get_clock_by_type_with_voltage = + vega20_get_clock_by_type_with_voltage, + .set_watermarks_for_clocks_ranges = + vega20_set_watermarks_for_clocks_ranges, + .display_clock_voltage_request = + vega20_display_clock_voltage_request, + /* UMD pstate, profile related */ + .force_dpm_level = + vega20_dpm_force_dpm_level, + .set_power_limit = + vega20_set_power_limit, + /* for sysfs to retrive/set gfxclk/memclk */ + .force_clock_level = + vega20_force_clock_level, + .print_clock_levels = + vega20_print_clock_levels, + .read_sensor = + vega20_read_sensor, + /* powergate related */ + .powergate_uvd = + vega20_power_gate_uvd, + .powergate_vce = + vega20_power_gate_vce, + /* thermal related */ + .start_thermal_controller = + vega20_start_thermal_controller, + .stop_thermal_controller = + vega20_thermal_stop_thermal_controller, + .get_thermal_temperature_range = + vega20_get_thermal_temperature_range, + .register_irq_handlers = + smu9_register_irq_handlers, + .disable_smc_firmware_ctf = + vega20_thermal_disable_alert, + /* fan control related */ + .get_fan_speed_info = + vega20_fan_ctrl_get_fan_speed_info, + .get_fan_speed_rpm = + vega20_fan_ctrl_get_fan_speed_rpm, + .get_fan_control_mode = + vega20_get_fan_control_mode, + /* smu memory related */ + .notify_cac_buffer_info = + vega20_notify_cac_buffer_info, +}; + +int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + hwmgr->hwmgr_func = &vega20_hwmgr_funcs; + hwmgr->pptable_func = &vega20_pptable_funcs; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h new file mode 100644 index 000000000000..59a59bcdad3a --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -0,0 +1,519 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
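The vega20_hwmgr_funcs table wired up just above follows powerplay's usual pattern: the ASIC backend fills a table of function pointers and vega20_hwmgr_init() points hwmgr->hwmgr_func at it, so the common hwmgr layer dispatches every operation through that table without knowing which ASIC it is driving. The stand-alone sketch below illustrates only that dispatch pattern; demo_hwmgr, demo_hwmgr_func and the two callbacks are invented stand-ins for illustration, not the kernel's pp_hwmgr types.

#include <stdio.h>

/* Invented stand-ins for struct pp_hwmgr / struct pp_hwmgr_func (illustration only). */
struct demo_hwmgr;

struct demo_hwmgr_func {
	int (*force_dpm_level)(struct demo_hwmgr *hwmgr, int level);
	int (*print_clock_levels)(struct demo_hwmgr *hwmgr, char *buf);
};

struct demo_hwmgr {
	const struct demo_hwmgr_func *hwmgr_func;	/* plays the role of hwmgr->hwmgr_func */
	int cur_level;
};

/* ASIC-specific callbacks, playing the role of the vega20_* functions above. */
static int demo_force_dpm_level(struct demo_hwmgr *hwmgr, int level)
{
	hwmgr->cur_level = level;
	return 0;
}

static int demo_print_clock_levels(struct demo_hwmgr *hwmgr, char *buf)
{
	return sprintf(buf, "forced level: %d\n", hwmgr->cur_level);
}

static const struct demo_hwmgr_func demo_funcs = {
	.force_dpm_level    = demo_force_dpm_level,
	.print_clock_levels = demo_print_clock_levels,
};

/* The "common layer" only ever calls through the table it was handed. */
int main(void)
{
	struct demo_hwmgr hwmgr = { .hwmgr_func = &demo_funcs };
	char buf[64];

	hwmgr.hwmgr_func->force_dpm_level(&hwmgr, 2);
	hwmgr.hwmgr_func->print_clock_levels(&hwmgr, buf);
	fputs(buf, stdout);
	return 0;
}
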
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _VEGA20_HWMGR_H_ +#define _VEGA20_HWMGR_H_ + +#include "hwmgr.h" +#include "smu11_driver_if.h" +#include "ppatomfwctrl.h" + +#define VEGA20_MAX_HARDWARE_POWERLEVELS 2 + +#define WaterMarksExist 1 +#define WaterMarksLoaded 2 + +#define VG20_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8 +#define VG20_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8 +#define VG20_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8 +#define VG20_PSUEDO_NUM_UCLK_DPM_LEVELS 4 + +typedef uint32_t PP_Clock; + +enum { + GNLD_DPM_PREFETCHER = 0, + GNLD_DPM_GFXCLK, + GNLD_DPM_UCLK, + GNLD_DPM_SOCCLK, + GNLD_DPM_UVD, + GNLD_DPM_VCE, + GNLD_ULV, + GNLD_DPM_MP0CLK, + GNLD_DPM_LINK, + GNLD_DPM_DCEFCLK, + GNLD_DS_GFXCLK, + GNLD_DS_SOCCLK, + GNLD_DS_LCLK, + GNLD_PPT, + GNLD_TDC, + GNLD_THERMAL, + GNLD_GFX_PER_CU_CG, + GNLD_RM, + GNLD_DS_DCEFCLK, + GNLD_ACDC, + GNLD_VR0HOT, + GNLD_VR1HOT, + GNLD_FW_CTF, + GNLD_LED_DISPLAY, + GNLD_FAN_CONTROL, + GNLD_DIDT, + GNLD_GFXOFF, + GNLD_CG, + GNLD_DPM_FCLK, + GNLD_DS_FCLK, + GNLD_DS_MP1CLK, + GNLD_DS_MP0CLK, + GNLD_XGMI, + + GNLD_FEATURES_MAX +}; + + +#define GNLD_DPM_MAX (GNLD_DPM_DCEFCLK + 1) + +#define SMC_DPM_FEATURES 0x30F + +struct smu_features { + bool supported; + bool enabled; + bool allowed; + uint32_t smu_feature_id; + uint64_t smu_feature_bitmap; +}; + +struct vega20_performance_level { + uint32_t soc_clock; + uint32_t gfx_clock; + uint32_t mem_clock; +}; + +struct vega20_bacos { + uint32_t baco_flags; + /* struct vega20_performance_level performance_level; */ +}; + +struct vega20_uvd_clocks { + uint32_t vclk; + uint32_t dclk; +}; + +struct vega20_vce_clocks { + uint32_t evclk; + uint32_t ecclk; +}; + +struct vega20_power_state { + uint32_t magic; + struct vega20_uvd_clocks uvd_clks; + struct vega20_vce_clocks vce_clks; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct vega20_performance_level performance_levels[VEGA20_MAX_HARDWARE_POWERLEVELS]; +}; + +struct vega20_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +#define VEGA20_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 16 +#define MAX_PCIE_CONF 2 +#define VEGA20_MINIMUM_ENGINE_CLOCK 2500 + +struct vega20_max_sustainable_clocks { + PP_Clock display_clock; + PP_Clock phy_clock; + PP_Clock pixel_clock; + PP_Clock uclock; + PP_Clock dcef_clock; + PP_Clock soc_clock; +}; + +struct vega20_dpm_state { + uint32_t soft_min_level; + uint32_t soft_max_level; + uint32_t hard_min_level; + uint32_t 
hard_max_level; +}; + +struct vega20_single_dpm_table { + uint32_t count; + struct vega20_dpm_state dpm_state; + struct vega20_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega20_odn_dpm_control { + uint32_t count; + uint32_t entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega20_pcie_table { + uint16_t count; + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; + uint32_t lclk[MAX_PCIE_CONF]; +}; + +struct vega20_dpm_table { + struct vega20_single_dpm_table soc_table; + struct vega20_single_dpm_table gfx_table; + struct vega20_single_dpm_table mem_table; + struct vega20_single_dpm_table eclk_table; + struct vega20_single_dpm_table vclk_table; + struct vega20_single_dpm_table dclk_table; + struct vega20_single_dpm_table dcef_table; + struct vega20_single_dpm_table pixel_table; + struct vega20_single_dpm_table display_table; + struct vega20_single_dpm_table phy_table; + struct vega20_single_dpm_table fclk_table; + struct vega20_pcie_table pcie_table; +}; + +#define VEGA20_MAX_LEAKAGE_COUNT 8 +struct vega20_leakage_voltage { + uint16_t count; + uint16_t leakage_id[VEGA20_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[VEGA20_MAX_LEAKAGE_COUNT]; +}; + +struct vega20_display_timing { + uint32_t min_clock_in_sr; + uint32_t num_existing_displays; +}; + +struct vega20_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; +}; + +struct vega20_vbios_boot_state { + bool bsoc_vddc_lock; + uint8_t uc_cooling_id; + uint16_t vddc; + uint16_t vddci; + uint16_t mvddc; + uint16_t vdd_gfx; + uint32_t gfx_clock; + uint32_t mem_clock; + uint32_t soc_clock; + uint32_t dcef_clock; + uint32_t eclock; + uint32_t dclock; + uint32_t vclock; +}; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 +#define DPMTABLE_OD_UPDATE_VDDC 0x00000010 +#define DPMTABLE_OD_UPDATE_SCLK_MASK 0x00000020 +#define DPMTABLE_OD_UPDATE_MCLK_MASK 0x00000040 + +// To determine if sclk and mclk are in overdrive state +#define SCLK_MASK_OVERDRIVE_ENABLED 0x00000008 +#define MCLK_MASK_OVERDRIVE_ENABLED 0x00000010 +#define SOCCLK_OVERDRIVE_ENABLED 0x00000020 + +struct vega20_smc_state_table { + uint32_t soc_boot_level; + uint32_t gfx_boot_level; + uint32_t dcef_boot_level; + uint32_t mem_boot_level; + uint32_t uvd_boot_level; + uint32_t vce_boot_level; + uint32_t gfx_max_level; + uint32_t mem_max_level; + uint8_t vr_hot_gpio; + uint8_t ac_dc_gpio; + uint8_t therm_out_gpio; + uint8_t therm_out_polarity; + uint8_t therm_out_mode; + PPTable_t pp_table; + Watermarks_t water_marks_table; + AvfsDebugTable_t avfs_debug_table; + AvfsFuseOverride_t avfs_fuse_override_table; + SmuMetrics_t smu_metrics; + DriverSmuConfig_t driver_smu_config; + DpmActivityMonitorCoeffInt_t dpm_activity_monitor_coeffint; + OverDriveTable_t overdrive_table; +}; + +struct vega20_mclk_latency_entries { + uint32_t frequency; + uint32_t latency; +}; + +struct vega20_mclk_latency_table { + uint32_t count; + struct vega20_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega20_registry_data { + uint64_t disallowed_features; + uint8_t ac_dc_switch_gpio_support; + uint8_t acg_loop_support; + uint8_t clock_stretcher_support; + uint8_t db_ramping_support; + uint8_t didt_mode; + uint8_t didt_support; + uint8_t edc_didt_support; + uint8_t force_dpm_high; + uint8_t 
fuzzy_fan_control_support; + uint8_t mclk_dpm_key_disabled; + uint8_t od_state_in_dc_support; + uint8_t pcie_lane_override; + uint8_t pcie_speed_override; + uint32_t pcie_clock_override; + uint8_t pcie_dpm_key_disabled; + uint8_t dcefclk_dpm_key_disabled; + uint8_t prefetcher_dpm_key_disabled; + uint8_t quick_transition_support; + uint8_t regulator_hot_gpio_support; + uint8_t master_deep_sleep_support; + uint8_t gfx_clk_deep_sleep_support; + uint8_t sclk_deep_sleep_support; + uint8_t lclk_deep_sleep_support; + uint8_t dce_fclk_deep_sleep_support; + uint8_t sclk_dpm_key_disabled; + uint8_t sclk_throttle_low_notification; + uint8_t skip_baco_hardware; + uint8_t socclk_dpm_key_disabled; + uint8_t sq_ramping_support; + uint8_t tcp_ramping_support; + uint8_t td_ramping_support; + uint8_t dbr_ramping_support; + uint8_t gc_didt_support; + uint8_t psm_didt_support; + uint8_t thermal_support; + uint8_t fw_ctf_enabled; + uint8_t led_dpm_enabled; + uint8_t fan_control_support; + uint8_t ulv_support; + uint8_t odn_feature_enable; + uint8_t disable_water_mark; + uint8_t disable_workload_policy; + uint32_t force_workload_policy_mask; + uint8_t disable_3d_fs_detection; + uint8_t disable_pp_tuning; + uint8_t disable_xlpp_tuning; + uint32_t perf_ui_tuning_profile_turbo; + uint32_t perf_ui_tuning_profile_powerSave; + uint32_t perf_ui_tuning_profile_xl; + uint16_t zrpm_stop_temp; + uint16_t zrpm_start_temp; + uint32_t stable_pstate_sclk_dpm_percentage; + uint8_t fps_support; + uint8_t vr0hot; + uint8_t vr1hot; + uint8_t disable_auto_wattman; + uint32_t auto_wattman_debug; + uint32_t auto_wattman_sample_period; + uint8_t auto_wattman_threshold; + uint8_t log_avfs_param; + uint8_t enable_enginess; + uint8_t custom_fan_support; + uint8_t disable_pcc_limit_control; + uint8_t gfxoff_controlled_by_driver; +}; + +struct vega20_odn_clock_voltage_dependency_table { + uint32_t count; + struct phm_ppt_v1_clock_voltage_dependency_record + entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega20_odn_dpm_table { + struct vega20_odn_dpm_control control_gfxclk_state; + struct vega20_odn_dpm_control control_memclk_state; + struct phm_odn_clock_levels odn_core_clock_dpm_levels; + struct phm_odn_clock_levels odn_memory_clock_dpm_levels; + struct vega20_odn_clock_voltage_dependency_table vdd_dependency_on_sclk; + struct vega20_odn_clock_voltage_dependency_table vdd_dependency_on_mclk; + struct vega20_odn_clock_voltage_dependency_table vdd_dependency_on_socclk; + uint32_t odn_mclk_min_limit; +}; + +struct vega20_odn_fan_table { + uint32_t target_fan_speed; + uint32_t target_temperature; + uint32_t min_performance_clock; + uint32_t min_fan_limit; + bool force_fan_pwm; +}; + +struct vega20_odn_temp_table { + uint16_t target_operating_temp; + uint16_t default_target_operating_temp; + uint16_t operating_temp_min_limit; + uint16_t operating_temp_max_limit; + uint16_t operating_temp_step; +}; + +struct vega20_odn_data { + uint32_t apply_overdrive_next_settings_mask; + uint32_t overdrive_next_state; + uint32_t overdrive_next_capabilities; + uint32_t odn_sclk_dpm_enable_mask; + uint32_t odn_mclk_dpm_enable_mask; + struct vega20_odn_dpm_table odn_dpm_table; + struct vega20_odn_fan_table odn_fan_table; + struct vega20_odn_temp_table odn_temp_table; +}; + +struct vega20_hwmgr { + struct vega20_dpm_table dpm_table; + struct vega20_dpm_table golden_dpm_table; + struct vega20_registry_data registry_data; + struct vega20_vbios_boot_state vbios_boot_state; + struct vega20_mclk_latency_table mclk_latency_table; + + struct 
vega20_max_sustainable_clocks max_sustainable_clocks; + + struct vega20_leakage_voltage vddc_leakage; + + uint32_t vddc_control; + struct pp_atomfwctrl_voltage_table vddc_voltage_table; + uint32_t mvdd_control; + struct pp_atomfwctrl_voltage_table mvdd_voltage_table; + uint32_t vddci_control; + struct pp_atomfwctrl_voltage_table vddci_voltage_table; + + uint32_t active_auto_throttle_sources; + struct vega20_bacos bacos; + + /* ---- General data ---- */ + uint8_t need_update_dpm_table; + + bool cac_enabled; + bool battery_state; + bool is_tlu_enabled; + bool avfs_exist; + + uint32_t low_sclk_interrupt_threshold; + + uint32_t total_active_cus; + + uint32_t water_marks_bitmap; + + struct vega20_display_timing display_timing; + + /* ---- Vega20 Dyn Register Settings ---- */ + + uint32_t debug_settings; + uint32_t lowest_uclk_reserved_for_ulv; + uint32_t gfxclk_average_alpha; + uint32_t socclk_average_alpha; + uint32_t uclk_average_alpha; + uint32_t gfx_activity_average_alpha; + uint32_t display_voltage_mode; + uint32_t dcef_clk_quad_eqn_a; + uint32_t dcef_clk_quad_eqn_b; + uint32_t dcef_clk_quad_eqn_c; + uint32_t disp_clk_quad_eqn_a; + uint32_t disp_clk_quad_eqn_b; + uint32_t disp_clk_quad_eqn_c; + uint32_t pixel_clk_quad_eqn_a; + uint32_t pixel_clk_quad_eqn_b; + uint32_t pixel_clk_quad_eqn_c; + uint32_t phy_clk_quad_eqn_a; + uint32_t phy_clk_quad_eqn_b; + uint32_t phy_clk_quad_eqn_c; + + /* ---- Thermal Temperature Setting ---- */ + struct vega20_dpmlevel_enable_mask dpm_level_enable_mask; + + /* ---- Power Gating States ---- */ + bool uvd_power_gated; + bool vce_power_gated; + bool samu_power_gated; + bool need_long_memory_training; + + /* Internal settings to apply the application power optimization parameters */ + bool apply_optimized_settings; + uint32_t disable_dpm_mask; + + /* ---- Overdrive next setting ---- */ + struct vega20_odn_data odn_data; + + /* ---- Workload Mask ---- */ + uint32_t workload_mask; + + /* ---- SMU9 ---- */ + uint32_t smu_version; + struct smu_features smu_features[GNLD_FEATURES_MAX]; + struct vega20_smc_state_table smc_state_table; + + /* ---- Gfxoff ---- */ + bool gfxoff_allowed; + uint32_t counter_gfxoff; +}; + +#define VEGA20_DPM2_NEAR_TDP_DEC 10 +#define VEGA20_DPM2_ABOVE_SAFE_INC 5 +#define VEGA20_DPM2_BELOW_SAFE_INC 20 + +#define VEGA20_DPM2_LTA_WINDOW_SIZE 7 + +#define VEGA20_DPM2_LTS_TRUNCATE 0 + +#define VEGA20_DPM2_TDP_SAFE_LIMIT_PERCENT 80 + +#define VEGA20_DPM2_MAXPS_PERCENT_M 90 +#define VEGA20_DPM2_MAXPS_PERCENT_H 90 + +#define VEGA20_DPM2_PWREFFICIENCYRATIO_MARGIN 50 + +#define VEGA20_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define VEGA20_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define VEGA20_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define VEGA20_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E +#define VEGA20_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF + +#define VEGA20_VOLTAGE_CONTROL_NONE 0x0 +#define VEGA20_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define VEGA20_VOLTAGE_CONTROL_BY_SVID2 0x2 +#define VEGA20_VOLTAGE_CONTROL_MERGED 0x3 +/* To convert to Q8.8 format for firmware */ +#define VEGA20_Q88_FORMAT_CONVERSION_UNIT 256 + +#define VEGA20_UNUSED_GPIO_PIN 0x7F + +#define VEGA20_THERM_OUT_MODE_DISABLE 0x0 +#define VEGA20_THERM_OUT_MODE_THERM_ONLY 0x1 +#define VEGA20_THERM_OUT_MODE_THERM_VRHOT 0x2 + +#define PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT 0xffffffff +#define PPREGKEY_VEGA20QUADRATICEQUATION_DFLT 0xffffffff + +#define PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define 
PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT 0xffffffff +#define PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT 0xffffffff +#define PPREGKEY_VEGA20QUADRATICEQUATION_DFLT 0xffffffff + +#define VEGA20_UMD_PSTATE_GFXCLK_LEVEL 0x3 +#define VEGA20_UMD_PSTATE_SOCCLK_LEVEL 0x3 +#define VEGA20_UMD_PSTATE_MCLK_LEVEL 0x2 +#define VEGA20_UMD_PSTATE_UVDCLK_LEVEL 0x3 +#define VEGA20_UMD_PSTATE_VCEMCLK_LEVEL 0x3 + +#endif /* _VEGA20_HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c new file mode 100644 index 000000000000..a0bfb65cc5d6 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c @@ -0,0 +1,70 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "hwmgr.h" +#include "vega20_hwmgr.h" +#include "vega20_powertune.h" +#include "vega20_smumgr.h" +#include "vega20_ppsmc.h" +#include "vega20_inc.h" +#include "pp_debug.h" + +int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_PPT].enabled) + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetPptLimit, n); + + return 0; +} + +int vega20_validate_power_level_request(struct pp_hwmgr *hwmgr, + uint32_t tdp_percentage_adjustment, uint32_t tdp_absolute_value_adjustment) +{ + return (tdp_percentage_adjustment > hwmgr->platform_descriptor.TDPLimit) ? -1 : 0; +} + +static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, + uint32_t adjust_percent) +{ + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverDriveSetPercentage, adjust_percent); +} + +int vega20_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + int adjust_percent, result = 0; + + if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { + adjust_percent = + hwmgr->platform_descriptor.TDPAdjustmentPolarity ? 
+ hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + result = vega20_set_overdrive_target_percentage(hwmgr, + (uint32_t)adjust_percent); + } + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h new file mode 100644 index 000000000000..d68c734c0f4e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h @@ -0,0 +1,32 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _VEGA20_POWERTUNE_H_ +#define _VEGA20_POWERTUNE_H_ + +int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); +int vega20_power_control_set_level(struct pp_hwmgr *hwmgr); +int vega20_validate_power_level_request(struct pp_hwmgr *hwmgr, + uint32_t tdp_percentage_adjustment, + uint32_t tdp_absolute_value_adjustment); +#endif /* _VEGA20_POWERTUNE_H_ */ + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c new file mode 100644 index 000000000000..379ac3d1da03 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -0,0 +1,919 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
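vega20_power_control_set_level() above turns the platform TDP adjustment into a signed overdrive percentage: TDPAdjustmentPolarity selects whether TDPAdjustment raises or lowers the target, and the result is cast to uint32_t when passed as the PPSMC_MSG_OverDriveSetPercentage parameter. The minimal sketch below shows just that sign handling with invented example values (the real inputs come from hwmgr->platform_descriptor); it is an illustration, not the driver's code.

#include <stdio.h>
#include <stdint.h>

/*
 * Toy model of the adjust_percent computation: 'polarity' stands in for
 * platform_descriptor.TDPAdjustmentPolarity and 'tdp_adjustment' for
 * platform_descriptor.TDPAdjustment. All values here are made up.
 */
static int signed_overdrive_percent(uint32_t tdp_adjustment, int polarity)
{
	return polarity ? (int)tdp_adjustment : -(int)tdp_adjustment;
}

int main(void)
{
	int adjust_percent = signed_overdrive_percent(10, 0);	/* polarity 0 -> -10 */
	uint32_t smc_parameter = (uint32_t)adjust_percent;	/* as it would be handed to the SMC message */

	printf("adjust_percent = %d, parameter = 0x%08x\n", adjust_percent, smc_parameter);
	return 0;
}
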
+ * + */ +#include +#include +#include + +#include "smu11_driver_if.h" +#include "vega20_processpptables.h" +#include "ppatomfwctrl.h" +#include "atomfirmware.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "vega20_pptable.h" + +static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, + enum phm_platform_caps cap) +{ + if (enable) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap); +} + +static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) +{ + int index = GetIndexIntoMasterDataTable(powerplayinfo); + + u16 size; + u8 frev, crev; + const void *table_address = hwmgr->soft_pp_table; + + if (!table_address) { + table_address = (ATOM_Vega20_POWERPLAYTABLE *) + smu_atom_get_data_table(hwmgr->adev, index, + &size, &frev, &crev); + + hwmgr->soft_pp_table = table_address; + hwmgr->soft_pp_table_size = size; + } + + return table_address; +} + +#if 0 +static void dump_pptable(PPTable_t *pptable) +{ + int i; + + pr_info("Version = 0x%08x\n", pptable->Version); + + pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); + pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); + + pr_info("SocketPowerLimitAc0 = %d\n", pptable->SocketPowerLimitAc0); + pr_info("SocketPowerLimitAc0Tau = %d\n", pptable->SocketPowerLimitAc0Tau); + pr_info("SocketPowerLimitAc1 = %d\n", pptable->SocketPowerLimitAc1); + pr_info("SocketPowerLimitAc1Tau = %d\n", pptable->SocketPowerLimitAc1Tau); + pr_info("SocketPowerLimitAc2 = %d\n", pptable->SocketPowerLimitAc2); + pr_info("SocketPowerLimitAc2Tau = %d\n", pptable->SocketPowerLimitAc2Tau); + pr_info("SocketPowerLimitAc3 = %d\n", pptable->SocketPowerLimitAc3); + pr_info("SocketPowerLimitAc3Tau = %d\n", pptable->SocketPowerLimitAc3Tau); + pr_info("SocketPowerLimitDc = %d\n", pptable->SocketPowerLimitDc); + pr_info("SocketPowerLimitDcTau = %d\n", pptable->SocketPowerLimitDcTau); + pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc); + pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); + pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx); + pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); + + pr_info("TedgeLimit = %d\n", pptable->TedgeLimit); + pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit); + pr_info("ThbmLimit = %d\n", pptable->ThbmLimit); + pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); + pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit); + pr_info("Tliquid1Limit = %d\n", pptable->Tliquid1Limit); + pr_info("Tliquid2Limit = %d\n", pptable->Tliquid2Limit); + pr_info("TplxLimit = %d\n", pptable->TplxLimit); + pr_info("FitLimit = %d\n", pptable->FitLimit); + + pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit); + pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); + + pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage); + pr_info("padding8_limits[0] = 0x%02x\n", pptable->padding8_limits[0]); + pr_info("padding8_limits[1] = 0x%02x\n", pptable->padding8_limits[1]); + pr_info("padding8_limits[2] = 0x%02x\n", pptable->padding8_limits[2]); + + pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc); + pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); + + pr_info("UlvSmnclkDid = %d\n", pptable->UlvSmnclkDid); + pr_info("UlvMp1clkDid = %d\n", pptable->UlvMp1clkDid); + pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); + pr_info("Padding234 = 0x%02x\n", pptable->Padding234); + + pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx); + 
pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc); + pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx); + pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); + + pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); + pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); + + pr_info("[PPCLK_GFXCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_GFXCLK].padding, + pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c); + + pr_info("[PPCLK_VCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_VCLK].padding, + pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c); + + pr_info("[PPCLK_DCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_DCLK].padding, + pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c); + + pr_info("[PPCLK_ECLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_ECLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_ECLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_ECLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_ECLK].padding, + pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c); + + pr_info("[PPCLK_SOCCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, + 
pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_SOCCLK].padding, + pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c); + + pr_info("[PPCLK_UCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_UCLK].padding, + pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c); + + pr_info("[PPCLK_DCEFCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_DCEFCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_DCEFCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_DCEFCLK].padding, + pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c); + + pr_info("[PPCLK_DISPCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_DISPCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_DISPCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_DISPCLK].padding, + pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c); + + pr_info("[PPCLK_PIXCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_PIXCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_PIXCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_PIXCLK].padding, + pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c); + + pr_info("[PPCLK_PHYCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 
0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_PHYCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_PHYCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_PHYCLK].padding, + pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c); + + pr_info("[PPCLK_FCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n", + pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_FCLK].padding, + pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c); + + + pr_info("FreqTableGfx\n"); + for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); + + pr_info("FreqTableVclk\n"); + for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); + + pr_info("FreqTableDclk\n"); + for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); + + pr_info("FreqTableEclk\n"); + for (i = 0; i < NUM_ECLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableEclk[i]); + + pr_info("FreqTableSocclk\n"); + for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); + + pr_info("FreqTableUclk\n"); + for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); + + pr_info("FreqTableFclk\n"); + for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); + + pr_info("FreqTableDcefclk\n"); + for (i = 0; i < NUM_DCEFCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDcefclk[i]); + + pr_info("FreqTableDispclk\n"); + for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDispclk[i]); + + pr_info("FreqTablePixclk\n"); + for (i = 0; i < NUM_PIXCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePixclk[i]); + + pr_info("FreqTablePhyclk\n"); + for (i = 0; i < NUM_PHYCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePhyclk[i]); + + pr_info("DcModeMaxFreq[PPCLK_GFXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]); + pr_info("DcModeMaxFreq[PPCLK_VCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_VCLK]); + pr_info("DcModeMaxFreq[PPCLK_DCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCLK]); + pr_info("DcModeMaxFreq[PPCLK_ECLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_ECLK]); + pr_info("DcModeMaxFreq[PPCLK_SOCCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]); + pr_info("DcModeMaxFreq[PPCLK_UCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_UCLK]); + pr_info("DcModeMaxFreq[PPCLK_DCEFCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCEFCLK]); + pr_info("DcModeMaxFreq[PPCLK_DISPCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DISPCLK]); + 
pr_info("DcModeMaxFreq[PPCLK_PIXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PIXCLK]); + pr_info("DcModeMaxFreq[PPCLK_PHYCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PHYCLK]); + pr_info("DcModeMaxFreq[PPCLK_FCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_FCLK]); + pr_info("Padding8_Clks = %d\n", pptable->Padding8_Clks); + + pr_info("Mp0clkFreq\n"); + for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); + + pr_info("Mp0DpmVoltage\n"); + for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); + + pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); + pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); + pr_info("CksEnableFreq = 0x%x\n", pptable->CksEnableFreq); + pr_info("Padding789 = 0x%x\n", pptable->Padding789); + pr_info("CksVoltageOffset[a = 0x%08x b = 0x%08x c = 0x%08x]\n", + pptable->CksVoltageOffset.a, + pptable->CksVoltageOffset.b, + pptable->CksVoltageOffset.c); + pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]); + pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]); + pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]); + pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]); + pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); + pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource); + pr_info("Padding456 = 0x%x\n", pptable->Padding456); + + pr_info("LowestUclkReservedForUlv = %d\n", pptable->LowestUclkReservedForUlv); + pr_info("Padding8_Uclk[0] = 0x%x\n", pptable->Padding8_Uclk[0]); + pr_info("Padding8_Uclk[1] = 0x%x\n", pptable->Padding8_Uclk[1]); + pr_info("Padding8_Uclk[2] = 0x%x\n", pptable->Padding8_Uclk[2]); + + pr_info("PcieGenSpeed\n"); + for (i = 0; i < NUM_LINK_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->PcieGenSpeed[i]); + + pr_info("PcieLaneCount\n"); + for (i = 0; i < NUM_LINK_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->PcieLaneCount[i]); + + pr_info("LclkFreq\n"); + for (i = 0; i < NUM_LINK_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->LclkFreq[i]); + + pr_info("EnableTdpm = %d\n", pptable->EnableTdpm); + pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); + pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); + pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit); + + pr_info("FanStopTemp = %d\n", pptable->FanStopTemp); + pr_info("FanStartTemp = %d\n", pptable->FanStartTemp); + + pr_info("FanGainEdge = %d\n", pptable->FanGainEdge); + pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot); + pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid); + pr_info("FanGainVrVddc = %d\n", pptable->FanGainVrVddc); + pr_info("FanGainVrMvdd = %d\n", pptable->FanGainVrMvdd); + pr_info("FanGainPlx = %d\n", pptable->FanGainPlx); + pr_info("FanGainHbm = %d\n", pptable->FanGainHbm); + pr_info("FanPwmMin = %d\n", pptable->FanPwmMin); + pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); + pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); + pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm); + pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature); + pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); + pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); + pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); + + pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); + pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); + 
pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); + pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); + + pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); + pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); + pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); + pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); + + pr_info("qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a, + pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b, + pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); + pr_info("qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a, + pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b, + pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); + pr_info("dBtcGbGfxCksOn{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbGfxCksOn.a, + pptable->dBtcGbGfxCksOn.b, + pptable->dBtcGbGfxCksOn.c); + pr_info("dBtcGbGfxCksOff{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbGfxCksOff.a, + pptable->dBtcGbGfxCksOff.b, + pptable->dBtcGbGfxCksOff.c); + pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbGfxAfll.a, + pptable->dBtcGbGfxAfll.b, + pptable->dBtcGbGfxAfll.c); + pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbSoc.a, + pptable->dBtcGbSoc.b, + pptable->dBtcGbSoc.c); + pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", + pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, + pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); + pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", + pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, + pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); + + pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); + pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); + + pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); + pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); + + pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); + pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); + pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); + pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); + + pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); + pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); + pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); + pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); + + pr_info("XgmiLinkSpeed\n"); + for (i = 0; i < NUM_XGMI_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); + pr_info("XgmiLinkWidth\n"); + for (i = 0; i < NUM_XGMI_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); + pr_info("XgmiFclkFreq\n"); + for (i = 0; i < NUM_XGMI_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); + pr_info("XgmiUclkFreq\n"); + for (i = 0; i < NUM_XGMI_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiUclkFreq[i]); + 
pr_info("XgmiSocclkFreq\n"); + for (i = 0; i < NUM_XGMI_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiSocclkFreq[i]); + pr_info("XgmiSocVoltage\n"); + for (i = 0; i < NUM_XGMI_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); + + pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides); + pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation0.a, + pptable->ReservedEquation0.b, + pptable->ReservedEquation0.c); + pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation1.a, + pptable->ReservedEquation1.b, + pptable->ReservedEquation1.c); + pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation2.a, + pptable->ReservedEquation2.b, + pptable->ReservedEquation2.c); + pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation3.a, + pptable->ReservedEquation3.b, + pptable->ReservedEquation3.c); + + pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); + pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc); + + for (i = 0; i < 14; i++) + pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]); + + pr_info("Liquid1_I2C_address = 0x%x\n", pptable->Liquid1_I2C_address); + pr_info("Liquid2_I2C_address = 0x%x\n", pptable->Liquid2_I2C_address); + pr_info("Vr_I2C_address = 0x%x\n", pptable->Vr_I2C_address); + pr_info("Plx_I2C_address = 0x%x\n", pptable->Plx_I2C_address); + + pr_info("Liquid_I2C_LineSCL = 0x%x\n", pptable->Liquid_I2C_LineSCL); + pr_info("Liquid_I2C_LineSDA = 0x%x\n", pptable->Liquid_I2C_LineSDA); + pr_info("Vr_I2C_LineSCL = 0x%x\n", pptable->Vr_I2C_LineSCL); + pr_info("Vr_I2C_LineSDA = 0x%x\n", pptable->Vr_I2C_LineSDA); + + pr_info("Plx_I2C_LineSCL = 0x%x\n", pptable->Plx_I2C_LineSCL); + pr_info("Plx_I2C_LineSDA = 0x%x\n", pptable->Plx_I2C_LineSDA); + pr_info("VrSensorPresent = 0x%x\n", pptable->VrSensorPresent); + pr_info("LiquidSensorPresent = 0x%x\n", pptable->LiquidSensorPresent); + + pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); + pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); + + pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); + pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); + pr_info("VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping); + pr_info("VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping); + + pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); + pr_info("SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask); + pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); + pr_info("Padding8_V = 0x%x\n", pptable->Padding8_V); + + pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); + pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset); + pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); + + pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); + pr_info("SocOffset = 0x%x\n", pptable->SocOffset); + pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); + + pr_info("Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent); + pr_info("Mem0Offset = 0x%x\n", pptable->Mem0Offset); + pr_info("Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0); + + pr_info("Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent); + pr_info("Mem1Offset = 0x%x\n", pptable->Mem1Offset); + pr_info("Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1); + + pr_info("AcDcGpio = %d\n", pptable->AcDcGpio); + pr_info("AcDcPolarity = %d\n", 
pptable->AcDcPolarity); + pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio); + pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity); + + pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio); + pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity); + pr_info("Padding1 = 0x%x\n", pptable->Padding1); + pr_info("Padding2 = 0x%x\n", pptable->Padding2); + + pr_info("LedPin0 = %d\n", pptable->LedPin0); + pr_info("LedPin1 = %d\n", pptable->LedPin1); + pr_info("LedPin2 = %d\n", pptable->LedPin2); + pr_info("padding8_4 = 0x%x\n", pptable->padding8_4); + + pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); + pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); + pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); + + pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); + pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); + pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); + + pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled); + pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); + pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); + + pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); + pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); + pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); + + for (i = 0; i < 10; i++) + pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]); + + for (i = 0; i < 8; i++) + pr_info("MmHubPadding[%d] = 0x%x\n", i, pptable->MmHubPadding[i]); +} +#endif + +static int check_powerplay_tables( + struct pp_hwmgr *hwmgr, + const ATOM_Vega20_POWERPLAYTABLE *powerplay_table) +{ + PP_ASSERT_WITH_CODE((powerplay_table->sHeader.format_revision >= + ATOM_VEGA20_TABLE_REVISION_VEGA20), + "Unsupported PPTable format!", return -1); + PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0, + "Invalid PowerPlay Table!", return -1); + PP_ASSERT_WITH_CODE(powerplay_table->smcPPTable.Version == PPTABLE_V20_SMU_VERSION, + "Unmatch PPTable version, vbios update may be needed!", return -1); + + //dump_pptable(&powerplay_table->smcPPTable); + + return 0; +} + +static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) +{ + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA20_PP_PLATFORM_CAP_POWERPLAY), + PHM_PlatformCaps_PowerPlaySupport); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA20_PP_PLATFORM_CAP_SBIOSPOWERSOURCE), + PHM_PlatformCaps_BiosPowerSourceControl); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA20_PP_PLATFORM_CAP_BACO), + PHM_PlatformCaps_BACO); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA20_PP_PLATFORM_CAP_BAMACO), + PHM_PlatformCaps_BAMACO); + + return 0; +} + +static int copy_clock_limits_array( + struct pp_hwmgr *hwmgr, + uint32_t **pptable_info_array, + const uint32_t *pptable_array) +{ + uint32_t array_size, i; + uint32_t *table; + + array_size = sizeof(uint32_t) * ATOM_VEGA20_PPCLOCK_COUNT; + + table = kzalloc(array_size, GFP_KERNEL); + if (NULL == table) + return -ENOMEM; + + for (i = 0; i < ATOM_VEGA20_PPCLOCK_COUNT; i++) + table[i] = pptable_array[i]; + + *pptable_info_array = table; + + return 0; +} + +static int copy_overdrive_settings_limits_array( + struct pp_hwmgr *hwmgr, + uint32_t **pptable_info_array, + const uint32_t *pptable_array) +{ + uint32_t array_size, i; + uint32_t *table; + + array_size = sizeof(uint32_t) * ATOM_VEGA20_ODSETTING_COUNT; + + table = kzalloc(array_size, 
GFP_KERNEL); + if (NULL == table) + return -ENOMEM; + + for (i = 0; i < ATOM_VEGA20_ODSETTING_COUNT; i++) + table[i] = pptable_array[i]; + + *pptable_info_array = table; + + return 0; +} + +static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable) +{ + struct atom_smc_dpm_info_v4_3 *smc_dpm_table; + int index = GetIndexIntoMasterDataTable(smc_dpm_info); + + PP_ASSERT_WITH_CODE( + smc_dpm_table = smu_atom_get_data_table(hwmgr->adev, index, NULL, NULL, NULL), + "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!", + return -1); + + ppsmc_pptable->Liquid1_I2C_address = smc_dpm_table->liquid1_i2c_address; + ppsmc_pptable->Liquid2_I2C_address = smc_dpm_table->liquid2_i2c_address; + ppsmc_pptable->Vr_I2C_address = smc_dpm_table->vr_i2c_address; + ppsmc_pptable->Plx_I2C_address = smc_dpm_table->plx_i2c_address; + + ppsmc_pptable->Liquid_I2C_LineSCL = smc_dpm_table->liquid_i2c_linescl; + ppsmc_pptable->Liquid_I2C_LineSDA = smc_dpm_table->liquid_i2c_linesda; + ppsmc_pptable->Vr_I2C_LineSCL = smc_dpm_table->vr_i2c_linescl; + ppsmc_pptable->Vr_I2C_LineSDA = smc_dpm_table->vr_i2c_linesda; + + ppsmc_pptable->Plx_I2C_LineSCL = smc_dpm_table->plx_i2c_linescl; + ppsmc_pptable->Plx_I2C_LineSDA = smc_dpm_table->plx_i2c_linesda; + ppsmc_pptable->VrSensorPresent = smc_dpm_table->vrsensorpresent; + ppsmc_pptable->LiquidSensorPresent = smc_dpm_table->liquidsensorpresent; + + ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx; + ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc; + + ppsmc_pptable->VddGfxVrMapping = smc_dpm_table->vddgfxvrmapping; + ppsmc_pptable->VddSocVrMapping = smc_dpm_table->vddsocvrmapping; + ppsmc_pptable->VddMem0VrMapping = smc_dpm_table->vddmem0vrmapping; + ppsmc_pptable->VddMem1VrMapping = smc_dpm_table->vddmem1vrmapping; + + ppsmc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->gfxulvphasesheddingmask; + ppsmc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->soculvphasesheddingmask; + ppsmc_pptable->ExternalSensorPresent = smc_dpm_table->externalsensorpresent; + + ppsmc_pptable->GfxMaxCurrent = smc_dpm_table->gfxmaxcurrent; + ppsmc_pptable->GfxOffset = smc_dpm_table->gfxoffset; + ppsmc_pptable->Padding_TelemetryGfx = smc_dpm_table->padding_telemetrygfx; + + ppsmc_pptable->SocMaxCurrent = smc_dpm_table->socmaxcurrent; + ppsmc_pptable->SocOffset = smc_dpm_table->socoffset; + ppsmc_pptable->Padding_TelemetrySoc = smc_dpm_table->padding_telemetrysoc; + + ppsmc_pptable->Mem0MaxCurrent = smc_dpm_table->mem0maxcurrent; + ppsmc_pptable->Mem0Offset = smc_dpm_table->mem0offset; + ppsmc_pptable->Padding_TelemetryMem0 = smc_dpm_table->padding_telemetrymem0; + + ppsmc_pptable->Mem1MaxCurrent = smc_dpm_table->mem1maxcurrent; + ppsmc_pptable->Mem1Offset = smc_dpm_table->mem1offset; + ppsmc_pptable->Padding_TelemetryMem1 = smc_dpm_table->padding_telemetrymem1; + + ppsmc_pptable->AcDcGpio = smc_dpm_table->acdcgpio; + ppsmc_pptable->AcDcPolarity = smc_dpm_table->acdcpolarity; + ppsmc_pptable->VR0HotGpio = smc_dpm_table->vr0hotgpio; + ppsmc_pptable->VR0HotPolarity = smc_dpm_table->vr0hotpolarity; + + ppsmc_pptable->VR1HotGpio = smc_dpm_table->vr1hotgpio; + ppsmc_pptable->VR1HotPolarity = smc_dpm_table->vr1hotpolarity; + ppsmc_pptable->Padding1 = smc_dpm_table->padding1; + ppsmc_pptable->Padding2 = smc_dpm_table->padding2; + + ppsmc_pptable->LedPin0 = smc_dpm_table->ledpin0; + ppsmc_pptable->LedPin1 = smc_dpm_table->ledpin1; + ppsmc_pptable->LedPin2 = smc_dpm_table->ledpin2; + + ppsmc_pptable->PllGfxclkSpreadEnabled = 
smc_dpm_table->pllgfxclkspreadenabled; + ppsmc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->pllgfxclkspreadpercent; + ppsmc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->pllgfxclkspreadfreq; + + ppsmc_pptable->UclkSpreadEnabled = 0; + ppsmc_pptable->UclkSpreadPercent = smc_dpm_table->uclkspreadpercent; + ppsmc_pptable->UclkSpreadFreq = smc_dpm_table->uclkspreadfreq; + + ppsmc_pptable->FclkSpreadEnabled = 0; + ppsmc_pptable->FclkSpreadPercent = smc_dpm_table->fclkspreadpercent; + ppsmc_pptable->FclkSpreadFreq = smc_dpm_table->fclkspreadfreq; + + ppsmc_pptable->FllGfxclkSpreadEnabled = smc_dpm_table->fllgfxclkspreadenabled; + ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent; + ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq; + + return 0; +} + +#define VEGA20_ENGINECLOCK_HARDMAX 198000 +static int init_powerplay_table_information( + struct pp_hwmgr *hwmgr, + const ATOM_Vega20_POWERPLAYTABLE *powerplay_table) +{ + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + uint32_t disable_power_control = 0; + int result; + + hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType; + pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType; + + set_hw_cap(hwmgr, + ATOM_VEGA20_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, + PHM_PlatformCaps_ThermalController); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + + if (powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > VEGA20_ENGINECLOCK_HARDMAX) + hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA20_ENGINECLOCK_HARDMAX; + else + hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_GFXCLKFMAX]; + hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_UCLKFMAX]; + + copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->OverDrive8Table.ODSettingsMax); + copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->OverDrive8Table.ODSettingsMin); + + /* hwmgr->platformDescriptor.minOverdriveVDDC = 0; + hwmgr->platformDescriptor.maxOverdriveVDDC = 0; + hwmgr->platformDescriptor.overdriveVDDCStep = 0; */ + + if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 + && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport); + + pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1; + pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2; + pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit; + pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit; + pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit; + + pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; + + hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]; + + disable_power_control = 0; + if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit) { + /* enable TDP overdrive (PowerControl) feature as well if supported */ + 
phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerControl); + } + + copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockTable.PowerSavingClockMax); + copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockTable.PowerSavingClockMin); + + pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL); + if (pptable_information->smc_pptable == NULL) + return -ENOMEM; + + memcpy(pptable_information->smc_pptable, &(powerplay_table->smcPPTable), sizeof(PPTable_t)); + + result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); + + return result; +} + +static int vega20_pp_tables_initialize(struct pp_hwmgr *hwmgr) +{ + int result = 0; + const ATOM_Vega20_POWERPLAYTABLE *powerplay_table; + + hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v3_information), GFP_KERNEL); + PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL), + "Failed to allocate hwmgr->pptable!", return -ENOMEM); + + powerplay_table = get_powerplay_table(hwmgr); + PP_ASSERT_WITH_CODE((powerplay_table != NULL), + "Missing PowerPlay Table!", return -1); + + result = check_powerplay_tables(hwmgr, powerplay_table); + PP_ASSERT_WITH_CODE((result == 0), + "check_powerplay_tables failed", return result); + + result = set_platform_caps(hwmgr, + le32_to_cpu(powerplay_table->ulPlatformCaps)); + PP_ASSERT_WITH_CODE((result == 0), + "set_platform_caps failed", return result); + + result = init_powerplay_table_information(hwmgr, powerplay_table); + PP_ASSERT_WITH_CODE((result == 0), + "init_powerplay_table_information failed", return result); + + return result; +} + +static int vega20_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v3_information *pp_table_info = + (struct phm_ppt_v3_information *)(hwmgr->pptable); + + kfree(pp_table_info->power_saving_clock_max); + pp_table_info->power_saving_clock_max = NULL; + + kfree(pp_table_info->power_saving_clock_min); + pp_table_info->power_saving_clock_min = NULL; + + kfree(pp_table_info->od_settings_max); + pp_table_info->od_settings_max = NULL; + + kfree(pp_table_info->od_settings_min); + pp_table_info->od_settings_min = NULL; + + kfree(pp_table_info->smc_pptable); + pp_table_info->smc_pptable = NULL; + + kfree(hwmgr->pptable); + hwmgr->pptable = NULL; + + return 0; +} + +const struct pp_table_func vega20_pptable_funcs = { + .pptable_init = vega20_pp_tables_initialize, + .pptable_fini = vega20_pp_tables_uninitialize, +}; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h new file mode 100644 index 000000000000..846c2cb40b35 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h @@ -0,0 +1,31 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef VEGA20_PROCESSPPTABLES_H +#define VEGA20_PROCESSPPTABLES_H + +#include "hwmgr.h" + +extern const struct pp_table_func vega20_pptable_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c new file mode 100644 index 000000000000..2984ddd5428c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c @@ -0,0 +1,212 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "vega20_thermal.h" +#include "vega20_hwmgr.h" +#include "vega20_smumgr.h" +#include "vega20_ppsmc.h" +#include "vega20_inc.h" +#include "soc15_common.h" +#include "pp_debug.h" + +static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) +{ + int ret = 0; + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_GetCurrentRpm)) == 0, + "Attempt to get current RPM from SMC Failed!", + return ret); + PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr, + current_rpm)) == 0, + "Attempt to read current RPM from SMC Failed!", + return ret); + + return 0; +} + +int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info) +{ + memset(fan_speed_info, 0, sizeof(*fan_speed_info)); + fan_speed_info->supports_percent_read = false; + fan_speed_info->supports_percent_write = false; + fan_speed_info->supports_rpm_read = true; + fan_speed_info->supports_rpm_write = true; + + return 0; +} + +int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + *speed = 0; + + return vega20_get_current_rpm(hwmgr, speed); +} + +/** +* Reads the remote temperature from the SIslands thermal controller. +* +* @param hwmgr The address of the hardware manager. 
+*/ +int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + int temp = 0; + + temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS); + + temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> + CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; + + temp = temp & 0x1ff; + + temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals +* +* @param hwmgr The address of the hardware manager. +* @param range Temperature range to be programmed for +* high and low alert signals +* @exception PP_Result_BadInput if the input data is not valid. +*/ +static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range) +{ + struct amdgpu_device *adev = hwmgr->adev; + int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t val; + + if (low < range->min) + low = range->min; + if (high > range->max) + high = range->max; + + if (low > high) + return -EINVAL; + + val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); + + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); + + return 0; +} + +/** +* Enable thermal alerts on the RV770 thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +static int vega20_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t val = 0; + + val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); + val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); + val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); + + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val); + + return 0; +} + +/** +* Disable thermal alerts on the RV770 thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); + + return 0; +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = vega20_thermal_disable_alert(hwmgr); + + return result; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + int ret; + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + PPTable_t *table = &(data->smc_state_table.pp_table); + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetFanTemperatureTarget, + (uint32_t)table->FanTargetTemperature); + + return ret; +} + +int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range) +{ + int ret = 0; + + if (range == NULL) + return -EINVAL; + + ret = vega20_thermal_set_temperature_range(hwmgr, range); + if (ret) + return ret; + + ret = vega20_thermal_enable_alert(hwmgr); + if (ret) + return ret; + + ret = vega20_thermal_setup_fan_table(hwmgr); + + return ret; +}; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h new file mode 100644 index 000000000000..2a6d49fec4e0 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h @@ -0,0 +1,64 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef VEGA20_THERMAL_H +#define VEGA20_THERMAL_H + +#include "hwmgr.h" + +struct vega20_temperature { + uint16_t edge_temp; + uint16_t hot_spot_temp; + uint16_t hbm_temp; + uint16_t vr_soc_temp; + uint16_t vr_mem_temp; + uint16_t liquid1_temp; + uint16_t liquid2_temp; + uint16_t plx_temp; +}; + +#define VEGA20_THERMAL_HIGH_ALERT_MASK 0x1 +#define VEGA20_THERMAL_LOW_ALERT_MASK 0x2 + +#define VEGA20_THERMAL_MINIMUM_TEMP_READING -256 +#define VEGA20_THERMAL_MAXIMUM_TEMP_READING 255 + +#define VEGA20_THERMAL_MINIMUM_ALERT_TEMP 0 +#define VEGA20_THERMAL_MAXIMUM_ALERT_TEMP 255 + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + +extern int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr); +extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info); +extern int vega20_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); +extern int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, + uint32_t *speed); +extern int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr); +extern int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range); + +#endif + -- GitLab From 982b90319459c78fca0ddaac9858b2ef7bb5424c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 6 Jul 2018 14:00:37 -0500 Subject: [PATCH 0407/1692] drm/amd/powerplay: support workload profile query and setup for vega20 Support the power profile API. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 193 +++++++++++++++++- 1 file changed, 192 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 40f07177b046..06471d1a5765 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -384,10 +384,13 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->backend = data; + hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; + hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; + hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; + vega20_set_default_registry_data(hwmgr); data->disable_dpm_mask = 0xff; - data->workload_mask = 0xff; /* need to set voltage control types before EVV patching */ data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE; @@ -1971,6 +1974,190 @@ static int vega20_power_off_asic(struct pp_hwmgr *hwmgr) return result; } +static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) +{ + DpmActivityMonitorCoeffInt_t activity_monitor; + uint32_t i, size = 0; + uint16_t workload_type = 0; + static const char *profile_name[] = { + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE", + "CUSTOM"}; + static const char *title[] = { + "PROFILE_INDEX(NAME)", + "CLOCK_TYPE(NAME)", + "FPS", + "UseRlcBusy", + "MinActiveFreqType", + "MinActiveFreq", + "BoosterFreqType", + "BoosterFreq", + "PD_Data_limit_c", + "PD_Data_error_coeff", + "PD_Data_error_rate_coeff"}; + int result = 0; + + if (!buf) + return -EINVAL; + + size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n", + title[0], title[1], title[2], title[3], title[4], title[5], + title[6], title[7], title[8], title[9], title[10]); + + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT 
*/ + workload_type = i + 1; + result = vega20_get_activity_monitor_coeff(hwmgr, + (uint8_t *)(&activity_monitor), workload_type); + PP_ASSERT_WITH_CODE(!result, + "[GetPowerProfile] Failed to get activity monitor!", + return result); + + size += sprintf(buf + size, "%2d(%14s%s)\n", + i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " "); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 0, + "GFXCLK", + activity_monitor.Gfx_FPS, + activity_monitor.Gfx_UseRlcBusy, + activity_monitor.Gfx_MinActiveFreqType, + activity_monitor.Gfx_MinActiveFreq, + activity_monitor.Gfx_BoosterFreqType, + activity_monitor.Gfx_BoosterFreq, + activity_monitor.Gfx_PD_Data_limit_c, + activity_monitor.Gfx_PD_Data_error_coeff, + activity_monitor.Gfx_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 1, + "SOCCLK", + activity_monitor.Soc_FPS, + activity_monitor.Soc_UseRlcBusy, + activity_monitor.Soc_MinActiveFreqType, + activity_monitor.Soc_MinActiveFreq, + activity_monitor.Soc_BoosterFreqType, + activity_monitor.Soc_BoosterFreq, + activity_monitor.Soc_PD_Data_limit_c, + activity_monitor.Soc_PD_Data_error_coeff, + activity_monitor.Soc_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 2, + "UCLK", + activity_monitor.Mem_FPS, + activity_monitor.Mem_UseRlcBusy, + activity_monitor.Mem_MinActiveFreqType, + activity_monitor.Mem_MinActiveFreq, + activity_monitor.Mem_BoosterFreqType, + activity_monitor.Mem_BoosterFreq, + activity_monitor.Mem_PD_Data_limit_c, + activity_monitor.Mem_PD_Data_error_coeff, + activity_monitor.Mem_PD_Data_error_rate_coeff); + + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 3, + "FCLK", + activity_monitor.Fclk_FPS, + activity_monitor.Fclk_UseRlcBusy, + activity_monitor.Fclk_MinActiveFreqType, + activity_monitor.Fclk_MinActiveFreq, + activity_monitor.Fclk_BoosterFreqType, + activity_monitor.Fclk_BoosterFreq, + activity_monitor.Fclk_PD_Data_limit_c, + activity_monitor.Fclk_PD_Data_error_coeff, + activity_monitor.Fclk_PD_Data_error_rate_coeff); + } + + return size; +} + +static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) +{ + DpmActivityMonitorCoeffInt_t activity_monitor; + int result = 0; + + hwmgr->power_profile_mode = input[size]; + + if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + if (size < 10) + return -EINVAL; + + result = vega20_get_activity_monitor_coeff(hwmgr, + (uint8_t *)(&activity_monitor), + WORKLOAD_PPLIB_CUSTOM_BIT); + PP_ASSERT_WITH_CODE(!result, + "[SetPowerProfile] Failed to get activity monitor!", + return result); + + switch (input[0]) { + case 0: /* Gfxclk */ + activity_monitor.Gfx_FPS = input[1]; + activity_monitor.Gfx_UseRlcBusy = input[2]; + activity_monitor.Gfx_MinActiveFreqType = input[3]; + activity_monitor.Gfx_MinActiveFreq = input[4]; + activity_monitor.Gfx_BoosterFreqType = input[5]; + activity_monitor.Gfx_BoosterFreq = input[6]; + activity_monitor.Gfx_PD_Data_limit_c = input[7]; + activity_monitor.Gfx_PD_Data_error_coeff = input[8]; + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; + break; + case 1: /* Socclk */ + activity_monitor.Soc_FPS = input[1]; + activity_monitor.Soc_UseRlcBusy = input[2]; + activity_monitor.Soc_MinActiveFreqType = input[3]; + activity_monitor.Soc_MinActiveFreq = input[4]; + activity_monitor.Soc_BoosterFreqType = input[5]; + 
activity_monitor.Soc_BoosterFreq = input[6]; + activity_monitor.Soc_PD_Data_limit_c = input[7]; + activity_monitor.Soc_PD_Data_error_coeff = input[8]; + activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; + break; + case 2: /* Uclk */ + activity_monitor.Mem_FPS = input[1]; + activity_monitor.Mem_UseRlcBusy = input[2]; + activity_monitor.Mem_MinActiveFreqType = input[3]; + activity_monitor.Mem_MinActiveFreq = input[4]; + activity_monitor.Mem_BoosterFreqType = input[5]; + activity_monitor.Mem_BoosterFreq = input[6]; + activity_monitor.Mem_PD_Data_limit_c = input[7]; + activity_monitor.Mem_PD_Data_error_coeff = input[8]; + activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; + break; + case 3: /* Fclk */ + activity_monitor.Fclk_FPS = input[1]; + activity_monitor.Fclk_UseRlcBusy = input[2]; + activity_monitor.Fclk_MinActiveFreqType = input[3]; + activity_monitor.Fclk_MinActiveFreq = input[4]; + activity_monitor.Fclk_BoosterFreqType = input[5]; + activity_monitor.Fclk_BoosterFreq = input[6]; + activity_monitor.Fclk_PD_Data_limit_c = input[7]; + activity_monitor.Fclk_PD_Data_error_coeff = input[8]; + activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9]; + break; + } + + result = vega20_set_activity_monitor_coeff(hwmgr, + (uint8_t *)(&activity_monitor), + WORKLOAD_PPLIB_CUSTOM_BIT); + PP_ASSERT_WITH_CODE(!result, + "[SetPowerProfile] Failed to set activity monitor!", + return result); + } + + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, + 1 << hwmgr->power_profile_mode); + + return 0; +} + static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, uint32_t virtual_addr_low, uint32_t virtual_addr_hi, @@ -2053,6 +2240,10 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { /* UMD pstate, profile related */ .force_dpm_level = vega20_dpm_force_dpm_level, + .get_power_profile_mode = + vega20_get_power_profile_mode, + .set_power_profile_mode = + vega20_set_power_profile_mode, .set_power_limit = vega20_set_power_limit, /* for sysfs to retrive/set gfxclk/memclk */ -- GitLab From 7c2912a26d363eb33d04b4009e857a81c918247b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 4 May 2018 15:20:15 +0800 Subject: [PATCH 0408/1692] drm/amd/powerplay: init vega20 uvd/vce powergate status on dpm setup This is essentially necessary when uvd/vce dpm is not enabled yet. 
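In short, the powergate flags start out as "gated" and are cleared only for blocks whose DPM feature the SMU has already enabled. The following is a minimal illustrative sketch of that state after backend setup, assuming the vega20_hwmgr/smu_features layout used throughout this series; the authoritative change is the vega20_init_powergate_state() hunk in the diff below.

static void example_init_powergate_state(struct vega20_hwmgr *data)
{
	/* Default to "power gated" so the flags are sane even when
	 * UVD/VCE DPM never gets enabled.
	 */
	data->uvd_power_gated = true;
	data->vce_power_gated = true;

	/* If the SMU reports the DPM feature as enabled, the block is up. */
	if (data->smu_features[GNLD_DPM_UVD].enabled)
		data->uvd_power_gated = false;
	if (data->smu_features[GNLD_DPM_VCE].enabled)
		data->vce_power_gated = false;
}
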
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 06471d1a5765..a82a3df0e8d9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -932,6 +932,21 @@ static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr) return 0; } +static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = true; + data->vce_power_gated = true; + + if (data->smu_features[GNLD_DPM_UVD].enabled) + data->uvd_power_gated = false; + + if (data->smu_features[GNLD_DPM_VCE].enabled) + data->vce_power_gated = false; +} + static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int result = 0; @@ -954,6 +969,9 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to enable all smu features!", return result); + /* Initialize UVD/VCE powergating state */ + vega20_init_powergate_state(hwmgr); + result = vega20_setup_default_dpm_tables(hwmgr); PP_ASSERT_WITH_CODE(!result, "[EnableDPMTasks] Failed to setup default DPM tables!", -- GitLab From fff7e3e049d897801addf2e9051b3f26e48bdf46 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 8 May 2018 18:27:03 +0800 Subject: [PATCH 0409/1692] drm/amd/powerplay: correct force clock level related settings for vega20 (v2) 1. The min/max level is determined by soft_min_level/soft_max_level. 2. Vega20 comes with pptable v3 which has no vdd related table(vdd_dep_on_socclk, vdd_dep_on_mclk) support. 3. Vega20 does not support separate fan feature control(enable or disable). 
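For point 1 above, a small worked example of how a user-supplied level mask becomes soft min/max limits may help. This is only a sketch of the mask handling visible in the hunks below (ffs()/fls() are the kernel's find-first/last-set-bit helpers, and the struct layout is the one used in this series), not additional driver code.

static void example_mask_to_soft_levels(struct vega20_single_dpm_table *dpm_table,
					uint32_t mask)
{
	/* With mask = 0x0c (levels 2 and 3 requested):
	 *   ffs(mask) - 1 = 2 -> lowest requested DPM level
	 *   fls(mask) - 1 = 3 -> highest requested DPM level
	 */
	uint32_t soft_min_level = mask ? (ffs(mask) - 1) : 0;
	uint32_t soft_max_level = mask ? (fls(mask) - 1) : 0;

	/* The soft limits carry the clock values of those levels,
	 * not the level indices themselves.
	 */
	dpm_table->dpm_state.soft_min_level =
		dpm_table->dpm_levels[soft_min_level].value;
	dpm_table->dpm_state.soft_max_level =
		dpm_table->dpm_levels[soft_max_level].value;
}
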
v2: squash in fixes: - bug fix for force dpm level settings - fix wrong data type Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 109 ++++++++++-------- 1 file changed, 64 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index a82a3df0e8d9..289e3ee2006d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1015,7 +1015,7 @@ static uint32_t vega20_find_lowest_dpm_level( static uint32_t vega20_find_highest_dpm_level( struct vega20_single_dpm_table *table) { - uint32_t i = 0; + int i = 0; PP_ASSERT_WITH_CODE(table != NULL, "[FindHighestDPMLevel] DPM Table does not exist!", @@ -1409,14 +1409,20 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t soft_level; int ret = 0; - data->smc_state_table.gfx_boot_level = - data->smc_state_table.gfx_max_level = - vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.mem_boot_level = - data->smc_state_table.mem_max_level = - vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); + soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_level].value; + + soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_level].value; ret = vega20_upload_dpm_min_level(hwmgr); PP_ASSERT_WITH_CODE(!ret, @@ -1435,14 +1441,20 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t soft_level; int ret = 0; - data->smc_state_table.gfx_boot_level = - data->smc_state_table.gfx_max_level = - vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); - data->smc_state_table.mem_boot_level = - data->smc_state_table.mem_max_level = - vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_level].value; + + soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_level].value; ret = vega20_upload_dpm_min_level(hwmgr); PP_ASSERT_WITH_CODE(!ret, @@ -1475,19 +1487,24 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) return 0; } -#if 0 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask) { - struct phm_ppt_v2_information *table_info = - (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); + struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); + struct vega20_single_dpm_table *soc_dpm_table = 
&(data->dpm_table.soc_table); + + *sclk_mask = 0; + *mclk_mask = 0; + *soc_mask = 0; - if (table_info->vdd_dep_on_sclk->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && - table_info->vdd_dep_on_socclk->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL && - table_info->vdd_dep_on_mclk->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) { + if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && + mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL && + soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) { *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL; - *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL; *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL; + *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL; } if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { @@ -1495,24 +1512,30 @@ static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { *mclk_mask = 0; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { - *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; - *soc_mask = table_info->vdd_dep_on_socclk->count - 1; - *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; + *sclk_mask = gfx_dpm_table->count - 1; + *mclk_mask = mem_dpm_table->count - 1; + *soc_mask = soc_dpm_table->count - 1; } + return 0; } -#endif static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t soft_min_level, soft_max_level; int ret = 0; switch (type) { case PP_SCLK: - data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0; + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; ret = vega20_upload_dpm_min_level(hwmgr); PP_ASSERT_WITH_CODE(!ret, @@ -1526,8 +1549,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, break; case PP_MCLK: - data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0; + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? 
(fls(mask) - 1) : 0; + + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_levels[soft_min_level].value; + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_max_level].value; ret = vega20_upload_dpm_min_level(hwmgr); PP_ASSERT_WITH_CODE(!ret, @@ -1555,47 +1583,38 @@ static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { int ret = 0; -#if 0 - uint32_t sclk_mask = 0; - uint32_t mclk_mask = 0; - uint32_t soc_mask = 0; -#endif + uint32_t sclk_mask, mclk_mask, soc_mask; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: ret = vega20_force_dpm_highest(hwmgr); break; + case AMD_DPM_FORCED_LEVEL_LOW: ret = vega20_force_dpm_lowest(hwmgr); break; + case AMD_DPM_FORCED_LEVEL_AUTO: ret = vega20_unforce_dpm_levels(hwmgr); break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: -#if 0 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); if (ret) return ret; - vega20_force_clock_level(hwmgr, PP_SCLK, 1<dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); - else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) - vega20_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO); - } -#endif + return ret; } -- GitLab From b8497699ef8a1858e7246bf1abfa0c84a479f665 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 8 May 2018 18:23:16 +0800 Subject: [PATCH 0410/1692] drm/amd/powerplay: export vega20 stable pstate clocks Needed for querying the stable pstate clocks. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 289e3ee2006d..7b6e48a01c75 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -846,6 +846,25 @@ static int vega20_odn_initialize_default_settings( return 0; } +static int vega20_populate_umdpstate_clocks( + struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table); + struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table); + + hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value; + hwmgr->pstate_mclk = mem_table->dpm_levels[0].value; + + if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && + mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) { + hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; + hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; + } + + return 0; +} + static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, PP_Clock *clock, PPCLK_e clock_select) { @@ -992,7 +1011,12 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to initialize odn settings!", return result); - return result; + result = vega20_populate_umdpstate_clocks(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to populate umdpstate clocks!", + return result); + + return 0; } static uint32_t vega20_find_lowest_dpm_level( -- GitLab From 
8dd97d6bc2c5875a365fbe0f4f00de3ae62390e2 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 9 May 2018 11:14:06 +0800 Subject: [PATCH 0411/1692] drm/amd/powerplay: add vega20 pre_display_config_changed callback fix possible handshake hang and video playback crash Corner cases: - Handshake between SMU and DCE causes hangs when CRTC is not enabled - System crash occurs when starting 4K playback with Movies and TV in an SLS configuration Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 7b6e48a01c75..5b0c65405dd1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1874,6 +1874,45 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } +static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, + struct vega20_single_dpm_table *dpm_table) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + PP_ASSERT_WITH_CODE(dpm_table->count > 0, + "[SetUclkToHightestDpmLevel] Dpm table has no entry!", + return -EINVAL); + PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS, + "[SetUclkToHightestDpmLevel] Dpm table has too many entries!", + return -EINVAL); + + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinByFreq, + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), + "[SetUclkToHightestDpmLevel] Set hard min uclk failed!", + return ret); + } + + return ret; +} + +static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_NumOfDisplays, 0); + + ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, + &data->dpm_table.mem_table); + + return ret; +} + static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); @@ -2277,6 +2316,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { /* power state related */ .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules, + .pre_display_config_changed = + vega20_pre_display_configuration_changed_task, .display_config_changed = vega20_display_configuration_changed_task, .check_smc_update_required_for_display_configuration = -- GitLab From bc9b8c45b86ff24c34ad7e6a320ad11507ca9f63 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 11 May 2018 16:10:51 +0800 Subject: [PATCH 0412/1692] drm/amd/powerplay: conv the vega20 pstate sclk/mclk into necessary 10KHz unit Powerplay uses 10KHz units. 
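Concretely, this is just a unit scaling: the DPM table levels that seed pstate_sclk/pstate_mclk are in MHz, while powerplay consumers expect 10KHz units, hence the multiply by 100 in the hunk below (1 MHz = 100 x 10KHz). A minimal sketch of the arithmetic, with an example value rather than one read from a real pptable:

	/* Unit-conversion sketch only, mirroring the *100 in the diff below. */
	uint32_t pstate_sclk_mhz   = 1000;                  /* example: 1000 MHz */
	uint32_t pstate_sclk_10khz = pstate_sclk_mhz * 100; /* -> 100000 in 10KHz units */
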
Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 5b0c65405dd1..182f25ccc61b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -862,6 +862,9 @@ static int vega20_populate_umdpstate_clocks( hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; } + hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100; + hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100; + return 0; } -- GitLab From 7dd67c0d4200a333aa7f6fc9b077f423654987dd Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 21 May 2018 10:16:41 +0800 Subject: [PATCH 0413/1692] drm/amd/powerplay: initialize vega20 overdrive settings The initialized overdrive settings are taken from vbios and SMU( by PPSMC_MSG_TransferTableSmu2Dram). Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 293 ++++++++++++++++-- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 53 +++- .../powerplay/hwmgr/vega20_processpptables.c | 103 ++++-- .../drm/amd/powerplay/inc/hardwaremanager.h | 2 + drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 5 files changed, 403 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 182f25ccc61b..51bc05dea8e1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -103,7 +103,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) data->registry_data.quick_transition_support = 0; data->registry_data.zrpm_start_temp = 0xffff; data->registry_data.zrpm_stop_temp = 0xffff; - data->registry_data.odn_feature_enable = 1; + data->registry_data.od8_feature_enable = 1; data->registry_data.disable_water_mark = 0; data->registry_data.disable_pp_tuning = 0; data->registry_data.disable_xlpp_tuning = 0; @@ -150,15 +150,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UnTabledHardwareInterface); - if (data->registry_data.odn_feature_enable) + if (data->registry_data.od8_feature_enable) phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinACSupport); - else { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6inACSupport); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinACSupport); - } + PHM_PlatformCaps_OD8inACSupport); phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ActivityReporting); @@ -166,15 +160,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_FanSpeedInTableIsRPM); if (data->registry_data.od_state_in_dc_support) { - if (data->registry_data.odn_feature_enable) + if (data->registry_data.od8_feature_enable) phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_ODNinDCSupport); - else { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6inDCSupport); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_OD6PlusinDCSupport); - } + PHM_PlatformCaps_OD8inDCSupport); } if (data->registry_data.thermal_support && @@ -840,9 +828,276 @@ static int 
vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) return 0; } -static int vega20_odn_initialize_default_settings( +static int vega20_od8_set_feature_capabilities( + struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_od8_settings *od_settings = &(data->od8_settings); + + od_settings->overdrive8_capabilities = 0; + + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 && + pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0 && + pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0) + od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS; + + if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 && + pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 && + pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 && + pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 && + pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 && + pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0) + od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE; + } + + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0) + od_settings->overdrive8_capabilities |= OD8_UCLK_MAX; + } + + if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] > 0 && + pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] <= 100) + od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT; + + if (data->smu_features[GNLD_FAN_CONTROL].enabled) { + if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMMIN] > 0) + od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN; + + if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMACOUSTICLIMIT] > 0) + od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK; + } + + if (data->smu_features[GNLD_THERMAL].enabled) { + if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANTARGETTEMPERATURE] > 0) + od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN; + + if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_OPERATINGTEMPMAX] > 0) + od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM; + } + + return 0; +} + +static int vega20_od8_set_feature_id( + struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_od8_settings *od_settings = 
&(data->od8_settings); + + if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) { + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id = + OD8_GFXCLK_LIMITS; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id = + OD8_GFXCLK_LIMITS; + } else { + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id = + 0; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id = + 0; + } + + if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) { + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id = + OD8_GFXCLK_CURVE; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id = + OD8_GFXCLK_CURVE; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id = + OD8_GFXCLK_CURVE; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id = + OD8_GFXCLK_CURVE; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id = + OD8_GFXCLK_CURVE; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id = + OD8_GFXCLK_CURVE; + } else { + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id = + 0; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id = + 0; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id = + 0; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id = + 0; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id = + 0; + od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id = + 0; + } + + if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX) + od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX; + else + od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0; + + if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT) + od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT; + else + od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0; + + if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK) + od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id = + OD8_ACOUSTIC_LIMIT_SCLK; + else + od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id = + 0; + + if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN) + od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id = + OD8_FAN_SPEED_MIN; + else + od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id = + 0; + + if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN) + od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id = + OD8_TEMPERATURE_FAN; + else + od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id = + 0; + + if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM) + od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id = + OD8_TEMPERATURE_SYSTEM; + else + od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id = + 0; + + return 0; +} + +static int vega20_od8_initialize_default_settings( struct pp_hwmgr *hwmgr) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_od8_settings *od8_settings = &(data->od8_settings); + OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); + int i, ret = 0; + + /* Set Feature Capabilities */ + 
vega20_od8_set_feature_capabilities(hwmgr); + + /* Map FeatureID to individual settings */ + vega20_od8_set_feature_id(hwmgr); + + /* Set default values */ + ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE); + PP_ASSERT_WITH_CODE(!ret, + "Failed to export over drive table!", + return ret); + + if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) { + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value = + od_table->GfxclkFmin; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value = + od_table->GfxclkFmax; + } else { + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value = + 0; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value = + 0; + } + + if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) { + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value = + od_table->GfxclkFreq1; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = + od_table->GfxclkOffsetVolt1; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value = + od_table->GfxclkFreq2; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = + od_table->GfxclkOffsetVolt2; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value = + od_table->GfxclkFreq3; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = + od_table->GfxclkOffsetVolt3; + } else { + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value = + 0; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = + 0; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value = + 0; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = + 0; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value = + 0; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = + 0; + } + + if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX) + od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value = + od_table->UclkFmax; + else + od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value = + 0; + + if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT) + od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value = + od_table->OverDrivePct; + else + od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value = + 0; + + if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK) + od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value = + od_table->FanMaximumRpm; + else + od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value = + 0; + + if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN) + od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value = + od_table->FanMinimumPwm; + else + od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value = + 0; + + if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN) + od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value = + od_table->FanTargetTemperature; + else + od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value = + 0; + + if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM) + od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value = + od_table->MaxOpTemp; + else + 
od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value = + 0; + + for (i = 0; i < OD8_SETTING_COUNT; i++) { + if (od8_settings->od8_settings_array[i].feature_id) { + od8_settings->od8_settings_array[i].min_value = + pptable_information->od_settings_min[i]; + od8_settings->od8_settings_array[i].max_value = + pptable_information->od_settings_max[i]; + od8_settings->od8_settings_array[i].current_value = + od8_settings->od8_settings_array[i].default_value; + } else { + od8_settings->od8_settings_array[i].min_value = + 0; + od8_settings->od8_settings_array[i].max_value = + 0; + od8_settings->od8_settings_array[i].current_value = + 0; + } + } + return 0; } @@ -1009,7 +1264,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to power control set level!", return result); - result = vega20_odn_initialize_default_settings(hwmgr); + result = vega20_od8_initialize_default_settings(hwmgr); PP_ASSERT_WITH_CODE(!result, "[EnableDPMTasks] Failed to initialize odn settings!", return result); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 59a59bcdad3a..130052a330b3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -306,7 +306,7 @@ struct vega20_registry_data { uint8_t led_dpm_enabled; uint8_t fan_control_support; uint8_t ulv_support; - uint8_t odn_feature_enable; + uint8_t od8_feature_enable; uint8_t disable_water_mark; uint8_t disable_workload_policy; uint32_t force_workload_policy_mask; @@ -377,6 +377,54 @@ struct vega20_odn_data { struct vega20_odn_temp_table odn_temp_table; }; +enum OD8_FEATURE_ID +{ + OD8_GFXCLK_LIMITS = 1 << 0, + OD8_GFXCLK_CURVE = 1 << 1, + OD8_UCLK_MAX = 1 << 2, + OD8_POWER_LIMIT = 1 << 3, + OD8_ACOUSTIC_LIMIT_SCLK = 1 << 4, //FanMaximumRpm + OD8_FAN_SPEED_MIN = 1 << 5, //FanMinimumPwm + OD8_TEMPERATURE_FAN = 1 << 6, //FanTargetTemperature + OD8_TEMPERATURE_SYSTEM = 1 << 7, //MaxOpTemp + OD8_MEMORY_TIMING_TUNE = 1 << 8, + OD8_FAN_ZERO_RPM_CONTROL = 1 << 9 +}; + +enum OD8_SETTING_ID +{ + OD8_SETTING_GFXCLK_FMIN = 0, + OD8_SETTING_GFXCLK_FMAX, + OD8_SETTING_GFXCLK_FREQ1, + OD8_SETTING_GFXCLK_VOLTAGE1, + OD8_SETTING_GFXCLK_FREQ2, + OD8_SETTING_GFXCLK_VOLTAGE2, + OD8_SETTING_GFXCLK_FREQ3, + OD8_SETTING_GFXCLK_VOLTAGE3, + OD8_SETTING_UCLK_FMAX, + OD8_SETTING_POWER_PERCENTAGE, + OD8_SETTING_FAN_ACOUSTIC_LIMIT, + OD8_SETTING_FAN_MIN_SPEED, + OD8_SETTING_FAN_TARGET_TEMP, + OD8_SETTING_OPERATING_TEMP_MAX, + OD8_SETTING_AC_TIMING, + OD8_SETTING_FAN_ZERO_RPM_CONTROL, + OD8_SETTING_COUNT +}; + +struct vega20_od8_single_setting { + uint32_t feature_id; + int32_t min_value; + int32_t max_value; + int32_t current_value; + int32_t default_value; +}; + +struct vega20_od8_settings { + uint32_t overdrive8_capabilities; + struct vega20_od8_single_setting od8_settings_array[OD8_SETTING_COUNT]; +}; + struct vega20_hwmgr { struct vega20_dpm_table dpm_table; struct vega20_dpm_table golden_dpm_table; @@ -452,6 +500,9 @@ struct vega20_hwmgr { /* ---- Overdrive next setting ---- */ struct vega20_odn_data odn_data; + /* ---- Overdrive8 Setting ---- */ + struct vega20_od8_settings od8_settings; + /* ---- Workload Mask ---- */ uint32_t workload_mask; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c index 379ac3d1da03..32d24a48a947 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -664,18 +664,18 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) static int copy_clock_limits_array( struct pp_hwmgr *hwmgr, uint32_t **pptable_info_array, - const uint32_t *pptable_array) + const uint32_t *pptable_array, + uint32_t power_saving_clock_count) { uint32_t array_size, i; uint32_t *table; - array_size = sizeof(uint32_t) * ATOM_VEGA20_PPCLOCK_COUNT; - + array_size = sizeof(uint32_t) * power_saving_clock_count; table = kzalloc(array_size, GFP_KERNEL); if (NULL == table) return -ENOMEM; - for (i = 0; i < ATOM_VEGA20_PPCLOCK_COUNT; i++) + for (i = 0; i < power_saving_clock_count; i++) table[i] = pptable_array[i]; *pptable_info_array = table; @@ -686,22 +686,52 @@ static int copy_clock_limits_array( static int copy_overdrive_settings_limits_array( struct pp_hwmgr *hwmgr, uint32_t **pptable_info_array, - const uint32_t *pptable_array) + const uint32_t *pptable_array, + uint32_t od_setting_count) { uint32_t array_size, i; uint32_t *table; - array_size = sizeof(uint32_t) * ATOM_VEGA20_ODSETTING_COUNT; + array_size = sizeof(uint32_t) * od_setting_count; + table = kzalloc(array_size, GFP_KERNEL); + if (NULL == table) + return -ENOMEM; + + for (i = 0; i < od_setting_count; i++) + table[i] = pptable_array[i]; + + *pptable_info_array = table; + + return 0; +} + +static int copy_overdrive_feature_capabilities_array( + struct pp_hwmgr *hwmgr, + uint8_t **pptable_info_array, + const uint8_t *pptable_array, + uint8_t od_feature_count) +{ + uint32_t array_size, i; + uint8_t *table; + bool od_supported = false; + array_size = sizeof(uint8_t) * od_feature_count; table = kzalloc(array_size, GFP_KERNEL); if (NULL == table) return -ENOMEM; - for (i = 0; i < ATOM_VEGA20_ODSETTING_COUNT; i++) + for (i = 0; i < od_feature_count; i++) { table[i] = pptable_array[i]; + if (table[i]) + od_supported = true; + } *pptable_info_array = table; + if (od_supported) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ACOverdriveSupport); + return 0; } @@ -799,6 +829,7 @@ static int init_powerplay_table_information( struct phm_ppt_v3_information *pptable_information = (struct phm_ppt_v3_information *)hwmgr->pptable; uint32_t disable_power_control = 0; + uint32_t od_feature_count, od_setting_count, power_saving_clock_count; int result; hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType; @@ -810,22 +841,25 @@ static int init_powerplay_table_information( phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); - if (powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > VEGA20_ENGINECLOCK_HARDMAX) - hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA20_ENGINECLOCK_HARDMAX; - else - hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_GFXCLKFMAX]; - hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_UCLKFMAX]; - - copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->OverDrive8Table.ODSettingsMax); - copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->OverDrive8Table.ODSettingsMin); - - /* hwmgr->platformDescriptor.minOverdriveVDDC = 0; - hwmgr->platformDescriptor.maxOverdriveVDDC = 0; - hwmgr->platformDescriptor.overdriveVDDCStep = 0; */ - - if 
(hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 - && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport); + if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) { + od_feature_count = (powerplay_table->OverDrive8Table.ODFeatureCount > ATOM_VEGA20_ODFEATURE_COUNT) ? + ATOM_VEGA20_ODFEATURE_COUNT : powerplay_table->OverDrive8Table.ODFeatureCount; + od_setting_count = (powerplay_table->OverDrive8Table.ODSettingCount > ATOM_VEGA20_ODSETTING_COUNT) ? + ATOM_VEGA20_ODSETTING_COUNT : powerplay_table->OverDrive8Table.ODSettingCount; + + copy_overdrive_feature_capabilities_array(hwmgr, + &pptable_information->od_feature_capabilities, + powerplay_table->OverDrive8Table.ODFeatureCapabilities, + od_feature_count); + copy_overdrive_settings_limits_array(hwmgr, + &pptable_information->od_settings_max, + powerplay_table->OverDrive8Table.ODSettingsMax, + od_setting_count); + copy_overdrive_settings_limits_array(hwmgr, + &pptable_information->od_settings_min, + powerplay_table->OverDrive8Table.ODSettingsMin, + od_setting_count); + } pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1; pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2; @@ -838,15 +872,23 @@ static int init_powerplay_table_information( hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]; disable_power_control = 0; - if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit) { + if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit) /* enable TDP overdrive (PowerControl) feature as well if supported */ - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerControl); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerControl); + + if (powerplay_table->PowerSavingClockTable.ucTableRevision == 1) { + power_saving_clock_count = (powerplay_table->PowerSavingClockTable.PowerSavingClockCount >= ATOM_VEGA20_PPCLOCK_COUNT) ? 
+ ATOM_VEGA20_PPCLOCK_COUNT : powerplay_table->PowerSavingClockTable.PowerSavingClockCount; + copy_clock_limits_array(hwmgr, + &pptable_information->power_saving_clock_max, + powerplay_table->PowerSavingClockTable.PowerSavingClockMax, + power_saving_clock_count); + copy_clock_limits_array(hwmgr, + &pptable_information->power_saving_clock_min, + powerplay_table->PowerSavingClockTable.PowerSavingClockMin, + power_saving_clock_count); } - copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockTable.PowerSavingClockMax); - copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockTable.PowerSavingClockMin); - pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL); if (pptable_information->smc_pptable == NULL) return -ENOMEM; @@ -898,6 +940,9 @@ static int vega20_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) kfree(pp_table_info->power_saving_clock_min); pp_table_info->power_saving_clock_min = NULL; + kfree(pp_table_info->od_feature_capabilities); + pp_table_info->od_feature_capabilities = NULL; + kfree(pp_table_info->od_settings_max); pp_table_info->od_settings_max = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 429c9c4322da..54fd0125d9cf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -232,6 +232,8 @@ enum phm_platform_caps { PHM_PlatformCaps_UVDClientMCTuning, PHM_PlatformCaps_ODNinACSupport, PHM_PlatformCaps_ODNinDCSupport, + PHM_PlatformCaps_OD8inACSupport, + PHM_PlatformCaps_OD8inDCSupport, PHM_PlatformCaps_UMDPState, PHM_PlatformCaps_AutoWattmanSupport, PHM_PlatformCaps_AutoWattmanEnable_CCCState, diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 88f451764da9..a6d92128b19c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -583,6 +583,7 @@ struct phm_ppt_v3_information uint32_t *power_saving_clock_max; uint32_t *power_saving_clock_min; + uint8_t *od_feature_capabilities; uint32_t *od_settings_max; uint32_t *od_settings_min; -- GitLab From d617d4d73043bc4cbc316a7a1b4370fa5bc26a31 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 21 May 2018 10:19:06 +0800 Subject: [PATCH 0414/1692] drm/amd/powerplay: new interfaces for overdrive vega20 sclk and mclk Add support for the new SMU firmware interface for clock adjustment. 
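For reference, the sclk/mclk overdrive callbacks added below express the overclock as a percentage over the stock ("golden") top DPM level, and convert that percentage back to an absolute Fmax on the set path. The following is a minimal user-space sketch of that arithmetic only (not kernel code); the golden and current Fmax values are made up for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		uint32_t golden_fmax = 1700;	/* stock top gfxclk level, MHz (example value) */
		uint32_t cur_fmax    = 1785;	/* current top gfxclk level, MHz (example value) */

		/* get_sclk_od: overclock reported as a percentage over stock */
		uint32_t od_pct = DIV_ROUND_UP((cur_fmax - golden_fmax) * 100, golden_fmax);

		/* set_sclk_od: percentage converted back to an absolute Fmax */
		uint32_t new_fmax = golden_fmax + golden_fmax * od_pct / 100;

		printf("od %u%% -> Fmax %u MHz\n", od_pct, new_fmax);
		return 0;
	}
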
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 169 ++++++++++++++++++ 1 file changed, 169 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 51bc05dea8e1..1e9426fb1bf9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1101,6 +1101,166 @@ static int vega20_od8_initialize_default_settings( return 0; } +static int vega20_od8_set_settings( + struct pp_hwmgr *hwmgr, + uint32_t index, + uint32_t value) +{ + OverDriveTable_t od_table; + int ret = 0; + + ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE); + PP_ASSERT_WITH_CODE(!ret, + "Failed to export over drive table!", + return ret); + + switch(index) { + case OD8_SETTING_GFXCLK_FMIN: + od_table.GfxclkFmin = (uint16_t)value; + break; + case OD8_SETTING_GFXCLK_FMAX: + od_table.GfxclkFmax = (uint16_t)value; + break; + case OD8_SETTING_GFXCLK_FREQ1: + od_table.GfxclkFreq1 = (uint16_t)value; + break; + case OD8_SETTING_GFXCLK_VOLTAGE1: + od_table.GfxclkOffsetVolt1 = (uint16_t)value; + break; + case OD8_SETTING_GFXCLK_FREQ2: + od_table.GfxclkFreq2 = (uint16_t)value; + break; + case OD8_SETTING_GFXCLK_VOLTAGE2: + od_table.GfxclkOffsetVolt2 = (uint16_t)value; + break; + case OD8_SETTING_GFXCLK_FREQ3: + od_table.GfxclkFreq3 = (uint16_t)value; + break; + case OD8_SETTING_GFXCLK_VOLTAGE3: + od_table.GfxclkOffsetVolt3 = (uint16_t)value; + break; + case OD8_SETTING_UCLK_FMAX: + od_table.UclkFmax = (uint16_t)value; + break; + case OD8_SETTING_POWER_PERCENTAGE: + od_table.OverDrivePct = (int16_t)value; + break; + case OD8_SETTING_FAN_ACOUSTIC_LIMIT: + od_table.FanMaximumRpm = (uint16_t)value; + break; + case OD8_SETTING_FAN_MIN_SPEED: + od_table.FanMinimumPwm = (uint16_t)value; + break; + case OD8_SETTING_FAN_TARGET_TEMP: + od_table.FanTargetTemperature = (uint16_t)value; + break; + case OD8_SETTING_OPERATING_TEMP_MAX: + od_table.MaxOpTemp = (uint16_t)value; + break; + } + + ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE); + PP_ASSERT_WITH_CODE(!ret, + "Failed to import over drive table!", + return ret); + + return 0; +} + +static int vega20_get_sclk_od( + struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = hwmgr->backend; + struct vega20_single_dpm_table *sclk_table = + &(data->dpm_table.gfx_table); + struct vega20_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.gfx_table); + int value; + + /* od percentage */ + value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100, + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value); + + return value; +} + +static int vega20_set_sclk_od( + struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct vega20_hwmgr *data = hwmgr->backend; + struct vega20_single_dpm_table *sclk_table = + &(data->dpm_table.gfx_table); + struct vega20_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.gfx_table); + uint32_t od_sclk; + int ret = 0; + + od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value; + do_div(od_sclk, 100); + od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk); + PP_ASSERT_WITH_CODE(!ret, + "[SetSclkOD] failed to set od gfxclk!", + return ret); + + /* refresh 
gfxclk table */ + ret = vega20_setup_single_dpm_table(hwmgr, sclk_table, PPCLK_GFXCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetSclkOD] failed to refresh gfxclk table!", + return ret); + + return 0; +} + +static int vega20_get_mclk_od( + struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = hwmgr->backend; + struct vega20_single_dpm_table *mclk_table = + &(data->dpm_table.mem_table); + struct vega20_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mem_table); + int value; + + /* od percentage */ + value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100, + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value); + + return value; +} + +static int vega20_set_mclk_od( + struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct vega20_hwmgr *data = hwmgr->backend; + struct vega20_single_dpm_table *mclk_table = + &(data->dpm_table.mem_table); + struct vega20_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mem_table); + uint32_t od_mclk; + int ret = 0; + + od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value; + do_div(od_mclk, 100); + od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk); + PP_ASSERT_WITH_CODE(!ret, + "[SetMclkOD] failed to set od memclk!", + return ret); + + /* refresh memclk table */ + ret = vega20_setup_single_dpm_table(hwmgr, mclk_table, PPCLK_UCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetMclkOD] failed to refresh memclk table!", + return ret); + + return 0; +} + static int vega20_populate_umdpstate_clocks( struct pp_hwmgr *hwmgr) { @@ -2604,8 +2764,17 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { vega20_get_power_profile_mode, .set_power_profile_mode = vega20_set_power_profile_mode, + /* od related */ .set_power_limit = vega20_set_power_limit, + .get_sclk_od = + vega20_get_sclk_od, + .set_sclk_od = + vega20_set_sclk_od, + .get_mclk_od = + vega20_get_mclk_od, + .set_mclk_od = + vega20_set_mclk_od, /* for sysfs to retrive/set gfxclk/memclk */ .force_clock_level = vega20_force_clock_level, -- GitLab From acd116243a0da2a082dabd920ec68d782390d66d Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 21 May 2018 10:24:57 +0800 Subject: [PATCH 0415/1692] drm/amd/powerplay: revise vega20 PPSMC_MSG_SetSoftMin/[Max]ByFreq settings UVD, VCE and Socclk also need to be taken into consideration when setting PPSMC_MSG_SetSoftMinByFreq and PPSMC_MSG_SetSoftMaxByFreq. 
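The soft/hard min and max messages in the hunks below pack the clock ID into the upper 16 bits of the SMU message argument and the frequency in MHz into the lower 16 bits. A stand-alone sketch of that encoding follows; the clock ID values used here are placeholders, not the real PPCLK_* numbering.

	#include <stdio.h>
	#include <stdint.h>

	/* placeholder IDs, for illustration only */
	enum example_ppclk { EX_PPCLK_GFXCLK = 0, EX_PPCLK_UCLK = 2, EX_PPCLK_VCLK = 5 };

	static uint32_t pack_clk_freq(uint32_t clk_id, uint32_t freq_mhz)
	{
		/* clock selector in the high half, frequency in the low half */
		return (clk_id << 16) | (freq_mhz & 0xffff);
	}

	int main(void)
	{
		/* e.g. a soft-min gfxclk request of 852 MHz */
		printf("0x%08x\n", pack_clk_freq(EX_PPCLK_GFXCLK, 852));
		return 0;
	}
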
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 109 +++++++++++++++--- 1 file changed, 96 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 1e9426fb1bf9..3f769f37e9fb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1485,31 +1485,72 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t min_freq; int ret = 0; - if (data->smu_features[GNLD_DPM_GFXCLK].enabled) + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - PPCLK_GFXCLK << 16 | - data->dpm_table.gfx_table.dpm_state.soft_min_level)), + (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))), "Failed to set soft min gfxclk !", return ret); + } if (data->smu_features[GNLD_DPM_UCLK].enabled) { + min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMinByFreq, - PPCLK_UCLK << 16 | - data->dpm_table.mem_table.dpm_state.soft_min_level)), + (PPCLK_UCLK << 16) | (min_freq & 0xffff))), "Failed to set soft min memclk !", return ret); + + min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level; PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetHardMinByFreq, - PPCLK_UCLK << 16 | - data->dpm_table.mem_table.dpm_state.hard_min_level)), + (PPCLK_UCLK << 16) | (min_freq & 0xffff))), "Failed to set hard min memclk !", return ret); } + if (data->smu_features[GNLD_DPM_UVD].enabled) { + min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_VCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min vclk!", + return ret); + + min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_DCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min dclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_VCE].enabled) { + min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_ECLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min eclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min socclk!", + return ret); + } + return ret; } @@ -1517,23 +1558,65 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t max_freq; int ret = 0; - if (data->smu_features[GNLD_DPM_GFXCLK].enabled) + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, 
PPSMC_MSG_SetSoftMaxByFreq, - PPCLK_GFXCLK << 16 | - data->dpm_table.gfx_table.dpm_state.soft_max_level)), + (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))), "Failed to set soft max gfxclk!", return ret); + } + + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level; - if (data->smu_features[GNLD_DPM_UCLK].enabled) PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_SetSoftMaxByFreq, - PPCLK_UCLK << 16 | - data->dpm_table.mem_table.dpm_state.soft_max_level)), + (PPCLK_UCLK << 16) | (max_freq & 0xffff))), "Failed to set soft max memclk!", return ret); + } + + if (data->smu_features[GNLD_DPM_UVD].enabled) { + max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_VCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max vclk!", + return ret); + + max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_DCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max dclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_VCE].enabled) { + max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_ECLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max eclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_SOCCLK].enabled) { + max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max socclk!", + return ret); + } return ret; } -- GitLab From 8c191fe3d55309f0bd12ab5fa6f2272675a89f41 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 21 May 2018 10:43:31 +0800 Subject: [PATCH 0416/1692] drm/amd/powerplay: update vega20 clocks threshold settings on power state adjust UVD, VCE and SOC clocks need to be taken into consideration. Also, the thresholds need be updated correspondingly when stable power state is selected. 
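Each clock domain in the hunks below follows the same clamping rule: the soft window spans the whole DPM table by default; when the UMDPState cap is set it is pinned to a single per-domain pstate level, and an explicit profile_min/profile_peak forced level overrides that pin with the lowest or highest entry. A minimal sketch of that rule, with hypothetical level values and a simplified parameter list:

	#include <stdio.h>
	#include <stdint.h>

	enum forced_level { LEVEL_AUTO, LEVEL_PROFILE_MIN, LEVEL_PROFILE_PEAK };

	static void pick_soft_window(const uint32_t *levels, uint32_t count,
				     int umd_pstate_cap, uint32_t pstate_level,
				     enum forced_level forced,
				     uint32_t *soft_min, uint32_t *soft_max)
	{
		/* default: the soft window spans the whole DPM table */
		*soft_min = levels[0];
		*soft_max = levels[count - 1];

		if (!umd_pstate_cap)
			return;

		/* pin both ends to the UMD pstate level, if it exists */
		if (pstate_level < count)
			*soft_min = *soft_max = levels[pstate_level];

		/* explicit profile_min/profile_peak requests override the pin */
		if (forced == LEVEL_PROFILE_MIN)
			*soft_min = *soft_max = levels[0];
		else if (forced == LEVEL_PROFILE_PEAK)
			*soft_min = *soft_max = levels[count - 1];
	}

	int main(void)
	{
		/* hypothetical gfxclk DPM levels in MHz */
		uint32_t gfx_levels[] = { 852, 991, 1084, 1287, 1434, 1550, 1606, 1700 };
		uint32_t lo, hi;

		pick_soft_window(gfx_levels, 8, 1, 2, LEVEL_AUTO, &lo, &hi);
		printf("soft window: %u..%u MHz\n", lo, hi);
		return 0;
	}
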
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 112 ++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 3f769f37e9fb..ed928c5d878c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -2503,6 +2503,23 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + /* memclk */ dpm_table = &(data->dpm_table.mem_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; @@ -2510,9 +2527,28 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* honour DAL's UCLK Hardmin */ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100)) dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100; + /* Hardmin is dependent on displayconfig */ if (disable_mclk_switching) { dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; for (i = 0; i < data->mclk_latency_table.count - 1; i++) { @@ -2528,6 +2564,82 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) if (hwmgr->display_config->nb_pstate_switch_disable) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + /* vclk */ + dpm_table = &(data->dpm_table.vclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + 
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* dclk */ + dpm_table = &(data->dpm_table.dclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* socclk */ + dpm_table = &(data->dpm_table.soc_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + + /* eclk */ + dpm_table = &(data->dpm_table.eclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + + if (PP_CAP(PHM_PlatformCaps_UMDPState)) { + if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value; + dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value; + } + + if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = 
dpm_table->dpm_levels[dpm_table->count - 1].value; + } + } + return 0; } -- GitLab From a6637313c7bf013d30fc7d501a19a93fddab7b5f Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 2 May 2018 15:45:54 +0800 Subject: [PATCH 0417/1692] drm/amdgpu: enable vega20 powerplay support Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 73c85a0282d0..dd44b50242ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -520,9 +520,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); if (adev->asic_type != CHIP_VEGA20) { amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); } + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) -- GitLab From 654f761cfa0454bbfdf50d5ed6dc004c92114a97 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 11 May 2018 14:54:50 +0800 Subject: [PATCH 0418/1692] drm/amdgpu: Add psp 11.0 support for vega20. (v2) Add psp 11.0 code for vega20 and enable it. PSP is the security processor for the GPU. It handles firmware loading and GPU resets among other things. v2: whitespace fix, enable support, adjust reg includes (Alex) Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 14 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 + drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 565 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/psp_v11_0.h | 30 ++ drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 2 - drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +- 7 files changed, 614 insertions(+), 6 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c create mode 100644 drivers/gpu/drm/amd/amdgpu/psp_v11_0.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index dd6d70a05e7c..7d7faaf299ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -88,7 +88,8 @@ amdgpu-y += \ amdgpu-y += \ amdgpu_psp.o \ psp_v3_1.o \ - psp_v10_0.o + psp_v10_0.o \ + psp_v11_0.o # add SMC block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 5b39d1399630..ab324e34cadb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -31,6 +31,7 @@ #include "soc15_common.h" #include "psp_v3_1.h" #include "psp_v10_0.h" +#include "psp_v11_0.h" static void psp_set_funcs(struct amdgpu_device *adev); @@ -52,12 +53,14 @@ static int psp_sw_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: - case CHIP_VEGA20: psp_v3_1_set_psp_funcs(psp); break; case CHIP_RAVEN: psp_v10_0_set_psp_funcs(psp); break; + case CHIP_VEGA20: + psp_v11_0_set_psp_funcs(psp); + break; default: return -EINVAL; } @@ -594,3 +597,12 @@ const struct amdgpu_ip_block_version psp_v10_0_ip_block = .rev = 0, .funcs = &psp_ip_funcs, }; + +const struct amdgpu_ip_block_version psp_v11_0_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_PSP, + .major = 11, + .minor = 0, + .rev = 
0, + .funcs = &psp_ip_funcs, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 967712fd6abd..d772545332e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -164,5 +164,6 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; int psp_gpu_reset(struct amdgpu_device *adev); +extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c new file mode 100644 index 000000000000..9c58a23adc5d --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -0,0 +1,565 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include "amdgpu.h" +#include "amdgpu_psp.h" +#include "amdgpu_ucode.h" +#include "soc15_common.h" +#include "psp_v11_0.h" + +#include "mp/mp_11_0_offset.h" +#include "mp/mp_11_0_sh_mask.h" +#include "gc/gc_9_0_offset.h" +#include "sdma0/sdma0_4_0_offset.h" +#include "nbio/nbio_7_4_offset.h" + +MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); + +/* address block */ +#define smnMP1_FIRMWARE_FLAGS 0x3010024 + +static int +psp_v11_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) +{ + switch (ucode->ucode_id) { + case AMDGPU_UCODE_ID_SDMA0: + *type = GFX_FW_TYPE_SDMA0; + break; + case AMDGPU_UCODE_ID_SDMA1: + *type = GFX_FW_TYPE_SDMA1; + break; + case AMDGPU_UCODE_ID_CP_CE: + *type = GFX_FW_TYPE_CP_CE; + break; + case AMDGPU_UCODE_ID_CP_PFP: + *type = GFX_FW_TYPE_CP_PFP; + break; + case AMDGPU_UCODE_ID_CP_ME: + *type = GFX_FW_TYPE_CP_ME; + break; + case AMDGPU_UCODE_ID_CP_MEC1: + *type = GFX_FW_TYPE_CP_MEC; + break; + case AMDGPU_UCODE_ID_CP_MEC1_JT: + *type = GFX_FW_TYPE_CP_MEC_ME1; + break; + case AMDGPU_UCODE_ID_CP_MEC2: + *type = GFX_FW_TYPE_CP_MEC; + break; + case AMDGPU_UCODE_ID_CP_MEC2_JT: + *type = GFX_FW_TYPE_CP_MEC_ME2; + break; + case AMDGPU_UCODE_ID_RLC_G: + *type = GFX_FW_TYPE_RLC_G; + break; + case AMDGPU_UCODE_ID_SMC: + *type = GFX_FW_TYPE_SMU; + break; + case AMDGPU_UCODE_ID_UVD: + *type = GFX_FW_TYPE_UVD; + break; + case AMDGPU_UCODE_ID_VCE: + *type = GFX_FW_TYPE_VCE; + break; + case AMDGPU_UCODE_ID_MAXIMUM: + default: + return -EINVAL; + } + + return 0; +} + +static int psp_v11_0_init_microcode(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + const char *chip_name; + char fw_name[30]; + int err = 0; + const struct psp_firmware_header_v1_0 *hdr; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_VEGA20: + chip_name = "vega20"; + break; + default: + BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); + err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.sos_fw); + if (err) + goto out; + + hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; + adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version); + adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes); + adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) - + le32_to_cpu(hdr->sos_size_bytes); + adev->psp.sys_start_addr = (uint8_t *)hdr + + le32_to_cpu(hdr->header.ucode_array_offset_bytes); + adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + + le32_to_cpu(hdr->sos_offset_bytes); + return 0; +out: + if (err) { + dev_err(adev->dev, + "psp v11.0: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->psp.sos_fw); + adev->psp.sos_fw = NULL; + } + + return err; +} + +static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) +{ + int ret; + uint32_t psp_gfxdrv_command_reg = 0; + struct amdgpu_device *adev = psp->adev; + uint32_t sol_reg; + + /* Check sOS sign of life register to confirm sys driver and sOS + * are already been loaded. 
+ */ + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + if (sol_reg) + return 0; + + /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + + /* Copy PSP System Driver binary to memory */ + memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); + + /* Provide the sys driver to bootrom */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, + (uint32_t)(psp->fw_pri_mc_addr >> 20)); + psp_gfxdrv_command_reg = 1 << 16; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, + psp_gfxdrv_command_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + + return ret; +} + +static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) +{ + int ret; + unsigned int psp_gfxdrv_command_reg = 0; + struct amdgpu_device *adev = psp->adev; + uint32_t sol_reg; + + /* Check sOS sign of life register to confirm sys driver and sOS + * are already been loaded. + */ + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + if (sol_reg) + return 0; + + /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + + /* Copy Secure OS binary to PSP memory */ + memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); + + /* Provide the PSP secure OS to bootrom */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, + (uint32_t)(psp->fw_pri_mc_addr >> 20)); + psp_gfxdrv_command_reg = 2 << 16; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, + psp_gfxdrv_command_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), + 0, true); + + return ret; +} + +static int psp_v11_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, + struct psp_gfx_cmd_resp *cmd) +{ + int ret; + uint64_t fw_mem_mc_addr = ucode->mc_addr; + + memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); + + cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); + cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; + + ret = psp_v11_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); + if (ret) + DRM_ERROR("Unknown firmware type\n"); + + return ret; +} + +static int psp_v11_0_ring_init(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring; + struct amdgpu_device *adev = psp->adev; + + ring = &psp->km_ring; + + ring->ring_type = ring_type; + + /* allocate 4k Page of Local Frame Buffer memory for ring */ + ring->ring_size = 0x1000; + ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + if (ret) { + ring->ring_size = 0; + return ret; + } + + return 0; +} + +static int psp_v11_0_ring_create(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + unsigned int psp_ring_reg = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + 
+ /* Write low address of the ring to C2PMSG_69 */ + psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); + /* Write high address of the ring to C2PMSG_70 */ + psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg); + /* Write size of ring to C2PMSG_71 */ + psp_ring_reg = ring->ring_size; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg); + /* Write the ring initialization command to C2PMSG_64 */ + psp_ring_reg = ring_type; + psp_ring_reg = psp_ring_reg << 16; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, false); + + return ret; +} + +static int psp_v11_0_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring; + struct amdgpu_device *adev = psp->adev; + + ring = &psp->km_ring; + + /* Write the ring destroy command to C2PMSG_64 */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + + return ret; +} + +static int psp_v11_0_ring_destroy(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + + ret = psp_v11_0_ring_stop(psp, ring_type); + if (ret) + DRM_ERROR("Fail to stop psp ring\n"); + + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + + return ret; +} + +static int psp_v11_0_cmd_submit(struct psp_context *psp, + struct amdgpu_firmware_info *ucode, + uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, + int index) +{ + unsigned int psp_write_ptr_reg = 0; + struct psp_gfx_rb_frame *write_frame = psp->km_ring.ring_mem; + struct psp_ring *ring = &psp->km_ring; + struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; + struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + + ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; + struct amdgpu_device *adev = psp->adev; + uint32_t ring_size_dw = ring->ring_size / 4; + uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; + + /* KM (GPCOM) prepare write pointer */ + psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); + + /* Update KM RB frame pointer to new frame */ + /* write_frame ptr increments by size of rb_frame in bytes */ + /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ + if ((psp_write_ptr_reg % ring_size_dw) == 0) + write_frame = ring_buffer_start; + else + write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); + /* Check invalid write_frame ptr address */ + if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { + DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", + ring_buffer_start, ring_buffer_end, write_frame); + DRM_ERROR("write_frame is pointing to address out of bounds\n"); + return -EINVAL; + } + + /* Initialize KM RB frame */ + memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); + + /* Update KM RB frame */ + write_frame->cmd_buf_addr_hi = 
upper_32_bits(cmd_buf_mc_addr); + write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); + write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); + write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); + write_frame->fence_value = index; + + /* Update the write Pointer in DWORDs */ + psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg); + + return 0; +} + +static int +psp_v11_0_sram_map(struct amdgpu_device *adev, + unsigned int *sram_offset, unsigned int *sram_addr_reg_offset, + unsigned int *sram_data_reg_offset, + enum AMDGPU_UCODE_ID ucode_id) +{ + int ret = 0; + + switch (ucode_id) { +/* TODO: needs to confirm */ +#if 0 + case AMDGPU_UCODE_ID_SMC: + *sram_offset = 0; + *sram_addr_reg_offset = 0; + *sram_data_reg_offset = 0; + break; +#endif + + case AMDGPU_UCODE_ID_CP_CE: + *sram_offset = 0x0; + *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR); + *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA); + break; + + case AMDGPU_UCODE_ID_CP_PFP: + *sram_offset = 0x0; + *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR); + *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA); + break; + + case AMDGPU_UCODE_ID_CP_ME: + *sram_offset = 0x0; + *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR); + *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA); + break; + + case AMDGPU_UCODE_ID_CP_MEC1: + *sram_offset = 0x10000; + *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR); + *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA); + break; + + case AMDGPU_UCODE_ID_CP_MEC2: + *sram_offset = 0x10000; + *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR); + *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA); + break; + + case AMDGPU_UCODE_ID_RLC_G: + *sram_offset = 0x2000; + *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR); + *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA); + break; + + case AMDGPU_UCODE_ID_SDMA0: + *sram_offset = 0x0; + *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR); + *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA); + break; + +/* TODO: needs to confirm */ +#if 0 + case AMDGPU_UCODE_ID_SDMA1: + *sram_offset = ; + *sram_addr_reg_offset = ; + break; + + case AMDGPU_UCODE_ID_UVD: + *sram_offset = ; + *sram_addr_reg_offset = ; + break; + + case AMDGPU_UCODE_ID_VCE: + *sram_offset = ; + *sram_addr_reg_offset = ; + break; +#endif + + case AMDGPU_UCODE_ID_MAXIMUM: + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static bool psp_v11_0_compare_sram_data(struct psp_context *psp, + struct amdgpu_firmware_info *ucode, + enum AMDGPU_UCODE_ID ucode_type) +{ + int err = 0; + unsigned int fw_sram_reg_val = 0; + unsigned int fw_sram_addr_reg_offset = 0; + unsigned int fw_sram_data_reg_offset = 0; + unsigned int ucode_size; + uint32_t *ucode_mem = NULL; + struct amdgpu_device *adev = psp->adev; + + err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset, + &fw_sram_data_reg_offset, ucode_type); + if (err) + return false; + + WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val); + + ucode_size = ucode->ucode_size; + ucode_mem = (uint32_t *)ucode->kaddr; + while (ucode_size) { + fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); + + if (*ucode_mem != fw_sram_reg_val) + return false; + + 
ucode_mem++; + /* 4 bytes */ + ucode_size -= 4; + } + + return true; +} + +static int psp_v11_0_mode1_reset(struct psp_context *psp) +{ + int ret; + uint32_t offset; + struct amdgpu_device *adev = psp->adev; + + offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); + + ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + + if (ret) { + DRM_INFO("psp is not working correctly before mode1 reset!\n"); + return -EINVAL; + } + + /*send the mode 1 reset command*/ + WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST); + + mdelay(1000); + + offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); + + ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + + if (ret) { + DRM_INFO("psp mode 1 reset failed!\n"); + return -EINVAL; + } + + DRM_INFO("psp mode1 reset succeed \n"); + + return 0; +} + +static const struct psp_funcs psp_v11_0_funcs = { + .init_microcode = psp_v11_0_init_microcode, + .bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv, + .bootloader_load_sos = psp_v11_0_bootloader_load_sos, + .prep_cmd_buf = psp_v11_0_prep_cmd_buf, + .ring_init = psp_v11_0_ring_init, + .ring_create = psp_v11_0_ring_create, + .ring_stop = psp_v11_0_ring_stop, + .ring_destroy = psp_v11_0_ring_destroy, + .cmd_submit = psp_v11_0_cmd_submit, + .compare_sram_data = psp_v11_0_compare_sram_data, + .mode1_reset = psp_v11_0_mode1_reset, +}; + +void psp_v11_0_set_psp_funcs(struct psp_context *psp) +{ + psp->funcs = &psp_v11_0_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.h new file mode 100644 index 000000000000..082c16c887bf --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.h @@ -0,0 +1,30 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __PSP_V11_0_H__ +#define __PSP_V11_0_H__ + +#include "amdgpu_psp.h" + +void psp_v11_0_set_psp_funcs(struct psp_context *psp); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 727071fee6f6..e1ebf770c303 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -41,8 +41,6 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin"); MODULE_FIRMWARE("amdgpu/vega10_asd.bin"); MODULE_FIRMWARE("amdgpu/vega12_sos.bin"); MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); -MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); -MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); #define smnMP1_FIRMWARE_FLAGS 0x3010028 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index dd44b50242ac..fc0cb7d38c9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -518,9 +518,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (adev->asic_type != CHIP_VEGA20) { + if (adev->asic_type == CHIP_VEGA20) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + else amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); - } if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) -- GitLab From 0df5295c99de7bd092392bc6f902db26baf99042 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Mon, 9 Jul 2018 13:47:04 -0500 Subject: [PATCH 0419/1692] drm/amdgpu/vg20: Change the load type of vega20 to psp (v2) Modified the vega20 load type to psp now that psp support is implemented. v2: squash in fixes history (Alex) Signed-off-by: Likun Gao Reviewed-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index f55f72a37ca8..b419d6e33b3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -303,12 +303,11 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_VEGA10: case CHIP_RAVEN: case CHIP_VEGA12: + case CHIP_VEGA20: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; - case CHIP_VEGA20: - return AMDGPU_FW_LOAD_DIRECT; default: DRM_ERROR("Unknown firmware load type\n"); } -- GitLab From 3490738f9be9e20ac44dff4e76ed63762453ef68 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 17 Jul 2018 10:22:37 +0800 Subject: [PATCH 0420/1692] drm/amd/powerplay: enable fclk ss by default Set fclk ss as enabled on default. 
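A minimal sketch (not part of this patch) of what changes: the FCLK spread-spectrum enable now comes from the VBIOS-provided smc_dpm_table, mirroring how the UCLK fields are already copied, instead of being hardcoded to 0. The struct layout below is simplified and hypothetical; only the copy pattern matters:

    #include <stdint.h>

    struct vbios_smc_dpm {          /* simplified stand-in for the VBIOS table */
            uint8_t  fclkspreadenabled;
            uint8_t  fclkspreadpercent;
            uint16_t fclkspreadfreq;
    };

    struct smu_pptable {            /* simplified stand-in for the SMU pptable */
            uint8_t  FclkSpreadEnabled;
            uint8_t  FclkSpreadPercent;
            uint16_t FclkSpreadFreq;
    };

    static void append_fclk_ss(struct smu_pptable *pp, const struct vbios_smc_dpm *vb)
    {
            /* honour the board's spread-spectrum setting rather than forcing it off */
            pp->FclkSpreadEnabled = vb->fclkspreadenabled;
            pp->FclkSpreadPercent = vb->fclkspreadpercent;
            pp->FclkSpreadFreq    = vb->fclkspreadfreq;
    }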
Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c index 32d24a48a947..5f1f7a32ac24 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -810,7 +810,7 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->UclkSpreadPercent = smc_dpm_table->uclkspreadpercent; ppsmc_pptable->UclkSpreadFreq = smc_dpm_table->uclkspreadfreq; - ppsmc_pptable->FclkSpreadEnabled = 0; + ppsmc_pptable->FclkSpreadEnabled = smc_dpm_table->fclkspreadenabled; ppsmc_pptable->FclkSpreadPercent = smc_dpm_table->fclkspreadpercent; ppsmc_pptable->FclkSpreadFreq = smc_dpm_table->fclkspreadfreq; -- GitLab From be6a55a11af64fc07e478ba0f964bef76521293b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 18 Jul 2018 10:59:02 +0800 Subject: [PATCH 0421/1692] drm/amd/powerplay: remove setting soc floor voltage before sending pptable SOC voltage is not able to switch and forced to low 0.8V when running HEVC. Thus the test failed. Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 9 +-------- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 1 - 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index ed928c5d878c..ad6ce148fb3f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -707,14 +707,7 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.vclock = boot_up_values.ulVClk; data->vbios_boot_state.dclock = boot_up_values.ulDClk; data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; - if (0 != boot_up_values.usVddc) { - smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_SetFloorSocVoltage, - (boot_up_values.usVddc * 4)); - data->vbios_boot_state.bsoc_vddc_lock = true; - } else { - data->vbios_boot_state.bsoc_vddc_lock = false; - } + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 130052a330b3..72e4f2a55641 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -203,7 +203,6 @@ struct vega20_dpmlevel_enable_mask { }; struct vega20_vbios_boot_state { - bool bsoc_vddc_lock; uint8_t uc_cooling_id; uint16_t vddc; uint16_t vddci; -- GitLab From d940def9ab1f02bb421e02f3e9137153705a79e6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 19 Jul 2018 18:40:25 +0800 Subject: [PATCH 0422/1692] drm/amd/powerplay: avoid enabling/disabling uvd/vce dpm twice For vega20, there are two UVD rings which share one powerplay instance. Under some case(two rings used parallel), the uvd dpm is disabled twice which causes the SMC hang. 
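The fix is a standard idempotency guard: remember the last gate state that was actually applied and return early when a second ring requests the same state, so the SMU sees exactly one enable/disable message per real transition. A self-contained sketch of the pattern (hypothetical names, not the vega20 functions):

    #include <stdbool.h>
    #include <stdio.h>

    static bool uvd_power_gated;            /* last state actually applied */

    static void smu_enable_uvd_dpm(bool enable)
    {
            /* placeholder for the real SMU message */
            printf("UVD DPM %s\n", enable ? "enabled" : "disabled");
    }

    static void power_gate_uvd(bool bgate)
    {
            if (uvd_power_gated == bgate)
                    return;                 /* duplicate request from the other ring */

            uvd_power_gated = bgate;
            smu_enable_uvd_dpm(!bgate);     /* one message per real state change */
    }

Calling power_gate_uvd(true) twice in a row, as can happen when both UVD rings go idle, now results in a single SMU request instead of two.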
Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index ad6ce148fb3f..c4302bc41a24 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -2464,6 +2464,9 @@ static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + if (data->vce_power_gated == bgate) + return ; + data->vce_power_gated = bgate; vega20_enable_disable_vce_dpm(hwmgr, !bgate); } @@ -2472,6 +2475,9 @@ static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + if (data->uvd_power_gated == bgate) + return ; + data->uvd_power_gated = bgate; vega20_enable_disable_uvd_dpm(hwmgr, !bgate); } -- GitLab From 5d923a692868609f77db4873a3ff4420853cb38c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 20 Jul 2018 10:53:31 +0800 Subject: [PATCH 0423/1692] drm/amd/powerplay: correct the argument for PPSMC_MSG_SetUclkFastSwitch The argument was set wrongly. Fast/slow switch was asked when there is actually a slow/fast switch needed. Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index c4302bc41a24..c5bdb2b4b921 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1839,7 +1839,7 @@ static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr, if (data->smu_features[GNLD_DPM_UCLK].enabled) return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetUclkFastSwitch, - has_disp ? 0 : 1); + has_disp ? 1 : 0); return 0; } -- GitLab From 16ed0ff2755c852c3222bf53d4a0d97f3665733e Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 20 Jul 2018 10:56:21 +0800 Subject: [PATCH 0424/1692] drm/amd/powerplay: allow slow switch only if NBPState enabled Otherwise there may be potential SMU performance issues. Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index c5bdb2b4b921..1170f233d9e2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1896,7 +1896,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( int ret = 0; if ((hwmgr->display_config->num_display > 1) && - !hwmgr->display_config->multi_monitor_in_sync) + !hwmgr->display_config->multi_monitor_in_sync && + !hwmgr->display_config->nb_pstate_switch_disable) vega20_notify_smc_display_change(hwmgr, false); else vega20_notify_smc_display_change(hwmgr, true); -- GitLab From efa7ac67a80f31a35c6c3e2b40841439bf107fa2 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 30 Jul 2018 14:01:00 +0800 Subject: [PATCH 0425/1692] drm/amd/powerplay: remove max DCEFCLK limitation The latest SMU fw removes the limitation that required UCLK >= DCEFCLK. 
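For reference, the constraint being dropped was a one-line clamp applied while computing the maximum sustainable clocks: DCEFCLK was capped to UCLK so the old firmware rule could never be violated. A sketch of the removed logic (simplified names):

    #include <stdint.h>

    struct max_sustainable_clocks {
            uint32_t uclock;        /* max sustainable memory clock */
            uint32_t dcef_clock;    /* max sustainable display engine clock */
    };

    /* Old behaviour, now removed: newer SMU firmware accepts DCEFCLK above UCLK. */
    static void clamp_dcefclk_to_uclk(struct max_sustainable_clocks *clocks)
    {
            if (clocks->uclock < clocks->dcef_clock)
                    clocks->dcef_clock = clocks->uclock;
    }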
Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 1170f233d9e2..d7c4334da250 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1356,9 +1356,6 @@ static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr) if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; - if (max_sustainable_clocks->uclock < max_sustainable_clocks->dcef_clock) - max_sustainable_clocks->dcef_clock = max_sustainable_clocks->uclock; - return 0; } -- GitLab From ff50e15b00b0d107df58d0f95065015196015543 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 10 Aug 2018 14:27:56 +0800 Subject: [PATCH 0426/1692] drm/amd/powerplay: added voltage boot time calibration Run AFLL BTC after upload pptable and before enabling all smu features. Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index d7c4334da250..fb32b28afa66 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -756,6 +756,11 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) return 0; } +static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc); +} + static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = @@ -1391,6 +1396,11 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to initialize SMC table!", return result); + result = vega20_run_btc_afll(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to run btc afll!", + return result); + result = vega20_enable_all_smu_features(hwmgr); PP_ASSERT_WITH_CODE(!result, "[EnableDPMTasks] Failed to enable all smu features!", -- GitLab From 54d682d9a5b357eb711994fa94ef1bc44d7ce9d9 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 26 Jul 2018 12:31:34 +0800 Subject: [PATCH 0427/1692] drm/amdgpu/gfx9: Update gfx9 golden settings. Update the goldensettings for vega20. 
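For context on the mechanism (a sketch, not the actual soc15 helper): each golden-settings entry carries a register, an AND mask and an OR value, and is applied at init time as a masked read-modify-write, so only the bits owned by the entry change. The update here clears one bit of the mmDB_DEBUG2 value (0x420 -> 0x400) within the 0xf00fffff mask. Roughly:

    #include <stdint.h>

    struct golden_entry {
            uint32_t reg;           /* register offset                  */
            uint32_t and_mask;      /* bits owned by this entry         */
            uint32_t or_value;      /* golden value for those bits      */
    };

    /* Hypothetical MMIO accessors, assumed for illustration. */
    uint32_t reg_read(uint32_t reg);
    void reg_write(uint32_t reg, uint32_t val);

    static void program_golden(const struct golden_entry *tbl, unsigned int count)
    {
            for (unsigned int i = 0; i < count; i++) {
                    uint32_t v = reg_read(tbl[i].reg);

                    v &= ~tbl[i].and_mask;      /* clear the owned field   */
                    v |= tbl[i].or_value;       /* program the golden bits */
                    reg_write(tbl[i].reg, v);
            }
    }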
Signed-off-by: Feifei Xu Signed-off-by: Evan Quan Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index fd31d3b27819..76d979e276a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -82,7 +82,7 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); static const struct soc15_reg_golden golden_settings_gc_9_0[] = { - SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), -- GitLab From 218a9fbc8a28f63dc0990070b81ad5574e87846c Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 18 Jul 2018 16:00:03 +0800 Subject: [PATCH 0428/1692] drm/amdgpu: update vega20 sdma golden settings Updated vega20 SDMA0 and SDMA1 golden settings. Signed-off-by: Evan Quan Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 68 +++++++++++++++++++++----- 1 file changed, 57 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e7ca4623cfb9..407ed8a271b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -27,10 +27,10 @@ #include "amdgpu_ucode.h" #include "amdgpu_trace.h" -#include "sdma0/sdma0_4_0_offset.h" -#include "sdma0/sdma0_4_0_sh_mask.h" -#include "sdma1/sdma1_4_0_offset.h" -#include "sdma1/sdma1_4_0_sh_mask.h" +#include "sdma0/sdma0_4_2_offset.h" +#include "sdma0/sdma0_4_2_sh_mask.h" +#include "sdma1/sdma1_4_2_offset.h" +#include "sdma1/sdma1_4_2_sh_mask.h" #include "hdp/hdp_4_0_offset.h" #include "sdma0/sdma0_4_1_default.h" @@ -98,8 +98,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) }; -static const struct soc15_reg_golden golden_settings_sdma_4_1[] = -{ +static const struct soc15_reg_golden golden_settings_sdma_4_1[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100), @@ -112,26 +111,67 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) }; -static const struct soc15_reg_golden golden_settings_sdma_4_2[] = +static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = { + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000), +}; + +static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), 
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RD_BURST_CNTL, 0x0000000f, 0x00000003), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), +}; + +static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RD_BURST_CNTL, 0x0000000f, 0x00000003), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + 
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), }; static const struct soc15_reg_golden golden_settings_sdma_rv1[] = @@ -168,8 +208,14 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) break; case CHIP_VEGA20: soc15_program_register_sequence(adev, - golden_settings_sdma_4_2, - ARRAY_SIZE(golden_settings_sdma_4_2)); + golden_settings_sdma0_4_2_init, + ARRAY_SIZE(golden_settings_sdma0_4_2_init)); + soc15_program_register_sequence(adev, + golden_settings_sdma0_4_2, + ARRAY_SIZE(golden_settings_sdma0_4_2)); + soc15_program_register_sequence(adev, + golden_settings_sdma1_4_2, + ARRAY_SIZE(golden_settings_sdma1_4_2)); break; case CHIP_RAVEN: soc15_program_register_sequence(adev, -- GitLab From 3082be1aeaeef52a907b9d5bff3c50011504c687 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 14 Aug 2018 10:33:25 +0800 Subject: [PATCH 0429/1692] drm/amdgpu/psp: Enlarge PSP TMR SIZE from 3M to 4M. Enlarge the PSP TMR SIZE to 4M for dual UVD fw front-door loading. Signed-off-by: Feifei Xu Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index ab324e34cadb..58e20385eab5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -163,7 +163,7 @@ static int psp_tmr_init(struct psp_context *psp) * Note: this memory need be reserved till the driver * uninitializes. 
*/ - ret = amdgpu_bo_create_kernel(psp->adev, 0x300000, 0x100000, + ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000, AMDGPU_GEM_DOMAIN_VRAM, &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); @@ -179,7 +179,9 @@ static int psp_tmr_load(struct psp_context *psp) if (!cmd) return -ENOMEM; - psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, 0x300000); + psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE); + DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n", + PSP_TMR_SIZE, psp->tmr_mc_addr); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index d772545332e2..981887c928b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -32,6 +32,7 @@ #define PSP_CMD_BUFFER_SIZE 0x1000 #define PSP_ASD_SHARED_MEM_SIZE 0x4000 #define PSP_1_MEG 0x100000 +#define PSP_TMR_SIZE 0x400000 struct psp_context; -- GitLab From 6dddaeef4f9e0445a75261f88e29de5a24e302c6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 14 Aug 2018 11:44:44 -0500 Subject: [PATCH 0430/1692] drm/amdgpu: remove experimental flag for vega20 Now that PSP and SMU support is in place. Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 75c9433ef300..2221f6b1dd7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -770,12 +770,12 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, /* Vega 20 */ - {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, -- GitLab From a54594752ad48de75dc0158f0c9b124177235efc Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 14 Aug 2018 17:31:09 +0800 Subject: [PATCH 0431/1692] drm/amdgpu: Cancel the delay work when suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cancel the delay work to avoid the corner case that ib test was not running when suspend Reviewed-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 99a0e478499b..9c594763ddff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ 
-2722,6 +2722,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) if (fbcon) amdgpu_fbdev_set_suspend(adev, 1); + cancel_delayed_work_sync(&adev->late_init_work); + if (!amdgpu_device_has_dc_support(adev)) { /* turn off display hw */ drm_modeset_lock_all(dev); -- GitLab From e51ee68ff2eef2a0117a26ec28d64f86081df4e8 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 13 Aug 2018 18:37:39 +0800 Subject: [PATCH 0432/1692] drm/amd/pp: OverDrive gfx domain voltage on Tonga Also ajust the gfx domain voltage on Tonga when user overdriver the voltage. For Tonga, Driver do not update user's setting to voltage table in smu, we only pick up a minimum value from voltage table that not less than the user's setting. v2: fix a typo Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index ab759e38e4ea..04b7da0e39a6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4857,6 +4857,7 @@ static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, podn_vdd_dep_in_backend->entries[input_level].clk = input_clk; podn_dpm_table_in_backend->entries[input_level].vddc = input_vol; podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol; + podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol; } else { return -EINVAL; } -- GitLab From ff30e9e8509cb877dc7cbc776b36c70f5bdd290f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 10 Aug 2018 18:50:32 +0800 Subject: [PATCH 0433/1692] drm/amdgpu: fix integer overflow test in amdgpu_bo_list_create() We accidentally left out the size of the amdgpu_bo_list struct. It could lead to memory corruption on 32 bit systems. You'd have to pick the absolute maximum and set "num_entries == 59652323" then size would wrap to 16 bytes. Fixes: 920990cb080a ("drm/amdgpu: allocate the bo_list array after the list") Signed-off-by: Dan Carpenter Reviewed-by: Huang Rui Reviewed-by: Bas Nieuwenhuizen Signed-off-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index d472a2c8399f..b80243d3972e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -67,7 +67,8 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, unsigned i; int r; - if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry)) + if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list)) + / sizeof(struct amdgpu_bo_list_entry)) return -EINVAL; size = sizeof(struct amdgpu_bo_list); -- GitLab From 8acc7254577a3007029b4f13c759bf07d6719075 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 14 Aug 2018 09:41:12 +0200 Subject: [PATCH 0434/1692] drm/scheduler: trivial error handling fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Return -ENOMEM when allocating the rq_list fails. 
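Both this fix and the amdgpu_bo_list_create() fix above harden allocation paths. The overflow check in the bo_list fix is worth spelling out: the header size has to be subtracted from SIZE_MAX before dividing by the per-entry size, because a num_entries value that survives the plain division can still wrap the final header-plus-array sum on a 32-bit size_t. A standalone sketch of the guard, with illustrative sizes rather than the real struct sizes:

    #include <stdint.h>
    #include <stddef.h>
    #include <errno.h>

    #define HDR_SIZE    56u         /* assumed header size, illustration only    */
    #define ENTRY_SIZE  72u         /* assumed per-entry size, illustration only */

    static int bo_list_size(size_t num_entries, size_t *out_size)
    {
            /* Reject counts where HDR_SIZE + num_entries * ENTRY_SIZE would wrap. */
            if (num_entries > (SIZE_MAX - HDR_SIZE) / ENTRY_SIZE)
                    return -EINVAL;

            *out_size = HDR_SIZE + num_entries * ENTRY_SIZE;
            return 0;
    }

With SIZE_MAX / ENTRY_SIZE alone as the bound, a maximal num_entries can leave less headroom than the header needs, wrapping the 32-bit sum to a tiny allocation, which is the corruption scenario described in the commit message.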
Signed-off-by: Christian König Reviewed-by: Huang Rui Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index f566405f49e3..85c1f95752cc 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -191,6 +191,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, entity->num_rq_list = num_rq_list; entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *), GFP_KERNEL); + if (!entity->rq_list) + return -ENOMEM; + for (i = 0; i < num_rq_list; ++i) entity->rq_list[i] = rq_list[i]; entity->last_scheduled = NULL; -- GitLab From 620e762f9a984fc3f77cd6f757581a21605ce125 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 6 Aug 2018 14:25:32 +0200 Subject: [PATCH 0435/1692] drm/scheduler: move entity handling into separate file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is complex enough on it's own. Move it into a separate C file. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/Makefile | 2 +- drivers/gpu/drm/scheduler/gpu_scheduler.c | 441 +-------------------- drivers/gpu/drm/scheduler/sched_entity.c | 459 ++++++++++++++++++++++ include/drm/gpu_scheduler.h | 28 +- 4 files changed, 484 insertions(+), 446 deletions(-) create mode 100644 drivers/gpu/drm/scheduler/sched_entity.c diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile index 7665883f81d4..f23785d4b3c8 100644 --- a/drivers/gpu/drm/scheduler/Makefile +++ b/drivers/gpu/drm/scheduler/Makefile @@ -20,6 +20,6 @@ # OTHER DEALINGS IN THE SOFTWARE. # # -gpu-sched-y := gpu_scheduler.o sched_fence.o +gpu-sched-y := gpu_scheduler.o sched_fence.o sched_entity.o obj-$(CONFIG_DRM_SCHED) += gpu-sched.o diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 85c1f95752cc..9ca741f3a0bc 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -58,8 +58,6 @@ #define to_drm_sched_job(sched_job) \ container_of((sched_job), struct drm_sched_job, queue_node) -static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); -static void drm_sched_wakeup(struct drm_gpu_scheduler *sched); static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); /** @@ -86,8 +84,8 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, * * Adds a scheduler entity to the run queue. */ -static void drm_sched_rq_add_entity(struct drm_sched_rq *rq, - struct drm_sched_entity *entity) +void drm_sched_rq_add_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity) { if (!list_empty(&entity->list)) return; @@ -104,8 +102,8 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq, * * Removes a scheduler entity from the run queue. */ -static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, - struct drm_sched_entity *entity) +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity) { if (list_empty(&entity->list)) return; @@ -158,301 +156,6 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) return NULL; } -/** - * drm_sched_entity_init - Init a context entity used by scheduler when - * submit to HW ring. 
- * - * @entity: scheduler entity to init - * @rq_list: the list of run queue on which jobs from this - * entity can be submitted - * @num_rq_list: number of run queue in rq_list - * @guilty: atomic_t set to 1 when a job on this queue - * is found to be guilty causing a timeout - * - * Note: the rq_list should have atleast one element to schedule - * the entity - * - * Returns 0 on success or a negative error code on failure. -*/ -int drm_sched_entity_init(struct drm_sched_entity *entity, - struct drm_sched_rq **rq_list, - unsigned int num_rq_list, - atomic_t *guilty) -{ - int i; - - if (!(entity && rq_list && num_rq_list > 0 && rq_list[0])) - return -EINVAL; - - memset(entity, 0, sizeof(struct drm_sched_entity)); - INIT_LIST_HEAD(&entity->list); - entity->rq = rq_list[0]; - entity->guilty = guilty; - entity->num_rq_list = num_rq_list; - entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *), - GFP_KERNEL); - if (!entity->rq_list) - return -ENOMEM; - - for (i = 0; i < num_rq_list; ++i) - entity->rq_list[i] = rq_list[i]; - entity->last_scheduled = NULL; - - spin_lock_init(&entity->rq_lock); - spsc_queue_init(&entity->job_queue); - - atomic_set(&entity->fence_seq, 0); - entity->fence_context = dma_fence_context_alloc(2); - - return 0; -} -EXPORT_SYMBOL(drm_sched_entity_init); - -/** - * drm_sched_entity_is_idle - Check if entity is idle - * - * @entity: scheduler entity - * - * Returns true if the entity does not have any unscheduled jobs. - */ -static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) -{ - rmb(); - - if (list_empty(&entity->list) || - spsc_queue_peek(&entity->job_queue) == NULL) - return true; - - return false; -} - -/** - * drm_sched_entity_is_ready - Check if entity is ready - * - * @entity: scheduler entity - * - * Return true if entity could provide a job. - */ -static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) -{ - if (spsc_queue_peek(&entity->job_queue) == NULL) - return false; - - if (READ_ONCE(entity->dependency)) - return false; - - return true; -} - -/** - * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load - * - * @entity: scheduler entity - * - * Return the pointer to the rq with least load. - */ -static struct drm_sched_rq * -drm_sched_entity_get_free_sched(struct drm_sched_entity *entity) -{ - struct drm_sched_rq *rq = NULL; - unsigned int min_jobs = UINT_MAX, num_jobs; - int i; - - for (i = 0; i < entity->num_rq_list; ++i) { - num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs); - if (num_jobs < min_jobs) { - min_jobs = num_jobs; - rq = entity->rq_list[i]; - } - } - - return rq; -} - -static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_job *job = container_of(cb, struct drm_sched_job, - finish_cb); - drm_sched_fence_finished(job->s_fence); - WARN_ON(job->s_fence->parent); - dma_fence_put(&job->s_fence->finished); - job->sched->ops->free_job(job); -} - - -/** - * drm_sched_entity_flush - Flush a context entity - * - * @entity: scheduler entity - * @timeout: time to wait in for Q to become empty in jiffies. - * - * Splitting drm_sched_entity_fini() into two functions, The first one does the waiting, - * removes the entity from the runqueue and returns an error when the process was killed. 
- * - * Returns the remaining time in jiffies left from the input timeout - */ -long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) -{ - struct drm_gpu_scheduler *sched; - struct task_struct *last_user; - long ret = timeout; - - sched = entity->rq->sched; - /** - * The client will not queue more IBs during this fini, consume existing - * queued IBs or discard them on SIGKILL - */ - if (current->flags & PF_EXITING) { - if (timeout) - ret = wait_event_timeout( - sched->job_scheduled, - drm_sched_entity_is_idle(entity), - timeout); - } else - wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity)); - - - /* For killed process disable any more IBs enqueue right now */ - last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); - if ((!last_user || last_user == current->group_leader) && - (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) - drm_sched_rq_remove_entity(entity->rq, entity); - - return ret; -} -EXPORT_SYMBOL(drm_sched_entity_flush); - -/** - * drm_sched_entity_cleanup - Destroy a context entity - * - * @entity: scheduler entity - * - * This should be called after @drm_sched_entity_do_release. It goes over the - * entity and signals all jobs with an error code if the process was killed. - * - */ -void drm_sched_entity_fini(struct drm_sched_entity *entity) -{ - struct drm_gpu_scheduler *sched; - - sched = entity->rq->sched; - drm_sched_rq_remove_entity(entity->rq, entity); - - /* Consumption of existing IBs wasn't completed. Forcefully - * remove them here. - */ - if (spsc_queue_peek(&entity->job_queue)) { - struct drm_sched_job *job; - int r; - - /* Park the kernel for a moment to make sure it isn't processing - * our enity. - */ - kthread_park(sched->thread); - kthread_unpark(sched->thread); - if (entity->dependency) { - dma_fence_remove_callback(entity->dependency, - &entity->cb); - dma_fence_put(entity->dependency); - entity->dependency = NULL; - } - - while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { - struct drm_sched_fence *s_fence = job->s_fence; - drm_sched_fence_scheduled(s_fence); - dma_fence_set_error(&s_fence->finished, -ESRCH); - - /* - * When pipe is hanged by older entity, new entity might - * not even have chance to submit it's first job to HW - * and so entity->last_scheduled will remain NULL - */ - if (!entity->last_scheduled) { - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); - } else { - r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb, - drm_sched_entity_kill_jobs_cb); - if (r == -ENOENT) - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", r); - } - } - } - - dma_fence_put(entity->last_scheduled); - entity->last_scheduled = NULL; - kfree(entity->rq_list); -} -EXPORT_SYMBOL(drm_sched_entity_fini); - -/** - * drm_sched_entity_fini - Destroy a context entity - * - * @entity: scheduler entity - * - * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() - */ -void drm_sched_entity_destroy(struct drm_sched_entity *entity) -{ - drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); - drm_sched_entity_fini(entity); -} -EXPORT_SYMBOL(drm_sched_entity_destroy); - -static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); - drm_sched_wakeup(entity->rq->sched); -} - -static void drm_sched_entity_clear_dep(struct 
dma_fence *f, struct dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); -} - -/** - * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority - */ -static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq, - enum drm_sched_priority priority) -{ - *rq = &(*rq)->sched->sched_rq[priority]; -} - -/** - * drm_sched_entity_set_priority - Sets priority of the entity - * - * @entity: scheduler entity - * @priority: scheduler priority - * - * Update the priority of runqueus used for the entity. - */ -void drm_sched_entity_set_priority(struct drm_sched_entity *entity, - enum drm_sched_priority priority) -{ - unsigned int i; - - spin_lock(&entity->rq_lock); - - for (i = 0; i < entity->num_rq_list; ++i) - drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority); - - drm_sched_rq_remove_entity(entity->rq, entity); - drm_sched_entity_set_rq_priority(&entity->rq, priority); - drm_sched_rq_add_entity(entity->rq, entity); - - spin_unlock(&entity->rq_lock); -} -EXPORT_SYMBOL(drm_sched_entity_set_priority); - /** * drm_sched_dependency_optimized * @@ -479,140 +182,6 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence, } EXPORT_SYMBOL(drm_sched_dependency_optimized); -static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) -{ - struct drm_gpu_scheduler *sched = entity->rq->sched; - struct dma_fence * fence = entity->dependency; - struct drm_sched_fence *s_fence; - - if (fence->context == entity->fence_context || - fence->context == entity->fence_context + 1) { - /* - * Fence is a scheduled/finished fence from a job - * which belongs to the same entity, we can ignore - * fences from ourself - */ - dma_fence_put(entity->dependency); - return false; - } - - s_fence = to_drm_sched_fence(fence); - if (s_fence && s_fence->sched == sched) { - - /* - * Fence is from the same scheduler, only need to wait for - * it to be scheduled - */ - fence = dma_fence_get(&s_fence->scheduled); - dma_fence_put(entity->dependency); - entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - drm_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; - } - - if (!dma_fence_add_callback(entity->dependency, &entity->cb, - drm_sched_entity_wakeup)) - return true; - - dma_fence_put(entity->dependency); - return false; -} - -static struct drm_sched_job * -drm_sched_entity_pop_job(struct drm_sched_entity *entity) -{ - struct drm_gpu_scheduler *sched = entity->rq->sched; - struct drm_sched_job *sched_job = to_drm_sched_job( - spsc_queue_peek(&entity->job_queue)); - - if (!sched_job) - return NULL; - - while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { - if (drm_sched_entity_add_dependency_cb(entity)) { - - trace_drm_sched_job_wait_dep(sched_job, entity->dependency); - return NULL; - } - } - - /* skip jobs from entity that marked guilty */ - if (entity->guilty && atomic_read(entity->guilty)) - dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); - - dma_fence_put(entity->last_scheduled); - entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); - - spsc_queue_pop(&entity->job_queue); - return sched_job; -} - -/** - * drm_sched_entity_select_rq - select a new rq for the entity - * - * @entity: scheduler entity - * - * Check all prerequisites and select a new rq for the entity for load - * balancing. 
- */ -static void drm_sched_entity_select_rq(struct drm_sched_entity *entity) -{ - struct dma_fence *fence; - struct drm_sched_rq *rq; - - if (!spsc_queue_count(&entity->job_queue) == 0 || - entity->num_rq_list <= 1) - return; - - fence = READ_ONCE(entity->last_scheduled); - if (fence && !dma_fence_is_signaled(fence)) - return; - - rq = drm_sched_entity_get_free_sched(entity); - spin_lock(&entity->rq_lock); - drm_sched_rq_remove_entity(entity->rq, entity); - entity->rq = rq; - spin_unlock(&entity->rq_lock); -} - -/** - * drm_sched_entity_push_job - Submit a job to the entity's job queue - * - * @sched_job: job to submit - * @entity: scheduler entity - * - * Note: To guarantee that the order of insertion to queue matches - * the job's fence sequence number this function should be - * called with drm_sched_job_init under common lock. - * - * Returns 0 for success, negative error code otherwise. - */ -void drm_sched_entity_push_job(struct drm_sched_job *sched_job, - struct drm_sched_entity *entity) -{ - bool first; - - trace_drm_sched_job(sched_job, entity); - atomic_inc(&entity->rq->sched->num_jobs); - WRITE_ONCE(entity->last_user, current->group_leader); - first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); - - /* first job wakes up scheduler */ - if (first) { - /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); - drm_sched_rq_add_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); - drm_sched_wakeup(entity->rq->sched); - } -} -EXPORT_SYMBOL(drm_sched_entity_push_job); - /* job_finish is called after hw fence signaled */ static void drm_sched_job_finish(struct work_struct *work) @@ -840,7 +409,7 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched) * @sched: scheduler instance * */ -static void drm_sched_wakeup(struct drm_gpu_scheduler *sched) +void drm_sched_wakeup(struct drm_gpu_scheduler *sched) { if (drm_sched_ready(sched)) wake_up_interruptible(&sched->wake_up_worker); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c new file mode 100644 index 000000000000..1053f27af9df --- /dev/null +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -0,0 +1,459 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include + +#include "gpu_scheduler_trace.h" + +#define to_drm_sched_job(sched_job) \ + container_of((sched_job), struct drm_sched_job, queue_node) + +/** + * drm_sched_entity_init - Init a context entity used by scheduler when + * submit to HW ring. + * + * @entity: scheduler entity to init + * @rq_list: the list of run queue on which jobs from this + * entity can be submitted + * @num_rq_list: number of run queue in rq_list + * @guilty: atomic_t set to 1 when a job on this queue + * is found to be guilty causing a timeout + * + * Note: the rq_list should have atleast one element to schedule + * the entity + * + * Returns 0 on success or a negative error code on failure. +*/ +int drm_sched_entity_init(struct drm_sched_entity *entity, + struct drm_sched_rq **rq_list, + unsigned int num_rq_list, + atomic_t *guilty) +{ + int i; + + if (!(entity && rq_list && num_rq_list > 0 && rq_list[0])) + return -EINVAL; + + memset(entity, 0, sizeof(struct drm_sched_entity)); + INIT_LIST_HEAD(&entity->list); + entity->rq = rq_list[0]; + entity->guilty = guilty; + entity->num_rq_list = num_rq_list; + entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *), + GFP_KERNEL); + if (!entity->rq_list) + return -ENOMEM; + + for (i = 0; i < num_rq_list; ++i) + entity->rq_list[i] = rq_list[i]; + entity->last_scheduled = NULL; + + spin_lock_init(&entity->rq_lock); + spsc_queue_init(&entity->job_queue); + + atomic_set(&entity->fence_seq, 0); + entity->fence_context = dma_fence_context_alloc(2); + + return 0; +} +EXPORT_SYMBOL(drm_sched_entity_init); + +/** + * drm_sched_entity_is_idle - Check if entity is idle + * + * @entity: scheduler entity + * + * Returns true if the entity does not have any unscheduled jobs. + */ +static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) +{ + rmb(); + + if (list_empty(&entity->list) || + spsc_queue_peek(&entity->job_queue) == NULL) + return true; + + return false; +} + +/** + * drm_sched_entity_is_ready - Check if entity is ready + * + * @entity: scheduler entity + * + * Return true if entity could provide a job. + */ +bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) +{ + if (spsc_queue_peek(&entity->job_queue) == NULL) + return false; + + if (READ_ONCE(entity->dependency)) + return false; + + return true; +} + +/** + * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load + * + * @entity: scheduler entity + * + * Return the pointer to the rq with least load. + */ +static struct drm_sched_rq * +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity) +{ + struct drm_sched_rq *rq = NULL; + unsigned int min_jobs = UINT_MAX, num_jobs; + int i; + + for (i = 0; i < entity->num_rq_list; ++i) { + num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs); + if (num_jobs < min_jobs) { + min_jobs = num_jobs; + rq = entity->rq_list[i]; + } + } + + return rq; +} + +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, + finish_cb); + drm_sched_fence_finished(job->s_fence); + WARN_ON(job->s_fence->parent); + dma_fence_put(&job->s_fence->finished); + job->sched->ops->free_job(job); +} + + +/** + * drm_sched_entity_flush - Flush a context entity + * + * @entity: scheduler entity + * @timeout: time to wait in for Q to become empty in jiffies. 
+ * + * Splitting drm_sched_entity_fini() into two functions, The first one does the waiting, + * removes the entity from the runqueue and returns an error when the process was killed. + * + * Returns the remaining time in jiffies left from the input timeout + */ +long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) +{ + struct drm_gpu_scheduler *sched; + struct task_struct *last_user; + long ret = timeout; + + sched = entity->rq->sched; + /** + * The client will not queue more IBs during this fini, consume existing + * queued IBs or discard them on SIGKILL + */ + if (current->flags & PF_EXITING) { + if (timeout) + ret = wait_event_timeout( + sched->job_scheduled, + drm_sched_entity_is_idle(entity), + timeout); + } else { + wait_event_killable(sched->job_scheduled, + drm_sched_entity_is_idle(entity)); + } + + /* For killed process disable any more IBs enqueue right now */ + last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); + if ((!last_user || last_user == current->group_leader) && + (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) + drm_sched_rq_remove_entity(entity->rq, entity); + + return ret; +} +EXPORT_SYMBOL(drm_sched_entity_flush); + +/** + * drm_sched_entity_cleanup - Destroy a context entity + * + * @entity: scheduler entity + * + * This should be called after @drm_sched_entity_do_release. It goes over the + * entity and signals all jobs with an error code if the process was killed. + * + */ +void drm_sched_entity_fini(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched; + + sched = entity->rq->sched; + drm_sched_rq_remove_entity(entity->rq, entity); + + /* Consumption of existing IBs wasn't completed. Forcefully + * remove them here. + */ + if (spsc_queue_peek(&entity->job_queue)) { + struct drm_sched_job *job; + int r; + + /* Park the kernel for a moment to make sure it isn't processing + * our enity. 
+ */ + kthread_park(sched->thread); + kthread_unpark(sched->thread); + if (entity->dependency) { + dma_fence_remove_callback(entity->dependency, + &entity->cb); + dma_fence_put(entity->dependency); + entity->dependency = NULL; + } + + while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { + struct drm_sched_fence *s_fence = job->s_fence; + drm_sched_fence_scheduled(s_fence); + dma_fence_set_error(&s_fence->finished, -ESRCH); + + /* + * When pipe is hanged by older entity, new entity might + * not even have chance to submit it's first job to HW + * and so entity->last_scheduled will remain NULL + */ + if (!entity->last_scheduled) { + drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + } else { + r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb, + drm_sched_entity_kill_jobs_cb); + if (r == -ENOENT) + drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", r); + } + } + } + + dma_fence_put(entity->last_scheduled); + entity->last_scheduled = NULL; + kfree(entity->rq_list); +} +EXPORT_SYMBOL(drm_sched_entity_fini); + +/** + * drm_sched_entity_fini - Destroy a context entity + * + * @entity: scheduler entity + * + * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() + */ +void drm_sched_entity_destroy(struct drm_sched_entity *entity) +{ + drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); + drm_sched_entity_fini(entity); +} +EXPORT_SYMBOL(drm_sched_entity_destroy); + +static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_entity *entity = + container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; + dma_fence_put(f); + drm_sched_wakeup(entity->rq->sched); +} + +static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_entity *entity = + container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; + dma_fence_put(f); +} + +/** + * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority + */ +static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq, + enum drm_sched_priority priority) +{ + *rq = &(*rq)->sched->sched_rq[priority]; +} + +/** + * drm_sched_entity_set_priority - Sets priority of the entity + * + * @entity: scheduler entity + * @priority: scheduler priority + * + * Update the priority of runqueus used for the entity. 
+ */ +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority) +{ + unsigned int i; + + spin_lock(&entity->rq_lock); + + for (i = 0; i < entity->num_rq_list; ++i) + drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority); + + drm_sched_rq_remove_entity(entity->rq, entity); + drm_sched_entity_set_rq_priority(&entity->rq, priority); + drm_sched_rq_add_entity(entity->rq, entity); + + spin_unlock(&entity->rq_lock); +} +EXPORT_SYMBOL(drm_sched_entity_set_priority); + +static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->rq->sched; + struct dma_fence * fence = entity->dependency; + struct drm_sched_fence *s_fence; + + if (fence->context == entity->fence_context || + fence->context == entity->fence_context + 1) { + /* + * Fence is a scheduled/finished fence from a job + * which belongs to the same entity, we can ignore + * fences from ourself + */ + dma_fence_put(entity->dependency); + return false; + } + + s_fence = to_drm_sched_fence(fence); + if (s_fence && s_fence->sched == sched) { + + /* + * Fence is from the same scheduler, only need to wait for + * it to be scheduled + */ + fence = dma_fence_get(&s_fence->scheduled); + dma_fence_put(entity->dependency); + entity->dependency = fence; + if (!dma_fence_add_callback(fence, &entity->cb, + drm_sched_entity_clear_dep)) + return true; + + /* Ignore it when it is already scheduled */ + dma_fence_put(fence); + return false; + } + + if (!dma_fence_add_callback(entity->dependency, &entity->cb, + drm_sched_entity_wakeup)) + return true; + + dma_fence_put(entity->dependency); + return false; +} + +struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->rq->sched; + struct drm_sched_job *sched_job = to_drm_sched_job( + spsc_queue_peek(&entity->job_queue)); + + if (!sched_job) + return NULL; + + while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { + if (drm_sched_entity_add_dependency_cb(entity)) { + + trace_drm_sched_job_wait_dep(sched_job, entity->dependency); + return NULL; + } + } + + /* skip jobs from entity that marked guilty */ + if (entity->guilty && atomic_read(entity->guilty)) + dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); + + dma_fence_put(entity->last_scheduled); + entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); + + spsc_queue_pop(&entity->job_queue); + return sched_job; +} + +/** + * drm_sched_entity_select_rq - select a new rq for the entity + * + * @entity: scheduler entity + * + * Check all prerequisites and select a new rq for the entity for load + * balancing. 
+ */ +void drm_sched_entity_select_rq(struct drm_sched_entity *entity) +{ + struct dma_fence *fence; + struct drm_sched_rq *rq; + + if (!spsc_queue_count(&entity->job_queue) == 0 || + entity->num_rq_list <= 1) + return; + + fence = READ_ONCE(entity->last_scheduled); + if (fence && !dma_fence_is_signaled(fence)) + return; + + rq = drm_sched_entity_get_free_sched(entity); + spin_lock(&entity->rq_lock); + drm_sched_rq_remove_entity(entity->rq, entity); + entity->rq = rq; + spin_unlock(&entity->rq_lock); +} + +/** + * drm_sched_entity_push_job - Submit a job to the entity's job queue + * + * @sched_job: job to submit + * @entity: scheduler entity + * + * Note: To guarantee that the order of insertion to queue matches + * the job's fence sequence number this function should be + * called with drm_sched_job_init under common lock. + * + * Returns 0 for success, negative error code otherwise. + */ +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity) +{ + bool first; + + trace_drm_sched_job(sched_job, entity); + atomic_inc(&entity->rq->sched->num_jobs); + WRITE_ONCE(entity->last_user, current->group_leader); + first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); + + /* first job wakes up scheduler */ + if (first) { + /* Add the entity to the run queue */ + spin_lock(&entity->rq_lock); + drm_sched_rq_add_entity(entity->rq, entity); + spin_unlock(&entity->rq_lock); + drm_sched_wakeup(entity->rq->sched); + } +} +EXPORT_SYMBOL(drm_sched_entity_push_job); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 22c0f88f7d8f..919ae572f775 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -288,6 +288,21 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name); void drm_sched_fini(struct drm_gpu_scheduler *sched); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_wakeup(struct drm_gpu_scheduler *sched); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +void drm_sched_rq_add_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); int drm_sched_entity_init(struct drm_sched_entity *entity, struct drm_sched_rq **rq_list, @@ -296,22 +311,17 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); void drm_sched_entity_fini(struct drm_sched_entity *entity); void drm_sched_entity_destroy(struct drm_sched_entity *entity); +void drm_sched_entity_select_rq(struct drm_sched_entity *entity); +struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity); void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority); +bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); + struct drm_sched_fence *drm_sched_fence_create( struct drm_sched_entity *s_entity, void *owner); void drm_sched_fence_scheduled(struct drm_sched_fence *fence); void 
drm_sched_fence_finished(struct drm_sched_fence *fence); -int drm_sched_job_init(struct drm_sched_job *job, - struct drm_sched_entity *entity, - void *owner); -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, - struct drm_sched_job *job); -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); -bool drm_sched_dependency_optimized(struct dma_fence* fence, - struct drm_sched_entity *entity); -void drm_sched_job_kickout(struct drm_sched_job *s_job); #endif -- GitLab From 7b10574eac0b44f99e8e1d3ea9345a78d1fcaf07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 6 Aug 2018 14:58:56 +0200 Subject: [PATCH 0436/1692] drm/scheduler: cleanup entity coding style MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cleanup coding style in sched_entity.c Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/sched_entity.c | 167 +++++++++++++++-------- 1 file changed, 110 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 1053f27af9df..1416edb2642a 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -44,7 +44,7 @@ * the entity * * Returns 0 on success or a negative error code on failure. -*/ + */ int drm_sched_entity_init(struct drm_sched_entity *entity, struct drm_sched_rq **rq_list, unsigned int num_rq_list, @@ -88,7 +88,7 @@ EXPORT_SYMBOL(drm_sched_entity_init); */ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) { - rmb(); + rmb(); /* for list_empty to work without lock */ if (list_empty(&entity->list) || spsc_queue_peek(&entity->job_queue) == NULL) @@ -140,26 +140,15 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity) return rq; } -static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_job *job = container_of(cb, struct drm_sched_job, - finish_cb); - drm_sched_fence_finished(job->s_fence); - WARN_ON(job->s_fence->parent); - dma_fence_put(&job->s_fence->finished); - job->sched->ops->free_job(job); -} - - /** * drm_sched_entity_flush - Flush a context entity * * @entity: scheduler entity * @timeout: time to wait in for Q to become empty in jiffies. * - * Splitting drm_sched_entity_fini() into two functions, The first one does the waiting, - * removes the entity from the runqueue and returns an error when the process was killed. + * Splitting drm_sched_entity_fini() into two functions, The first one does the + * waiting, removes the entity from the runqueue and returns an error when the + * process was killed. * * Returns the remaining time in jiffies left from the input timeout */ @@ -173,7 +162,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) /** * The client will not queue more IBs during this fini, consume existing * queued IBs or discard them on SIGKILL - */ + */ if (current->flags & PF_EXITING) { if (timeout) ret = wait_event_timeout( @@ -195,6 +184,65 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) } EXPORT_SYMBOL(drm_sched_entity_flush); +/** + * drm_sched_entity_kill_jobs - helper for drm_sched_entity_kill_jobs + * + * @f: signaled fence + * @cb: our callback structure + * + * Signal the scheduler finished fence when the entity in question is killed. 
+ */ +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, + finish_cb); + + drm_sched_fence_finished(job->s_fence); + WARN_ON(job->s_fence->parent); + dma_fence_put(&job->s_fence->finished); + job->sched->ops->free_job(job); +} + +/** + * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed + * + * @entity: entity which is cleaned up + * + * Makes sure that all remaining jobs in an entity are killed before it is + * destroyed. + */ +static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) +{ + struct drm_sched_job *job; + int r; + + while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { + struct drm_sched_fence *s_fence = job->s_fence; + + drm_sched_fence_scheduled(s_fence); + dma_fence_set_error(&s_fence->finished, -ESRCH); + + /* + * When pipe is hanged by older entity, new entity might + * not even have chance to submit it's first job to HW + * and so entity->last_scheduled will remain NULL + */ + if (!entity->last_scheduled) { + drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + continue; + } + + r = dma_fence_add_callback(entity->last_scheduled, + &job->finish_cb, + drm_sched_entity_kill_jobs_cb); + if (r == -ENOENT) + drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", r); + } +} + /** * drm_sched_entity_cleanup - Destroy a context entity * @@ -215,9 +263,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) * remove them here. */ if (spsc_queue_peek(&entity->job_queue)) { - struct drm_sched_job *job; - int r; - /* Park the kernel for a moment to make sure it isn't processing * our enity. */ @@ -230,27 +275,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) entity->dependency = NULL; } - while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { - struct drm_sched_fence *s_fence = job->s_fence; - drm_sched_fence_scheduled(s_fence); - dma_fence_set_error(&s_fence->finished, -ESRCH); - - /* - * When pipe is hanged by older entity, new entity might - * not even have chance to submit it's first job to HW - * and so entity->last_scheduled will remain NULL - */ - if (!entity->last_scheduled) { - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); - } else { - r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb, - drm_sched_entity_kill_jobs_cb); - if (r == -ENOENT) - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", r); - } - } + drm_sched_entity_kill_jobs(entity); } dma_fence_put(entity->last_scheduled); @@ -273,21 +298,31 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_destroy); -static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) +/** + * drm_sched_entity_clear_dep - callback to clear the entities dependency + */ +static void drm_sched_entity_clear_dep(struct dma_fence *f, + struct dma_fence_cb *cb) { struct drm_sched_entity *entity = container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; dma_fence_put(f); - drm_sched_wakeup(entity->rq->sched); } -static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) +/** + * drm_sched_entity_clear_dep - callback to clear the entities dependency and + * wake up scheduler + */ +static void drm_sched_entity_wakeup(struct dma_fence *f, + struct dma_fence_cb *cb) { struct drm_sched_entity 
*entity = container_of(cb, struct drm_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); + + drm_sched_entity_clear_dep(f, cb); + drm_sched_wakeup(entity->rq->sched); } /** @@ -325,19 +360,27 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity, } EXPORT_SYMBOL(drm_sched_entity_set_priority); +/** + * drm_sched_entity_add_dependency_cb - add callback for the entities dependency + * + * @entity: entity with dependency + * + * Add a callback to the current dependency of the entity to wake up the + * scheduler when the entity becomes available. + */ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) { struct drm_gpu_scheduler *sched = entity->rq->sched; - struct dma_fence * fence = entity->dependency; + struct dma_fence *fence = entity->dependency; struct drm_sched_fence *s_fence; if (fence->context == entity->fence_context || - fence->context == entity->fence_context + 1) { - /* - * Fence is a scheduled/finished fence from a job - * which belongs to the same entity, we can ignore - * fences from ourself - */ + fence->context == entity->fence_context + 1) { + /* + * Fence is a scheduled/finished fence from a job + * which belongs to the same entity, we can ignore + * fences from ourself + */ dma_fence_put(entity->dependency); return false; } @@ -369,19 +412,29 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) return false; } +/** + * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity + * + * @entity: entity to get the job from + * + * Process all dependencies and try to get one job from the entities queue. + */ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) { struct drm_gpu_scheduler *sched = entity->rq->sched; - struct drm_sched_job *sched_job = to_drm_sched_job( - spsc_queue_peek(&entity->job_queue)); + struct drm_sched_job *sched_job; + sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); if (!sched_job) return NULL; - while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { + while ((entity->dependency = + sched->ops->dependency(sched_job, entity))) { + if (drm_sched_entity_add_dependency_cb(entity)) { - trace_drm_sched_job_wait_dep(sched_job, entity->dependency); + trace_drm_sched_job_wait_dep(sched_job, + entity->dependency); return NULL; } } -- GitLab From 23f67981fd92859a156fc7d2e41f98d826f68a6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 6 Aug 2018 15:01:45 +0200 Subject: [PATCH 0437/1692] drm/scheduler: rename gpu_scheduler.c to sched_main.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Better match the naming of the other components. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/Makefile | 2 +- drivers/gpu/drm/scheduler/{gpu_scheduler.c => sched_main.c} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename drivers/gpu/drm/scheduler/{gpu_scheduler.c => sched_main.c} (100%) diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile index f23785d4b3c8..53863621829f 100644 --- a/drivers/gpu/drm/scheduler/Makefile +++ b/drivers/gpu/drm/scheduler/Makefile @@ -20,6 +20,6 @@ # OTHER DEALINGS IN THE SOFTWARE. 
# # -gpu-sched-y := gpu_scheduler.o sched_fence.o sched_entity.o +gpu-sched-y := sched_main.o sched_fence.o sched_entity.o obj-$(CONFIG_DRM_SCHED) += gpu-sched.o diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/sched_main.c similarity index 100% rename from drivers/gpu/drm/scheduler/gpu_scheduler.c rename to drivers/gpu/drm/scheduler/sched_main.c -- GitLab From aa5873dca46385454d36c3dca31d66d7b64574be Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 14 Aug 2018 14:53:51 -0400 Subject: [PATCH 0438/1692] drm/amdgpu: Change VCE booting with firmware loaded by PSP With PSP firmware loading, TMR mc address is supposed to be used. Signed-off-by: James Zhu Acked-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 2e4d1b5f6243..1c9471890bf7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -601,6 +601,7 @@ static int vce_v4_0_resume(void *handle) static void vce_v4_0_mc_resume(struct amdgpu_device *adev) { uint32_t offset, size; + uint64_t tmr_mc_addr; WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16)); WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000); @@ -613,21 +614,25 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev) WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0); WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0); + offset = AMDGPU_VCE_FIRMWARE_OFFSET; + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + tmr_mc_addr = (uint64_t)(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi) << 32 | + adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo; WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8)); + (tmr_mc_addr >> 8)); WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0), - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff); + (tmr_mc_addr >> 40) & 0xff); + WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0); } else { WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), (adev->vce.gpu_addr >> 8)); WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0), (adev->vce.gpu_addr >> 40) & 0xff); + WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000); } - offset = AMDGPU_VCE_FIRMWARE_OFFSET; size = VCE_V4_0_FW_SIZE; - WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000); WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size); WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8)); -- GitLab From 3798e9a6e6390b873a745d6240ac9646bd2bf514 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 12 Jul 2018 15:15:21 +0200 Subject: [PATCH 0439/1692] drm/amdgpu: use new scheduler load balancing for VMs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of the fixed round robin use let the scheduler balance the load of page table updates. 
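

For illustration, a minimal sketch of the entity setup this change moves to, with the field and function names taken from the diff below; the wrapper function itself is hypothetical and error handling is trimmed. Instead of the driver hashing a VM onto one SDMA ring round-robin, every kernel-priority run queue is handed to drm_sched_entity_init() so the scheduler is free to place page table updates on the least loaded engine.

static int amdgpu_vm_init_pte_entity(struct amdgpu_device *adev,
				     struct amdgpu_vm *vm)
{
	/*
	 * vm_pte_rqs[] is filled by the per-ASIC *_set_vm_pte_funcs()
	 * callbacks with the DRM_SCHED_PRIORITY_KERNEL run queue of every
	 * SDMA instance; passing the whole array instead of a single rq
	 * is what enables the scheduler-side load balancing.
	 */
	return drm_sched_entity_init(&vm->entity,
				     adev->vm_manager.vm_pte_rqs,
				     adev->vm_manager.vm_pte_num_rqs,
				     NULL);
}
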
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 12 ++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 +++---- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 12 +++++++----- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 12 +++++++----- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 12 +++++++----- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 12 +++++++----- drivers/gpu/drm/amd/amdgpu/si_dma.c | 12 +++++++----- 8 files changed, 41 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9c594763ddff..0b4815c1e181 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2348,7 +2348,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->mman.buffer_funcs = NULL; adev->mman.buffer_funcs_ring = NULL; adev->vm_manager.vm_pte_funcs = NULL; - adev->vm_manager.vm_pte_num_rings = 0; + adev->vm_manager.vm_pte_num_rqs = 0; adev->gmc.gmc_funcs = NULL; adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e40ca8676418..995ad5e83611 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2569,9 +2569,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *root; const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT(adev) * 8); - unsigned ring_instance; - struct amdgpu_ring *ring; - struct drm_sched_rq *rq; unsigned long size; uint64_t flags; int r, i; @@ -2587,12 +2584,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, INIT_LIST_HEAD(&vm->freed); /* create scheduler entity for page table updates */ - - ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); - ring_instance %= adev->vm_manager.vm_pte_num_rings; - ring = adev->vm_manager.vm_pte_rings[ring_instance]; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL); + r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs, + adev->vm_manager.vm_pte_num_rqs, NULL); if (r) return r; @@ -2901,7 +2894,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) adev->vm_manager.seqno[i] = 0; - atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); spin_lock_init(&adev->vm_manager.prt_lock); atomic_set(&adev->vm_manager.num_prt_users, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ffda53420f8c..1162c2bf3138 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -265,10 +265,9 @@ struct amdgpu_vm_manager { /* vram base address for page table entry */ u64 vram_base_offset; /* vm pte handling */ - const struct amdgpu_vm_pte_funcs *vm_pte_funcs; - struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; - unsigned vm_pte_num_rings; - atomic_t vm_pte_next_ring; + const struct amdgpu_vm_pte_funcs *vm_pte_funcs; + struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS]; + unsigned vm_pte_num_rqs; /* partial resident texture handling */ spinlock_t prt_lock; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index d0fa2aac2388..154b1499b07e 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -1386,15 +1386,17 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) { + struct drm_gpu_scheduler *sched; unsigned i; if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) - adev->vm_manager.vm_pte_rings[i] = - &adev->sdma.instance[i].ring; - - adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 15ae4bc9c072..c403bdf8ad70 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -1312,15 +1312,17 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) { + struct drm_gpu_scheduler *sched; unsigned i; if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) - adev->vm_manager.vm_pte_rings[i] = - &adev->sdma.instance[i].ring; - - adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 1e07ff274d73..2677d6a1bf42 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1752,15 +1752,17 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) { + struct drm_gpu_scheduler *sched; unsigned i; if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) - adev->vm_manager.vm_pte_rings[i] = - &adev->sdma.instance[i].ring; - - adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 407ed8a271b7..df138401fbf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1796,15 +1796,17 @@ static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = { static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) { + struct drm_gpu_scheduler *sched; unsigned i; if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) - adev->vm_manager.vm_pte_rings[i] = - &adev->sdma.instance[i].ring; - - adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = 
&adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index b75d901ba3c4..fafaf259b17b 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -879,15 +879,17 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) { + struct drm_gpu_scheduler *sched; unsigned i; if (adev->vm_manager.vm_pte_funcs == NULL) { adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) - adev->vm_manager.vm_pte_rings[i] = - &adev->sdma.instance[i].ring; - - adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } } -- GitLab From 845e6fdf3b52ae8d8cde8ddafa6bbd60214f2bd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 13 Jul 2018 09:12:44 +0200 Subject: [PATCH 0440/1692] drm/amdgpu: use scheduler load balancing for SDMA CS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Start to use the scheduler load balancing for userspace SDMA command submissions. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 02d563cfb4a7..3ff8042b8f89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -48,7 +48,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct drm_file *filp, struct amdgpu_ctx *ctx) { - unsigned i, j; + struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS]; + unsigned i, j, num_sdma_rqs; int r; if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) @@ -80,18 +81,34 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->init_priority = priority; ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; - /* create context entity for each ring */ + num_sdma_rqs = 0; for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i]; struct drm_sched_rq *rq; rq = &ring->sched.sched_rq[priority]; + if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) + sdma_rqs[num_sdma_rqs++] = rq; + } + + /* create context entity for each ring */ + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; if (ring == &adev->gfx.kiq.ring) continue; - r = drm_sched_entity_init(&ctx->rings[i].entity, - &rq, 1, &ctx->guilty); + if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) { + r = drm_sched_entity_init(&ctx->rings[i].entity, + sdma_rqs, num_sdma_rqs, + &ctx->guilty); + } else { + struct drm_sched_rq *rq; + + rq = &ring->sched.sched_rq[priority]; + r = drm_sched_entity_init(&ctx->rings[i].entity, + &rq, 1, &ctx->guilty); + } if (r) goto failed; } -- GitLab From 72a4c072ca9f2640ea303c399bd3224b69a543d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 16 Jul 2018 14:59:26 +0200 Subject: [PATCH 0441/1692] drm/amdgpu: use scheduler load balancing for compute CS MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Start to use the scheduler load balancing for userspace compute command submissions. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 3ff8042b8f89..a078e68e0319 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -49,7 +49,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) { struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS]; - unsigned i, j, num_sdma_rqs; + struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS]; + unsigned i, j, num_sdma_rqs, num_comp_rqs; int r; if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) @@ -82,6 +83,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; num_sdma_rqs = 0; + num_comp_rqs = 0; for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i]; struct drm_sched_rq *rq; @@ -89,6 +91,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, rq = &ring->sched.sched_rq[priority]; if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) sdma_rqs[num_sdma_rqs++] = rq; + else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + comp_rqs[num_comp_rqs++] = rq; } /* create context entity for each ring */ @@ -102,6 +106,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, r = drm_sched_entity_init(&ctx->rings[i].entity, sdma_rqs, num_sdma_rqs, &ctx->guilty); + } else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + r = drm_sched_entity_init(&ctx->rings[i].entity, + comp_rqs, num_comp_rqs, + &ctx->guilty); } else { struct drm_sched_rq *rq; -- GitLab From 869a53d4d7d7976d039b9389aa90b6f3d29ed234 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 16 Jul 2018 15:19:20 +0200 Subject: [PATCH 0442/1692] drm/amdgpu: remove the queue manager MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed any more since that is now done by the scheduler. 
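

As a rough illustration of what "done by the scheduler" means here: an entity initialized on several run queues is migrated by drm_sched_entity_select_rq() (added earlier in this series) to the scheduler with the fewest queued jobs, which is what the per-IP LRU mapping in the queue manager used to approximate by hand. The helper below is a simplified sketch, not the exact upstream body; it only uses fields that appear in the scheduler patches above (rq_list, num_rq_list, num_jobs) and the function name is made up.

/* Simplified sketch of the load-balancing pick; see
 * drm_sched_entity_select_rq() / drm_sched_entity_get_free_sched(). */
static struct drm_sched_rq *
example_pick_least_loaded_rq(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *best = NULL;
	unsigned int min_jobs = UINT_MAX;
	unsigned int i;

	for (i = 0; i < entity->num_rq_list; ++i) {
		unsigned int jobs =
			atomic_read(&entity->rq_list[i]->sched->num_jobs);

		if (jobs < min_jobs) {
			min_jobs = jobs;
			best = entity->rq_list[i];
		}
	}

	return best;
}
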
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 27 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 22 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 67 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 316 ------------------ 5 files changed, 75 insertions(+), 360 deletions(-) delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7d7faaf299ef..860cb8731c7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -51,8 +51,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \ - amdgpu_ids.o + amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 07924d41ee89..20e81df5cd94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -477,29 +477,6 @@ struct amdgpu_ib { extern const struct drm_sched_backend_ops amdgpu_sched_ops; -/* - * Queue manager - */ -struct amdgpu_queue_mapper { - int hw_ip; - struct mutex lock; - /* protected by lock */ - struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS]; -}; - -struct amdgpu_queue_mgr { - struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM]; -}; - -int amdgpu_queue_mgr_init(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr); -int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr); -int amdgpu_queue_mgr_map(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring); - /* * context related structures */ @@ -513,7 +490,6 @@ struct amdgpu_ctx_ring { struct amdgpu_ctx { struct kref refcount; struct amdgpu_device *adev; - struct amdgpu_queue_mgr queue_mgr; unsigned reset_counter; unsigned reset_counter_query; uint32_t vram_lost_counter; @@ -537,6 +513,9 @@ struct amdgpu_ctx_mgr { struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); +int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, + u32 hw_ip, u32 instance, u32 ring, + struct amdgpu_ring **out_ring); int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, struct dma_fence *fence, uint64_t *seq); struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dc3b2f980d87..55667ab4fbf5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1008,8 +1008,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, return -EINVAL; } - r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type, - chunk_ib->ip_instance, chunk_ib->ring, &ring); + r = amdgpu_ctx_get_ring(parser->ctx, chunk_ib->ip_type, + chunk_ib->ip_instance, chunk_ib->ring, + &ring); if (r) return r; @@ -1067,10 +1068,9 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, if (ctx == NULL) return -EINVAL; - r = amdgpu_queue_mgr_map(p->adev, 
&ctx->queue_mgr, - deps[i].ip_type, - deps[i].ip_instance, - deps[i].ring, &ring); + r = amdgpu_ctx_get_ring(ctx, deps[i].ip_type, + deps[i].ip_instance, + deps[i].ring, &ring); if (r) { amdgpu_ctx_put(ctx); return r; @@ -1331,7 +1331,6 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { union drm_amdgpu_wait_cs *wait = data; - struct amdgpu_device *adev = dev->dev_private; unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); struct amdgpu_ring *ring = NULL; struct amdgpu_ctx *ctx; @@ -1342,9 +1341,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, if (ctx == NULL) return -EINVAL; - r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, - wait->in.ip_type, wait->in.ip_instance, - wait->in.ring, &ring); + r = amdgpu_ctx_get_ring(ctx, wait->in.ip_type, wait->in.ip_instance, + wait->in.ring, &ring); if (r) { amdgpu_ctx_put(ctx); return r; @@ -1391,8 +1389,8 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, if (ctx == NULL) return ERR_PTR(-EINVAL); - r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type, - user->ip_instance, user->ring, &ring); + r = amdgpu_ctx_get_ring(ctx, user->ip_type, user->ip_instance, + user->ring, &ring); if (r) { amdgpu_ctx_put(ctx); return ERR_PTR(r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index a078e68e0319..e5acc72b05d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -121,10 +121,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, goto failed; } - r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr); - if (r) - goto failed; - return 0; failed: @@ -150,13 +146,72 @@ static void amdgpu_ctx_fini(struct kref *ref) kfree(ctx->fences); ctx->fences = NULL; - amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr); - mutex_destroy(&ctx->lock); kfree(ctx); } +int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, + u32 hw_ip, u32 instance, u32 ring, + struct amdgpu_ring **out_ring) +{ + struct amdgpu_device *adev = ctx->adev; + unsigned num_rings = 0; + + /* Right now all IPs have only one instance - multiple rings. 
*/ + if (instance != 0) { + DRM_DEBUG("invalid ip instance: %d\n", instance); + return -EINVAL; + } + + switch (hw_ip) { + case AMDGPU_HW_IP_GFX: + *out_ring = &adev->gfx.gfx_ring[ring]; + num_rings = adev->gfx.num_gfx_rings; + break; + case AMDGPU_HW_IP_COMPUTE: + *out_ring = &adev->gfx.compute_ring[ring]; + num_rings = adev->gfx.num_compute_rings; + break; + case AMDGPU_HW_IP_DMA: + *out_ring = &adev->sdma.instance[ring].ring; + num_rings = adev->sdma.num_instances; + break; + case AMDGPU_HW_IP_UVD: + *out_ring = &adev->uvd.inst[0].ring; + num_rings = adev->uvd.num_uvd_inst; + break; + case AMDGPU_HW_IP_VCE: + *out_ring = &adev->vce.ring[ring]; + num_rings = adev->vce.num_rings; + break; + case AMDGPU_HW_IP_UVD_ENC: + *out_ring = &adev->uvd.inst[0].ring_enc[ring]; + num_rings = adev->uvd.num_enc_rings; + break; + case AMDGPU_HW_IP_VCN_DEC: + *out_ring = &adev->vcn.ring_dec; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCN_ENC: + *out_ring = &adev->vcn.ring_enc[ring]; + num_rings = adev->vcn.num_enc_rings; + break; + case AMDGPU_HW_IP_VCN_JPEG: + *out_ring = &adev->vcn.ring_jpeg; + num_rings = 1; + break; + default: + DRM_ERROR("unknown HW IP type: %d\n", hw_ip); + return -EINVAL; + } + + if (ring > num_rings) + return -EINVAL; + + return 0; +} + static int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *filp, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c deleted file mode 100644 index a172bba32b45..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Copyright 2017 Valve Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Andres Rodriguez - */ - -#include "amdgpu.h" -#include "amdgpu_ring.h" - -static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper, - int hw_ip) -{ - if (!mapper) - return -EINVAL; - - if (hw_ip > AMDGPU_MAX_IP_NUM) - return -EINVAL; - - mapper->hw_ip = hw_ip; - mutex_init(&mapper->lock); - - memset(mapper->queue_map, 0, sizeof(mapper->queue_map)); - - return 0; -} - -static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper, - int ring) -{ - return mapper->queue_map[ring]; -} - -static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper, - int ring, struct amdgpu_ring *pring) -{ - if (WARN_ON(mapper->queue_map[ring])) { - DRM_ERROR("Un-expected ring re-map\n"); - return -EINVAL; - } - - mapper->queue_map[ring] = pring; - - return 0; -} - -static int amdgpu_identity_map(struct amdgpu_device *adev, - struct amdgpu_queue_mapper *mapper, - u32 ring, - struct amdgpu_ring **out_ring) -{ - switch (mapper->hw_ip) { - case AMDGPU_HW_IP_GFX: - *out_ring = &adev->gfx.gfx_ring[ring]; - break; - case AMDGPU_HW_IP_COMPUTE: - *out_ring = &adev->gfx.compute_ring[ring]; - break; - case AMDGPU_HW_IP_DMA: - *out_ring = &adev->sdma.instance[ring].ring; - break; - case AMDGPU_HW_IP_UVD: - *out_ring = &adev->uvd.inst[0].ring; - break; - case AMDGPU_HW_IP_VCE: - *out_ring = &adev->vce.ring[ring]; - break; - case AMDGPU_HW_IP_UVD_ENC: - *out_ring = &adev->uvd.inst[0].ring_enc[ring]; - break; - case AMDGPU_HW_IP_VCN_DEC: - *out_ring = &adev->vcn.ring_dec; - break; - case AMDGPU_HW_IP_VCN_ENC: - *out_ring = &adev->vcn.ring_enc[ring]; - break; - case AMDGPU_HW_IP_VCN_JPEG: - *out_ring = &adev->vcn.ring_jpeg; - break; - default: - *out_ring = NULL; - DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip); - return -EINVAL; - } - - return amdgpu_update_cached_map(mapper, ring, *out_ring); -} - -static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip) -{ - switch (hw_ip) { - case AMDGPU_HW_IP_GFX: - return AMDGPU_RING_TYPE_GFX; - case AMDGPU_HW_IP_COMPUTE: - return AMDGPU_RING_TYPE_COMPUTE; - case AMDGPU_HW_IP_DMA: - return AMDGPU_RING_TYPE_SDMA; - case AMDGPU_HW_IP_UVD: - return AMDGPU_RING_TYPE_UVD; - case AMDGPU_HW_IP_VCE: - return AMDGPU_RING_TYPE_VCE; - default: - DRM_ERROR("Invalid HW IP specified %d\n", hw_ip); - return -1; - } -} - -static int amdgpu_lru_map(struct amdgpu_device *adev, - struct amdgpu_queue_mapper *mapper, - u32 user_ring, bool lru_pipe_order, - struct amdgpu_ring **out_ring) -{ - int r, i, j; - int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip); - int ring_blacklist[AMDGPU_MAX_RINGS]; - struct amdgpu_ring *ring; - - /* 0 is a valid ring index, so initialize to -1 */ - memset(ring_blacklist, 0xff, sizeof(ring_blacklist)); - - for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) { - ring = mapper->queue_map[i]; - if (ring) - ring_blacklist[j++] = ring->idx; - } - - r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist, - j, lru_pipe_order, out_ring); - if (r) - return r; - - return amdgpu_update_cached_map(mapper, user_ring, *out_ring); -} - -/** - * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct - * - * @adev: amdgpu_device pointer - * @mgr: amdgpu_queue_mgr structure holding queue information - * - * Initialize the the selected @mgr (all asics). - * - * Returns 0 on success, error on failure. 
- */ -int amdgpu_queue_mgr_init(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr) -{ - int i, r; - - if (!adev || !mgr) - return -EINVAL; - - memset(mgr, 0, sizeof(*mgr)); - - for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) { - r = amdgpu_queue_mapper_init(&mgr->mapper[i], i); - if (r) - return r; - } - - return 0; -} - -/** - * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct - * - * @adev: amdgpu_device pointer - * @mgr: amdgpu_queue_mgr structure holding queue information - * - * De-initialize the the selected @mgr (all asics). - * - * Returns 0 on success, error on failure. - */ -int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr) -{ - return 0; -} - -/** - * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring - * - * @adev: amdgpu_device pointer - * @mgr: amdgpu_queue_mgr structure holding queue information - * @hw_ip: HW IP enum - * @instance: HW instance - * @ring: user ring id - * @our_ring: pointer to mapped amdgpu_ring - * - * Map a userspace ring id to an appropriate kernel ring. Different - * policies are configurable at a HW IP level. - * - * Returns 0 on success, error on failure. - */ -int amdgpu_queue_mgr_map(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring) -{ - int i, r, ip_num_rings = 0; - struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip]; - - if (!adev || !mgr || !out_ring) - return -EINVAL; - - if (hw_ip >= AMDGPU_MAX_IP_NUM) - return -EINVAL; - - if (ring >= AMDGPU_MAX_RINGS) - return -EINVAL; - - /* Right now all IPs have only one instance - multiple rings. */ - if (instance != 0) { - DRM_DEBUG("invalid ip instance: %d\n", instance); - return -EINVAL; - } - - switch (hw_ip) { - case AMDGPU_HW_IP_GFX: - ip_num_rings = adev->gfx.num_gfx_rings; - break; - case AMDGPU_HW_IP_COMPUTE: - ip_num_rings = adev->gfx.num_compute_rings; - break; - case AMDGPU_HW_IP_DMA: - ip_num_rings = adev->sdma.num_instances; - break; - case AMDGPU_HW_IP_UVD: - for (i = 0; i < adev->uvd.num_uvd_inst; i++) { - if (!(adev->uvd.harvest_config & (1 << i))) - ip_num_rings++; - } - break; - case AMDGPU_HW_IP_VCE: - ip_num_rings = adev->vce.num_rings; - break; - case AMDGPU_HW_IP_UVD_ENC: - for (i = 0; i < adev->uvd.num_uvd_inst; i++) { - if (!(adev->uvd.harvest_config & (1 << i))) - ip_num_rings++; - } - ip_num_rings = - adev->uvd.num_enc_rings * ip_num_rings; - break; - case AMDGPU_HW_IP_VCN_DEC: - ip_num_rings = 1; - break; - case AMDGPU_HW_IP_VCN_ENC: - ip_num_rings = adev->vcn.num_enc_rings; - break; - case AMDGPU_HW_IP_VCN_JPEG: - ip_num_rings = 1; - break; - default: - DRM_DEBUG("unknown ip type: %d\n", hw_ip); - return -EINVAL; - } - - if (ring >= ip_num_rings) { - DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n", - ring, ip_num_rings, hw_ip); - return -EINVAL; - } - - mutex_lock(&mapper->lock); - - *out_ring = amdgpu_get_cached_map(mapper, ring); - if (*out_ring) { - /* cache hit */ - r = 0; - goto out_unlock; - } - - switch (mapper->hw_ip) { - case AMDGPU_HW_IP_GFX: - case AMDGPU_HW_IP_UVD: - case AMDGPU_HW_IP_VCE: - case AMDGPU_HW_IP_UVD_ENC: - case AMDGPU_HW_IP_VCN_DEC: - case AMDGPU_HW_IP_VCN_ENC: - case AMDGPU_HW_IP_VCN_JPEG: - r = amdgpu_identity_map(adev, mapper, ring, out_ring); - break; - case AMDGPU_HW_IP_DMA: - r = amdgpu_lru_map(adev, mapper, ring, false, out_ring); - break; - case AMDGPU_HW_IP_COMPUTE: - r = amdgpu_lru_map(adev, mapper, ring, true, out_ring); - break; - default: - *out_ring = NULL; - r = -EINVAL; - 
DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip); - } - -out_unlock: - mutex_unlock(&mapper->lock); - return r; -} -- GitLab From 86275d090f0196d63245796aa0e3a12fa17e61a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 16 Jul 2018 15:23:15 +0200 Subject: [PATCH 0443/1692] drm/amdgpu: remove ring lru handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed any more. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 98 ------------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 5 -- 2 files changed, 103 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 93794a85f83d..5dfd26be1eec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -135,9 +135,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) if (ring->funcs->end_use) ring->funcs->end_use(ring); - - if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) - amdgpu_ring_lru_touch(ring->adev, ring); } /** @@ -320,8 +317,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->max_dw = max_dw; ring->priority = DRM_SCHED_PRIORITY_NORMAL; mutex_init(&ring->priority_mutex); - INIT_LIST_HEAD(&ring->lru_list); - amdgpu_ring_lru_touch(adev, ring); for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) atomic_set(&ring->num_jobs[i], 0); @@ -368,99 +363,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) ring->adev->rings[ring->idx] = NULL; } -static void amdgpu_ring_lru_touch_locked(struct amdgpu_device *adev, - struct amdgpu_ring *ring) -{ - /* list_move_tail handles the case where ring isn't part of the list */ - list_move_tail(&ring->lru_list, &adev->ring_lru_list); -} - -static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring, - int *blacklist, int num_blacklist) -{ - int i; - - for (i = 0; i < num_blacklist; i++) { - if (ring->idx == blacklist[i]) - return true; - } - - return false; -} - -/** - * amdgpu_ring_lru_get - get the least recently used ring for a HW IP block - * - * @adev: amdgpu_device pointer - * @type: amdgpu_ring_type enum - * @blacklist: blacklisted ring ids array - * @num_blacklist: number of entries in @blacklist - * @lru_pipe_order: find a ring from the least recently used pipe - * @ring: output ring - * - * Retrieve the amdgpu_ring structure for the least recently used ring of - * a specific IP block (all asics). - * Returns 0 on success, error on failure. 
- */ -int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, - int *blacklist, int num_blacklist, - bool lru_pipe_order, struct amdgpu_ring **ring) -{ - struct amdgpu_ring *entry; - - /* List is sorted in LRU order, find first entry corresponding - * to the desired HW IP */ - *ring = NULL; - spin_lock(&adev->ring_lru_list_lock); - list_for_each_entry(entry, &adev->ring_lru_list, lru_list) { - if (entry->funcs->type != type) - continue; - - if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist)) - continue; - - if (!*ring) { - *ring = entry; - - /* We are done for ring LRU */ - if (!lru_pipe_order) - break; - } - - /* Move all rings on the same pipe to the end of the list */ - if (entry->pipe == (*ring)->pipe) - amdgpu_ring_lru_touch_locked(adev, entry); - } - - /* Move the ring we found to the end of the list */ - if (*ring) - amdgpu_ring_lru_touch_locked(adev, *ring); - - spin_unlock(&adev->ring_lru_list_lock); - - if (!*ring) { - DRM_ERROR("Ring LRU contains no entries for ring type:%d\n", type); - return -EINVAL; - } - - return 0; -} - -/** - * amdgpu_ring_lru_touch - mark a ring as recently being used - * - * @adev: amdgpu_device pointer - * @ring: ring to touch - * - * Move @ring to the tail of the lru list - */ -void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring) -{ - spin_lock(&adev->ring_lru_list_lock); - amdgpu_ring_lru_touch_locked(adev, ring); - spin_unlock(&adev->ring_lru_list_lock); -} - /** * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 906897a38743..409fdd9b9710 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -175,7 +175,6 @@ struct amdgpu_ring { const struct amdgpu_ring_funcs *funcs; struct amdgpu_fence_driver fence_drv; struct drm_gpu_scheduler sched; - struct list_head lru_list; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; @@ -258,10 +257,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, struct amdgpu_irq_src *irq_src, unsigned irq_type); void amdgpu_ring_fini(struct amdgpu_ring *ring); -int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, - int *blacklist, int num_blacklist, - bool lru_pipe_order, struct amdgpu_ring **ring); -void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring); void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, uint32_t reg0, uint32_t val0, uint32_t reg1, uint32_t val1); -- GitLab From 8290268f31b8c1bc3d331212b60ae7fb2262e20d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 18 Jul 2018 16:34:49 +0200 Subject: [PATCH 0444/1692] drm/amdgpu: move context related stuff to amdgpu_ctx.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Further unmangle amdgpu.h. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 59 +---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 84 +++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 57 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 20e81df5cd94..50eeb7c1350e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -28,6 +28,8 @@ #ifndef __AMDGPU_H__ #define __AMDGPU_H__ +#include "amdgpu_ctx.h" + #include #include #include @@ -477,63 +479,6 @@ struct amdgpu_ib { extern const struct drm_sched_backend_ops amdgpu_sched_ops; -/* - * context related structures - */ - -struct amdgpu_ctx_ring { - uint64_t sequence; - struct dma_fence **fences; - struct drm_sched_entity entity; -}; - -struct amdgpu_ctx { - struct kref refcount; - struct amdgpu_device *adev; - unsigned reset_counter; - unsigned reset_counter_query; - uint32_t vram_lost_counter; - spinlock_t ring_lock; - struct dma_fence **fences; - struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; - bool preamble_presented; - enum drm_sched_priority init_priority; - enum drm_sched_priority override_priority; - struct mutex lock; - atomic_t guilty; -}; - -struct amdgpu_ctx_mgr { - struct amdgpu_device *adev; - struct mutex lock; - /* protected by lock */ - struct idr ctx_handles; -}; - -struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); -int amdgpu_ctx_put(struct amdgpu_ctx *ctx); - -int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring); -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct dma_fence *fence, uint64_t *seq); -struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, - struct amdgpu_ring *ring, uint64_t seq); -void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum drm_sched_priority priority); - -int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp); - -int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id); - -void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); -void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); -void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr); -void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); - - /* * file private structure */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h new file mode 100644 index 000000000000..5664b1f54142 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -0,0 +1,84 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __AMDGPU_CTX_H__ +#define __AMDGPU_CTX_H__ + +#include "amdgpu_ring.h" + +struct drm_device; +struct drm_file; +struct amdgpu_fpriv; + +struct amdgpu_ctx_ring { + uint64_t sequence; + struct dma_fence **fences; + struct drm_sched_entity entity; +}; + +struct amdgpu_ctx { + struct kref refcount; + struct amdgpu_device *adev; + unsigned reset_counter; + unsigned reset_counter_query; + uint32_t vram_lost_counter; + spinlock_t ring_lock; + struct dma_fence **fences; + struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; + bool preamble_presented; + enum drm_sched_priority init_priority; + enum drm_sched_priority override_priority; + struct mutex lock; + atomic_t guilty; +}; + +struct amdgpu_ctx_mgr { + struct amdgpu_device *adev; + struct mutex lock; + /* protected by lock */ + struct idr ctx_handles; +}; + +struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); +int amdgpu_ctx_put(struct amdgpu_ctx *ctx); + +int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, + u32 hw_ip, u32 instance, u32 ring, + struct amdgpu_ring **out_ring); +int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, + struct dma_fence *fence, uint64_t *seq); +struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, + struct amdgpu_ring *ring, uint64_t seq); +void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, + enum drm_sched_priority priority); + +int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); + +int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id); + +void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); +void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); +void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr); +void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); + +#endif -- GitLab From 0d346a14c634120046d194377e2cb5b387a6c1c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 19 Jul 2018 14:22:25 +0200 Subject: [PATCH 0445/1692] drm/amdgpu: use entity instead of ring for CS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Further demangle ring from entity handling. 
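

The recurring pattern in the diff below: once the CS path tracks a drm_sched_entity rather than an amdgpu_ring, the ring currently backing that entity is derived from the entity's run queue only at the points where hardware-specific checks (parse_cs, UVD/VCE fence rules, ring type) are still needed. A minimal sketch of that lookup; the helper name is illustrative, while to_amdgpu_ring() is the existing accessor used throughout the diff.

static struct amdgpu_ring *
example_entity_to_ring(struct drm_sched_entity *entity)
{
	/* The entity's current rq points at the scheduler embedded in the
	 * ring, so the backing ring can be recovered on demand. */
	return to_amdgpu_ring(entity->rq->sched);
}
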
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 66 ++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 53 ++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 16 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 3 +- 6 files changed, 78 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 50eeb7c1350e..6265b88135fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -523,7 +523,7 @@ struct amdgpu_cs_parser { /* scheduler job object */ struct amdgpu_job *job; - struct amdgpu_ring *ring; + struct drm_sched_entity *entity; /* buffer objects */ struct ww_acquire_ctx ticket; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 55667ab4fbf5..313ac971eaaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -893,13 +893,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) { + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_ring *ring = p->ring; int r; /* Only for UVD/VCE VM emulation */ - if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) { + if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) { unsigned i, j; for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { @@ -940,7 +940,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, offset = m->start * AMDGPU_GPU_PAGE_SIZE; kptr += va_start - offset; - if (p->ring->funcs->parse_cs) { + if (ring->funcs->parse_cs) { memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); amdgpu_bo_kunmap(aobj); @@ -979,14 +979,15 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, { struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - int i, j; int r, ce_preempt = 0, de_preempt = 0; + struct amdgpu_ring *ring; + int i, j; for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) { struct amdgpu_cs_chunk *chunk; struct amdgpu_ib *ib; struct drm_amdgpu_cs_chunk_ib *chunk_ib; - struct amdgpu_ring *ring; + struct drm_sched_entity *entity; chunk = &parser->chunks[i]; ib = &parser->job->ibs[j]; @@ -1008,9 +1009,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, return -EINVAL; } - r = amdgpu_ctx_get_ring(parser->ctx, chunk_ib->ip_type, - chunk_ib->ip_instance, chunk_ib->ring, - &ring); + r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type, + chunk_ib->ip_instance, chunk_ib->ring, + &entity); if (r) return r; @@ -1018,14 +1019,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; - if (parser->ring && parser->ring != ring) + if (parser->entity && parser->entity != entity) return -EINVAL; - parser->ring = ring; + parser->entity = entity; - r = amdgpu_ib_get(adev, vm, - ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0, - ib); + ring = to_amdgpu_ring(entity->rq->sched); + r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ? 
+ chunk_ib->ib_bytes : 0, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -1039,12 +1040,13 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, } /* UVD & VCE fw doesn't support user fences */ + ring = to_amdgpu_ring(parser->entity->rq->sched); if (parser->job->uf_addr && ( - parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD || - parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) + ring->funcs->type == AMDGPU_RING_TYPE_UVD || + ring->funcs->type == AMDGPU_RING_TYPE_VCE)) return -EINVAL; - return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx); + return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity); } static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, @@ -1060,23 +1062,23 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, sizeof(struct drm_amdgpu_cs_chunk_dep); for (i = 0; i < num_deps; ++i) { - struct amdgpu_ring *ring; struct amdgpu_ctx *ctx; + struct drm_sched_entity *entity; struct dma_fence *fence; ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); if (ctx == NULL) return -EINVAL; - r = amdgpu_ctx_get_ring(ctx, deps[i].ip_type, - deps[i].ip_instance, - deps[i].ring, &ring); + r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type, + deps[i].ip_instance, + deps[i].ring, &entity); if (r) { amdgpu_ctx_put(ctx); return r; } - fence = amdgpu_ctx_get_fence(ctx, ring, + fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); if (IS_ERR(fence)) { r = PTR_ERR(fence); @@ -1195,9 +1197,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_ring *ring = p->ring; - struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; + struct drm_sched_entity *entity = p->entity; enum drm_sched_priority priority; + struct amdgpu_ring *ring; struct amdgpu_bo_list_entry *e; struct amdgpu_job *job; uint64_t seq; @@ -1227,7 +1229,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); - r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq); + r = amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); if (r) { dma_fence_put(p->fence); dma_fence_put(&job->base.s_fence->finished); @@ -1332,7 +1334,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_wait_cs *wait = data; unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); - struct amdgpu_ring *ring = NULL; + struct drm_sched_entity *entity; struct amdgpu_ctx *ctx; struct dma_fence *fence; long r; @@ -1341,14 +1343,14 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, if (ctx == NULL) return -EINVAL; - r = amdgpu_ctx_get_ring(ctx, wait->in.ip_type, wait->in.ip_instance, - wait->in.ring, &ring); + r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance, + wait->in.ring, &entity); if (r) { amdgpu_ctx_put(ctx); return r; } - fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle); + fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle); if (IS_ERR(fence)) r = PTR_ERR(fence); else if (fence) { @@ -1380,7 +1382,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, struct drm_file *filp, struct drm_amdgpu_fence *user) { - struct amdgpu_ring *ring; + struct drm_sched_entity *entity; struct amdgpu_ctx *ctx; struct dma_fence *fence; int r; @@ -1389,14 +1391,14 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, if (ctx == NULL) return ERR_PTR(-EINVAL); - r = amdgpu_ctx_get_ring(ctx, user->ip_type, 
user->ip_instance, - user->ring, &ring); + r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance, + user->ring, &entity); if (r) { amdgpu_ctx_put(ctx); return ERR_PTR(r); } - fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no); + fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no); amdgpu_ctx_put(ctx); return fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e5acc72b05d2..0a6cd1202ee5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -27,6 +27,9 @@ #include "amdgpu.h" #include "amdgpu_sched.h" +#define to_amdgpu_ctx_ring(e) \ + container_of((e), struct amdgpu_ctx_ring, entity) + static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) { @@ -151,12 +154,12 @@ static void amdgpu_ctx_fini(struct kref *ref) kfree(ctx); } -int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring) +int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, + u32 ring, struct drm_sched_entity **entity) { struct amdgpu_device *adev = ctx->adev; unsigned num_rings = 0; + struct amdgpu_ring *out_ring; /* Right now all IPs have only one instance - multiple rings. */ if (instance != 0) { @@ -166,39 +169,39 @@ int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, switch (hw_ip) { case AMDGPU_HW_IP_GFX: - *out_ring = &adev->gfx.gfx_ring[ring]; + out_ring = &adev->gfx.gfx_ring[ring]; num_rings = adev->gfx.num_gfx_rings; break; case AMDGPU_HW_IP_COMPUTE: - *out_ring = &adev->gfx.compute_ring[ring]; + out_ring = &adev->gfx.compute_ring[ring]; num_rings = adev->gfx.num_compute_rings; break; case AMDGPU_HW_IP_DMA: - *out_ring = &adev->sdma.instance[ring].ring; + out_ring = &adev->sdma.instance[ring].ring; num_rings = adev->sdma.num_instances; break; case AMDGPU_HW_IP_UVD: - *out_ring = &adev->uvd.inst[0].ring; + out_ring = &adev->uvd.inst[0].ring; num_rings = adev->uvd.num_uvd_inst; break; case AMDGPU_HW_IP_VCE: - *out_ring = &adev->vce.ring[ring]; + out_ring = &adev->vce.ring[ring]; num_rings = adev->vce.num_rings; break; case AMDGPU_HW_IP_UVD_ENC: - *out_ring = &adev->uvd.inst[0].ring_enc[ring]; + out_ring = &adev->uvd.inst[0].ring_enc[ring]; num_rings = adev->uvd.num_enc_rings; break; case AMDGPU_HW_IP_VCN_DEC: - *out_ring = &adev->vcn.ring_dec; + out_ring = &adev->vcn.ring_dec; num_rings = 1; break; case AMDGPU_HW_IP_VCN_ENC: - *out_ring = &adev->vcn.ring_enc[ring]; + out_ring = &adev->vcn.ring_enc[ring]; num_rings = adev->vcn.num_enc_rings; break; case AMDGPU_HW_IP_VCN_JPEG: - *out_ring = &adev->vcn.ring_jpeg; + out_ring = &adev->vcn.ring_jpeg; num_rings = 1; break; default: @@ -209,6 +212,7 @@ int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, if (ring > num_rings) return -EINVAL; + *entity = &ctx->rings[out_ring->idx].entity; return 0; } @@ -414,13 +418,14 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) return 0; } -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct dma_fence *fence, uint64_t* handler) +int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity, + struct dma_fence *fence, uint64_t* handle) { - struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; + struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); uint64_t seq = cring->sequence; - unsigned idx = 0; struct dma_fence *other = NULL; + unsigned idx = 0; idx = seq & (amdgpu_sched_jobs - 1); other = cring->fences[idx]; @@ -435,22 +440,23 @@ int 
amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, spin_unlock(&ctx->ring_lock); dma_fence_put(other); - if (handler) - *handler = seq; + if (handle) + *handle = seq; return 0; } struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, - struct amdgpu_ring *ring, uint64_t seq) + struct drm_sched_entity *entity, + uint64_t seq) { - struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; + struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); struct dma_fence *fence; spin_lock(&ctx->ring_lock); if (seq == ~0ull) - seq = ctx->rings[ring->idx].sequence - 1; + seq = cring->sequence - 1; if (seq >= cring->sequence) { spin_unlock(&ctx->ring_lock); @@ -494,9 +500,10 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, } } -int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id) +int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity) { - struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id]; + struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1); struct dma_fence *other = cring->fences[idx]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 5664b1f54142..609f925b076c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -61,20 +61,22 @@ struct amdgpu_ctx_mgr { struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); -int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring); -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct dma_fence *fence, uint64_t *seq); +int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, + u32 ring, struct drm_sched_entity **entity); +int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity, + struct dma_fence *fence, uint64_t *seq); struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, - struct amdgpu_ring *ring, uint64_t seq); + struct drm_sched_entity *entity, + uint64_t seq); void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority); int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id); +int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity); void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 8c2dab20eb36..2e87414422f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs, TP_fast_assign( __entry->bo_list = p->bo_list; - __entry->ring = p->ring->idx; + __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx; __entry->dw = p->job->ibs[i].length_dw; __entry->fences = amdgpu_fence_count_emitted( - p->ring); + to_amdgpu_ring(p->entity->rq->sched)); ), TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", __entry->bo_list, __entry->ring, __entry->dw, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 9b7f8469bc5c..e33425513a89 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1264,11 +1264,12 @@ 
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, uint32_t ib_idx) { + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; unsigned i; /* No patching necessary for the first instance */ - if (!p->ring->me) + if (!ring->me) return 0; for (i = 0; i < ib->length_dw; i += 2) { -- GitLab From 851c2509aef6ee2374d8192130f33e1166c1c2a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Wed, 15 Aug 2018 12:58:13 +0200 Subject: [PATCH 0446/1692] drm/doc: Adapt GPU scheduler references for renamed C file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: "drm/scheduler: rename gpu_scheduler.c to sched_main.c" Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- Documentation/gpu/drm-mm.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index 21b6b72a9ba8..d3acb4949e2d 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -505,7 +505,7 @@ GPU Scheduler Overview -------- -.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c +.. kernel-doc:: drivers/gpu/drm/scheduler/sched_main.c :doc: Overview Scheduler Function References @@ -514,5 +514,5 @@ Scheduler Function References .. kernel-doc:: include/drm/gpu_scheduler.h :internal: -.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c +.. kernel-doc:: drivers/gpu/drm/scheduler/sched_main.c :export: -- GitLab From d4e838431d56ac132a7f387b34e5c9f227dce428 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 14 Aug 2018 14:53:52 -0400 Subject: [PATCH 0447/1692] drm/amdgpu: added support 2nd UVD instance Added psp fw loading support for vega20 2nd UVD instance. Signed-off-by: Evan Quan Acked-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1 + drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 3 ++- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 3 +++ drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 7 +++++++ 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index a1edc70da979..b358e7519987 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -193,6 +193,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_STORAGE, AMDGPU_UCODE_ID_SMC, AMDGPU_UCODE_ID_UVD, + AMDGPU_UCODE_ID_UVD1, AMDGPU_UCODE_ID_VCE, AMDGPU_UCODE_ID_VCN, AMDGPU_UCODE_ID_MAXIMUM, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 0cf48d26c676..882bd83a28c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -189,7 +189,8 @@ enum psp_gfx_fw_type GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM = 20, GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM = 21, GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL = 22, - GFX_FW_TYPE_MAX = 23 + GFX_FW_TYPE_UVD1 = 23, + GFX_FW_TYPE_MAX = 24 }; /* Command to load HW IP FW. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 9c58a23adc5d..b70cfa3fe1b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -81,6 +81,9 @@ psp_v11_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type * case AMDGPU_UCODE_ID_VCE: *type = GFX_FW_TYPE_VCE; break; + case AMDGPU_UCODE_ID_UVD1: + *type = GFX_FW_TYPE_UVD1; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index e33425513a89..79cb3787a282 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -441,6 +441,13 @@ static int uvd_v7_0_sw_init(void *handle) adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw; adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + + if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) { + adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1; + adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + } DRM_INFO("PSP loading UVD firmware\n"); } -- GitLab From bfcea5204287b0a09dac71fa56a5d066d94d9bb1 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 14 Aug 2018 14:53:53 -0400 Subject: [PATCH 0448/1692] drm/amdgpu:change VEGA booting with firmware loaded by PSP With PSP firmware loading, TMR mc address is supposed to be used. Signed-off-by: James Zhu Acked-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 79cb3787a282..a289f6a20b6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -671,9 +671,14 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) continue; if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + i == 0 ? + adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo: + adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo); WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + i == 0 ? 
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi: + adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); offset = 0; } else { WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, @@ -681,10 +686,10 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->uvd.inst[i].gpu_addr)); offset = size; + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); } - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, -- GitLab From 8c3db1284a016dc670fe0a98afec33e001d363bc Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Wed, 15 Aug 2018 16:49:27 -0500 Subject: [PATCH 0449/1692] drm/amdgpu: fill in amdgpu_dm_remove_sink_from_freesync_module Add code to tear down freesync modules when disabled. Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 40 ++++++++++++++----- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 497a718d1bc4..0c805be054a1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5229,19 +5229,37 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, dm_con_state->freesync_capable = true; } } - - /* - * TODO figure out how to notify user-mode or DRM of freesync caps - * once we figure out how to deal with freesync in an upstreamable - * fashion - */ - } void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector) { - /* - * TODO fill in once we figure out how to deal with freesync in - * an upstreamable fashion - */ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + struct dm_connector_state *dm_con_state; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = dev->dev_private; + + if (!amdgpu_dm_connector->dc_sink || !adev->dm.freesync_module) { + DRM_ERROR("dc_sink NULL or no free_sync module.\n"); + return; + } + + if (!connector->state) { + DRM_ERROR("%s - Connector has no state", __func__); + return; + } + + dm_con_state = to_dm_connector_state(connector->state); + + amdgpu_dm_connector->min_vfreq = 0; + amdgpu_dm_connector->max_vfreq = 0; + amdgpu_dm_connector->pixel_clock_mhz = 0; + + memset(&amdgpu_dm_connector->caps, 0, sizeof(amdgpu_dm_connector->caps)); + + dm_con_state->freesync_capable = false; + + dm_con_state->user_enable.enable_for_gaming = false; + dm_con_state->user_enable.enable_for_static = false; + dm_con_state->user_enable.enable_for_video = false; } -- GitLab From 98e6436d3af5fef7ca9b59d865dd5807ede36fb9 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 21 Aug 2018 14:40:28 -0500 Subject: [PATCH 0450/1692] drm/amd/display: Refactor FreeSync module Remove dependency on internal sink map and instead use existing stream and plane state Signed-off-by: Anthony Koo Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 288 +-- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 18 +- .../display/amdgpu_dm/amdgpu_dm_mst_types.c | 10 +- drivers/gpu/drm/amd/display/dc/core/dc.c | 60 +- 
.../gpu/drm/amd/display/dc/core/dc_link_dp.c | 3 + .../gpu/drm/amd/display/dc/core/dc_resource.c | 110 +- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 6 - drivers/gpu/drm/amd/display/dc/dc_stream.h | 29 +- drivers/gpu/drm/amd/display/dc/dc_types.h | 22 +- .../display/dc/dce110/dce110_hw_sequencer.c | 34 +- .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 27 +- .../drm/amd/display/include/set_mode_types.h | 12 - .../amd/display/modules/freesync/freesync.c | 1837 ++++++----------- .../amd/display/modules/inc/mod_freesync.h | 144 +- 14 files changed, 930 insertions(+), 1670 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0c805be054a1..5f5e5ea20d78 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -839,8 +839,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) if (sink) { if (aconnector->dc_sink) { - amdgpu_dm_remove_sink_from_freesync_module( - connector); + amdgpu_dm_update_freesync_caps(connector, NULL); /* retain and release bellow are used for * bump up refcount for sink because the link don't point * to it anymore after disconnect so on next crtc to connector @@ -850,10 +849,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) dc_sink_release(aconnector->dc_sink); } aconnector->dc_sink = sink; - amdgpu_dm_add_sink_to_freesync_module( - connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, + aconnector->edid); } else { - amdgpu_dm_remove_sink_from_freesync_module(connector); + amdgpu_dm_update_freesync_caps(connector, NULL); if (!aconnector->dc_sink) aconnector->dc_sink = aconnector->dc_em_sink; else if (aconnector->dc_sink != aconnector->dc_em_sink) @@ -890,8 +889,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) /* TODO: check if we still need the S3 mode update workaround. * If yes, put it here. 
*/ if (aconnector->dc_sink) - amdgpu_dm_remove_sink_from_freesync_module( - connector); + amdgpu_dm_update_freesync_caps(connector, NULL); aconnector->dc_sink = sink; if (sink->dc_edid.length == 0) { @@ -904,10 +902,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) drm_connector_update_edid_property(connector, aconnector->edid); } - amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid); + amdgpu_dm_update_freesync_caps(connector, aconnector->edid); } else { - amdgpu_dm_remove_sink_from_freesync_module(connector); + amdgpu_dm_update_freesync_caps(connector, NULL); drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; aconnector->dc_sink = NULL; @@ -1580,26 +1578,68 @@ static void dm_bandwidth_update(struct amdgpu_device *adev) static int amdgpu_notify_freesync(struct drm_device *dev, void *data, struct drm_file *filp) { - struct mod_freesync_params freesync_params; - uint8_t num_streams; + struct drm_atomic_state *state; + struct drm_modeset_acquire_ctx ctx; + struct drm_crtc *crtc; + struct drm_connector *connector; + struct drm_connector_state *old_con_state, *new_con_state; + int ret = 0; uint8_t i; + bool enable = false; - struct amdgpu_device *adev = dev->dev_private; - int r = 0; + drm_modeset_acquire_init(&ctx, 0); + + state = drm_atomic_state_alloc(dev); + if (!state) { + ret = -ENOMEM; + goto out; + } + state->acquire_ctx = &ctx; + +retry: + drm_for_each_crtc(crtc, dev) { + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret) + goto fail; + + /* TODO rework amdgpu_dm_commit_planes so we don't need this */ + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + goto fail; + } - /* Get freesync enable flag from DRM */ + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct drm_crtc_state *new_crtc_state; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct dm_crtc_state *dm_new_crtc_state; - num_streams = dc_get_current_stream_count(adev->dm.dc); + if (!acrtc) { + ASSERT(0); + continue; + } - for (i = 0; i < num_streams; i++) { - struct dc_stream_state *stream; - stream = dc_get_stream_at_index(adev->dm.dc, i); + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - mod_freesync_update_state(adev->dm.freesync_module, - &stream, 1, &freesync_params); + dm_new_crtc_state->freesync_enabled = enable; } - return r; + ret = drm_atomic_commit(state); + +fail: + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; + } + + drm_atomic_state_put(state); + +out: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return ret; } static const struct amdgpu_display_funcs dm_display_funcs = { @@ -2563,6 +2603,10 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) dc_stream_retain(state->stream); } + state->adjust = cur->adjust; + state->vrr_infopacket = cur->vrr_infopacket; + state->freesync_enabled = cur->freesync_enabled; + /* TODO Duplicate dc_stream after objects are stream object is flattened */ return &state->base; @@ -2770,13 +2814,15 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) struct dm_connector_state *new_state = kmemdup(state, sizeof(*state), GFP_KERNEL); - if (new_state) { - __drm_atomic_helper_connector_duplicate_state(connector, - &new_state->base); - return 
&new_state->base; - } + if (!new_state) + return NULL; - return NULL; + __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); + + new_state->freesync_capable = state->freesync_capable; + new_state->freesync_enable = state->freesync_enable; + + return &new_state->base; } static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { @@ -3786,8 +3832,6 @@ static void remove_stream(struct amdgpu_device *adev, struct dc_stream_state *stream) { /* this is the update mode case */ - if (adev->dm.freesync_module) - mod_freesync_remove_stream(adev->dm.freesync_module, stream); acrtc->otg_inst = -1; acrtc->enabled = false; @@ -4055,6 +4099,11 @@ static bool commit_planes_to_stream( stream_update->dst = dc_stream->dst; stream_update->out_transfer_func = dc_stream->out_transfer_func; + if (dm_new_crtc_state->freesync_enabled != dm_old_crtc_state->freesync_enabled) { + stream_update->vrr_infopacket = &dc_stream->vrr_infopacket; + stream_update->adjust = &dc_stream->adjust; + } + for (i = 0; i < new_plane_count; i++) { updates[i].surface = plane_states[i]; updates[i].gamma = @@ -4190,6 +4239,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } + dc_stream_attach->adjust = acrtc_state->adjust; + dc_stream_attach->vrr_infopacket = acrtc_state->vrr_infopacket; if (false == commit_planes_to_stream(dm->dc, plane_states_constructed, @@ -4339,62 +4390,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } } /* for_each_crtc_in_state() */ - /* - * Add streams after required streams from new and replaced streams - * are removed from freesync module - */ - if (adev->dm.freesync_module) { - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - struct amdgpu_dm_connector *aconnector = NULL; - struct dm_connector_state *dm_new_con_state = NULL; - struct amdgpu_crtc *acrtc = NULL; - bool modeset_needed; - - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - modeset_needed = modeset_required( - new_crtc_state, - dm_new_crtc_state->stream, - dm_old_crtc_state->stream); - /* We add stream to freesync if: - * 1. Said stream is not null, and - * 2. A modeset is requested. This means that the - * stream was removed previously, and needs to be - * replaced. 
- */ - if (dm_new_crtc_state->stream == NULL || - !modeset_needed) - continue; - - acrtc = to_amdgpu_crtc(crtc); - - aconnector = - amdgpu_dm_find_first_crtc_matching_connector( - state, crtc); - if (!aconnector) { - DRM_DEBUG_DRIVER("Atomic commit: Failed to " - "find connector for acrtc " - "id:%d skipping freesync " - "init\n", - acrtc->crtc_id); - continue; - } - - mod_freesync_add_stream(adev->dm.freesync_module, - dm_new_crtc_state->stream, - &aconnector->caps); - new_con_state = drm_atomic_get_new_connector_state( - state, &aconnector->base); - dm_new_con_state = to_dm_connector_state(new_con_state); - - mod_freesync_set_user_enable(adev->dm.freesync_module, - &dm_new_crtc_state->stream, - 1, - &dm_new_con_state->user_enable); - } - } - if (dm_state->context) { dm_enable_per_frame_crtc_master_sync(dm_state->context); WARN_ON(!dc_commit_state(dm->dc, dm_state->context)); @@ -4448,6 +4443,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) WARN_ON(!status); WARN_ON(!status->plane_count); + dm_new_crtc_state->stream->adjust = dm_new_crtc_state->adjust; + dm_new_crtc_state->stream->vrr_infopacket = dm_new_crtc_state->vrr_infopacket; + /*TODO How it works with MPO ?*/ if (!commit_planes_to_stream( dm->dc, @@ -4480,11 +4478,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (dm_new_crtc_state->stream == NULL || !modeset_needed) continue; - if (adev->dm.freesync_module) - mod_freesync_notify_mode_change( - adev->dm.freesync_module, - &dm_new_crtc_state->stream, 1); - manage_dm_interrupts(adev, acrtc, true); } @@ -4667,7 +4660,42 @@ static int do_aquire_global_lock(struct drm_device *dev, return ret < 0 ? ret : 0; } -static int dm_update_crtcs_state(struct dc *dc, +void set_freesync_on_stream(struct amdgpu_display_manager *dm, + struct dm_crtc_state *new_crtc_state, + struct dm_connector_state *new_con_state, + struct dc_stream_state *new_stream) +{ + struct mod_freesync_config config = {0}; + struct mod_vrr_params vrr = {0}; + struct dc_info_packet vrr_infopacket = {0}; + struct amdgpu_dm_connector *aconnector = + to_amdgpu_dm_connector(new_con_state->base.connector); + + if (new_con_state->freesync_capable && + new_con_state->freesync_enable) { + config.state = new_crtc_state->freesync_enabled ? 
+ VRR_STATE_ACTIVE_VARIABLE : + VRR_STATE_INACTIVE; + config.min_refresh_in_uhz = + aconnector->min_vfreq * 1000000; + config.max_refresh_in_uhz = + aconnector->max_vfreq * 1000000; + } + + mod_freesync_build_vrr_params(dm->freesync_module, + new_stream, + &config, &vrr); + + mod_freesync_build_vrr_infopacket(dm->freesync_module, + new_stream, + &vrr, + &vrr_infopacket); + + new_crtc_state->adjust = vrr.adjust; + new_crtc_state->vrr_infopacket = vrr_infopacket; +} + +static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, struct drm_atomic_state *state, bool enable, bool *lock_and_validation_needed) @@ -4737,6 +4765,9 @@ static int dm_update_crtcs_state(struct dc *dc, break; } + set_freesync_on_stream(dm, dm_new_crtc_state, + dm_new_conn_state, new_stream); + if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { new_crtc_state->mode_changed = false; @@ -4745,6 +4776,9 @@ static int dm_update_crtcs_state(struct dc *dc, } } + if (dm_old_crtc_state->freesync_enabled != dm_new_crtc_state->freesync_enabled) + new_crtc_state->mode_changed = true; + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto next_crtc; @@ -4771,7 +4805,7 @@ static int dm_update_crtcs_state(struct dc *dc, /* i.e. reset mode */ if (dc_remove_stream_from_ctx( - dc, + dm->dc, dm_state->context, dm_old_crtc_state->stream) != DC_OK) { ret = -EINVAL; @@ -4808,7 +4842,7 @@ static int dm_update_crtcs_state(struct dc *dc, crtc->base.id); if (dc_add_stream_to_ctx( - dc, + dm->dc, dm_state->context, dm_new_crtc_state->stream) != DC_OK) { ret = -EINVAL; @@ -4857,6 +4891,8 @@ static int dm_update_crtcs_state(struct dc *dc, goto fail; amdgpu_dm_set_ctm(dm_new_crtc_state); } + + } return ret; @@ -5024,8 +5060,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && - !new_crtc_state->color_mgmt_changed) + !new_crtc_state->color_mgmt_changed && + (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled)) continue; if (!new_crtc_state->enable) @@ -5051,13 +5091,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } /* Disable all crtcs which require disable */ - ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed); + ret = dm_update_crtcs_state(&adev->dm, state, false, &lock_and_validation_needed); if (ret) { goto fail; } /* Enable all crtcs which require enable */ - ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed); + ret = dm_update_crtcs_state(&adev->dm, state, true, &lock_and_validation_needed); if (ret) { goto fail; } @@ -5150,8 +5190,8 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc, return capable; } -void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, - struct edid *edid) +void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, + struct edid *edid) { int i; bool edid_check_required; @@ -5170,6 +5210,18 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, return; } + if (!edid) { + dm_con_state = to_dm_connector_state(connector->state); + + amdgpu_dm_connector->min_vfreq = 0; + amdgpu_dm_connector->max_vfreq = 0; + amdgpu_dm_connector->pixel_clock_mhz = 0; + + 
dm_con_state->freesync_capable = false; + dm_con_state->freesync_enable = false; + return; + } + dm_con_state = to_dm_connector_state(connector->state); edid_check_required = false; @@ -5220,46 +5272,10 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, } if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { - amdgpu_dm_connector->caps.supported = true; - amdgpu_dm_connector->caps.min_refresh_in_micro_hz = - amdgpu_dm_connector->min_vfreq * 1000000; - amdgpu_dm_connector->caps.max_refresh_in_micro_hz = - amdgpu_dm_connector->max_vfreq * 1000000; + amdgpu_dm_connector->min_vfreq > 10) { + dm_con_state->freesync_capable = true; } } } -void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector) -{ - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); - struct dm_connector_state *dm_con_state; - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = dev->dev_private; - - if (!amdgpu_dm_connector->dc_sink || !adev->dm.freesync_module) { - DRM_ERROR("dc_sink NULL or no free_sync module.\n"); - return; - } - - if (!connector->state) { - DRM_ERROR("%s - Connector has no state", __func__); - return; - } - - dm_con_state = to_dm_connector_state(connector->state); - - amdgpu_dm_connector->min_vfreq = 0; - amdgpu_dm_connector->max_vfreq = 0; - amdgpu_dm_connector->pixel_clock_mhz = 0; - - memset(&amdgpu_dm_connector->caps, 0, sizeof(amdgpu_dm_connector->caps)); - - dm_con_state->freesync_capable = false; - - dm_con_state->user_enable.enable_for_gaming = false; - dm_con_state->user_enable.enable_for_static = false; - dm_con_state->user_enable.enable_for_video = false; -} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index a29dc35954c9..c159584c04f7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -167,9 +167,6 @@ struct amdgpu_dm_connector { int max_vfreq ; int pixel_clock_mhz; - /*freesync caps*/ - struct mod_freesync_caps caps; - struct mutex hpd_lock; bool fake_enable; @@ -197,9 +194,13 @@ struct dm_crtc_state { int crc_skip_count; bool crc_enabled; + + bool freesync_enabled; + struct dc_crtc_timing_adjust adjust; + struct dc_info_packet vrr_infopacket; }; -#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) +#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) struct dm_atomic_state { struct drm_atomic_state base; @@ -216,7 +217,7 @@ struct dm_connector_state { uint8_t underscan_vborder; uint8_t underscan_hborder; bool underscan_enable; - struct mod_freesync_user_enable user_enable; + bool freesync_enable; bool freesync_capable; }; @@ -250,11 +251,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector); -void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, - struct edid *edid); - -void -amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector); +void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, + struct edid *edid); /* amdgpu_dm_crc.c */ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 9a300732ba37..67683645ce2c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -234,8 +234,9 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector) dc_sink->priv = aconnector; aconnector->dc_sink = dc_sink; - amdgpu_dm_add_sink_to_freesync_module( - connector, aconnector->edid); + if (aconnector->dc_sink) + amdgpu_dm_update_freesync_caps( + connector, aconnector->edid); } static int dm_dp_mst_get_modes(struct drm_connector *connector) @@ -275,8 +276,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) aconnector->dc_sink = dc_sink; if (aconnector->dc_sink) - amdgpu_dm_add_sink_to_freesync_module( + amdgpu_dm_update_freesync_caps( connector, aconnector->edid); + } drm_connector_update_edid_property( @@ -439,7 +441,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, aconnector->port = NULL; if (aconnector->dc_sink) { - amdgpu_dm_remove_sink_from_freesync_module(connector); + amdgpu_dm_update_freesync_caps(connector, NULL); dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 71742635e797..a4df627d6936 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -188,11 +188,9 @@ static bool create_links( ***************************************************************************** */ bool dc_stream_adjust_vmin_vmax(struct dc *dc, - struct dc_stream_state **streams, int num_streams, - int vmin, int vmax) + struct dc_stream_state *stream, + struct dc_crtc_timing_adjust *adjust) { - /* TODO: Support multiple streams */ - struct dc_stream_state *stream = streams[0]; int i = 0; bool ret = false; @@ -200,11 +198,11 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream && pipe->stream_res.stream_enc) { - dc->hwss.set_drr(&pipe, 1, vmin, vmax); - - /* build and update the info frame */ - resource_build_info_frame(pipe); - dc->hwss.update_info_frame(pipe); + pipe->stream->adjust = *adjust; + dc->hwss.set_drr(&pipe, + 1, + adjust->v_total_min, + adjust->v_total_max); ret = true; } @@ -217,7 +215,7 @@ bool dc_stream_get_crtc_position(struct dc *dc, unsigned int *v_pos, unsigned int *nom_v_pos) { /* TODO: Support multiple streams */ - struct dc_stream_state *stream = streams[0]; + const struct dc_stream_state *stream = streams[0]; int i = 0; bool ret = false; struct crtc_position position; @@ -1257,8 +1255,25 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_status == NULL || stream_status->plane_count != surface_count) return UPDATE_TYPE_FULL; - if (stream_update) - return UPDATE_TYPE_FULL; + /* some stream updates require passive update */ + if (stream_update) { + if ((stream_update->src.height != 0) && + (stream_update->src.width != 0)) + return UPDATE_TYPE_FULL; + + if ((stream_update->dst.height != 0) && + (stream_update->dst.width != 0)) + return UPDATE_TYPE_FULL; + + if (stream_update->out_transfer_func) + return UPDATE_TYPE_FULL; + + if (stream_update->hdr_static_metadata) + return UPDATE_TYPE_FULL; + + if (stream_update->abm_level) + return UPDATE_TYPE_FULL; + } for (i = 0 ; i < surface_count; i++) { enum surface_update_type type = @@ -1337,7 +1352,6 @@ static void commit_planes_for_stream(struct dc *dc, return; } - /* Full fe update*/ for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = 
&context->res_ctx.pipe_ctx[j]; @@ -1348,11 +1362,22 @@ static void commit_planes_for_stream(struct dc *dc, top_pipe_to_program = pipe_ctx; - if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state) + if (!pipe_ctx->plane_state) + continue; + + /* Fast update*/ + // VRR program can be done as part of FAST UPDATE + if (stream_update && stream_update->adjust) + dc->hwss.set_drr(&pipe_ctx, 1, + stream_update->adjust->v_total_min, + stream_update->adjust->v_total_max); + + /* Full fe update*/ + if (update_type == UPDATE_TYPE_FAST) continue; stream_status = - stream_get_status(context, pipe_ctx->stream); + stream_get_status(context, pipe_ctx->stream); dc->hwss.apply_ctx_for_surface( dc, pipe_ctx->stream, stream_status->plane_count, context); @@ -1407,7 +1432,7 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); } - if (stream && stream_update && update_type > UPDATE_TYPE_FAST) + if (stream && stream_update) for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; @@ -1415,7 +1440,8 @@ static void commit_planes_for_stream(struct dc *dc, if (pipe_ctx->stream != stream) continue; - if (stream_update->hdr_static_metadata) { + if (stream_update->hdr_static_metadata || + (stream_update->vrr_infopacket)) { resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index a7553b6d59c2..d91df5ef0cb3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2389,6 +2389,9 @@ static bool retrieve_link_cap(struct dc_link *link) dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + link->dpcd_caps.allow_invalid_MSA_timing_param = down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2c348b11b9a5..4468b240929a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2475,119 +2475,13 @@ static void set_spd_info_packet( { /* SPD info packet for FreeSync */ - unsigned char checksum = 0; - unsigned int idx, payload_size = 0; - /* Check if Freesync is supported. Return if false. 
If true, * set the corresponding bit in the info packet */ - if (stream->freesync_ctx.supported == false) + if (!stream->vrr_infopacket.valid) return; - if (dc_is_hdmi_signal(stream->signal)) { - - /* HEADER */ - - /* HB0 = Packet Type = 0x83 (Source Product - * Descriptor InfoFrame) - */ - info_packet->hb0 = HDMI_INFOFRAME_TYPE_SPD; - - /* HB1 = Version = 0x01 */ - info_packet->hb1 = 0x01; - - /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */ - info_packet->hb2 = 0x08; - - payload_size = 0x08; - - } else if (dc_is_dp_signal(stream->signal)) { - - /* HEADER */ - - /* HB0 = Secondary-data Packet ID = 0 - Only non-zero - * when used to associate audio related info packets - */ - info_packet->hb0 = 0x00; - - /* HB1 = Packet Type = 0x83 (Source Product - * Descriptor InfoFrame) - */ - info_packet->hb1 = HDMI_INFOFRAME_TYPE_SPD; - - /* HB2 = [Bits 7:0 = Least significant eight bits - - * For INFOFRAME, the value must be 1Bh] - */ - info_packet->hb2 = 0x1B; - - /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1] - * [Bits 1:0 = Most significant two bits = 0x00] - */ - info_packet->hb3 = 0x04; - - payload_size = 0x1B; - } - - /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */ - info_packet->sb[1] = 0x1A; - - /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */ - info_packet->sb[2] = 0x00; - - /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */ - info_packet->sb[3] = 0x00; - - /* PB4 = Reserved */ - info_packet->sb[4] = 0x00; - - /* PB5 = Reserved */ - info_packet->sb[5] = 0x00; - - /* PB6 = [Bits 7:3 = Reserved] */ - info_packet->sb[6] = 0x00; - - if (stream->freesync_ctx.supported == true) - /* PB6 = [Bit 0 = FreeSync Supported] */ - info_packet->sb[6] |= 0x01; - - if (stream->freesync_ctx.enabled == true) - /* PB6 = [Bit 1 = FreeSync Enabled] */ - info_packet->sb[6] |= 0x02; - - if (stream->freesync_ctx.active == true) - /* PB6 = [Bit 2 = FreeSync Active] */ - info_packet->sb[6] |= 0x04; - - /* PB7 = FreeSync Minimum refresh rate (Hz) */ - info_packet->sb[7] = (unsigned char) (stream->freesync_ctx. - min_refresh_in_micro_hz / 1000000); - - /* PB8 = FreeSync Maximum refresh rate (Hz) - * - * Note: We do not use the maximum capable refresh rate - * of the panel, because we should never go above the field - * rate of the mode timing set. - */ - info_packet->sb[8] = (unsigned char) (stream->freesync_ctx. 
- nominal_refresh_in_micro_hz / 1000000); - - /* PB9 - PB27 = Reserved */ - for (idx = 9; idx <= 27; idx++) - info_packet->sb[idx] = 0x00; - - /* Calculate checksum */ - checksum += info_packet->hb0; - checksum += info_packet->hb1; - checksum += info_packet->hb2; - checksum += info_packet->hb3; - - for (idx = 1; idx <= payload_size; idx++) - checksum += info_packet->sb[idx]; - - /* PB0 = Checksum (one byte complement) */ - info_packet->sb[0] = (unsigned char) (0x100 - checksum); - - info_packet->valid = true; + *info_packet = stream->vrr_infopacket; } static void set_hdr_static_info_packet( diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index b789cb2b354b..57f57cf0fe2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -708,12 +708,6 @@ struct crtc_trigger_info { enum trigger_delay delay; }; -enum vrr_state { - VRR_STATE_OFF = 0, - VRR_STATE_VARIABLE, - VRR_STATE_FIXED, -}; - struct dc_crtc_timing_adjust { uint32_t v_total_min; uint32_t v_total_max; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index cbfe418006cb..67101a525e3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -45,19 +45,25 @@ struct dc_stream_status { struct dc_link *link; }; +// TODO: References to this needs to be removed.. +struct freesync_context { + bool dummy; +}; + struct dc_stream_state { struct dc_sink *sink; struct dc_crtc_timing timing; - struct dc_crtc_timing_adjust timing_adjust; - struct vrr_params vrr_params; + struct dc_crtc_timing_adjust adjust; + struct dc_info_packet vrr_infopacket; struct rect src; /* composition area */ struct rect dst; /* stream addressable area */ - struct audio_info audio_info; - + // TODO: References to this needs to be removed.. 
struct freesync_context freesync_ctx; + struct audio_info audio_info; + struct dc_info_packet hdr_static_metadata; PHYSICAL_ADDRESS_LOC dmdata_address; bool use_dynamic_meta; @@ -120,6 +126,8 @@ struct dc_stream_update { unsigned int *abm_level; unsigned long long *periodic_fn_vsync_delta; + struct dc_crtc_timing_adjust *adjust; + struct dc_info_packet *vrr_infopacket; }; bool dc_is_stream_unchanged( @@ -258,10 +266,8 @@ bool dc_stream_set_cursor_position( bool dc_stream_adjust_vmin_vmax(struct dc *dc, - struct dc_stream_state **stream, - int num_streams, - int vmin, - int vmax); + struct dc_stream_state *stream, + struct dc_crtc_timing_adjust *adjust); bool dc_stream_get_crtc_position(struct dc *dc, struct dc_stream_state **stream, @@ -288,13 +294,6 @@ void dc_stream_set_static_screen_events(struct dc *dc, void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option); - -bool dc_stream_adjust_vmin_vmax(struct dc *dc, - struct dc_stream_state **stream, - int num_streams, - int vmin, - int vmax); - bool dc_stream_get_crtc_position(struct dc *dc, struct dc_stream_state **stream, int num_streams, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 8c6eb78b0c3b..58a6ef80a60e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -513,13 +513,11 @@ struct audio_info { struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT]; }; -struct vrr_params { - enum vrr_state state; - uint32_t window_min; - uint32_t window_max; - uint32_t inserted_frame_duration_in_us; - uint32_t frames_to_insert; - uint32_t frame_counter; +enum dc_infoframe_type { + DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81, + DC_HDMI_INFOFRAME_TYPE_AVI = 0x82, + DC_HDMI_INFOFRAME_TYPE_SPD = 0x83, + DC_HDMI_INFOFRAME_TYPE_AUDIO = 0x84, }; struct dc_info_packet { @@ -539,16 +537,6 @@ struct dc_plane_flip_time { unsigned int prev_update_time_in_us; }; -// Will combine with vrr_params at some point. -struct freesync_context { - bool supported; - bool enabled; - bool active; - - unsigned int min_refresh_in_micro_hz; - unsigned int nominal_refresh_in_micro_hz; -}; - struct psr_config { unsigned char psr_version; unsigned int psr_rfb_setup_time; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 2f2c5155c5aa..ae4792494fe7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1286,6 +1286,8 @@ static enum dc_status dce110_enable_stream_timing( struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx. 
pipe_ctx[pipe_ctx->pipe_idx]; struct tg_color black_color = {0}; + struct drr_params params = {0}; + unsigned int event_triggers = 0; if (!pipe_ctx_old->stream) { @@ -1315,9 +1317,19 @@ static enum dc_status dce110_enable_stream_timing( &stream->timing, true); - pipe_ctx->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx->stream_res.tg, - 0x182); + params.vertical_total_min = stream->adjust.v_total_min; + params.vertical_total_max = stream->adjust.v_total_max; + if (pipe_ctx->stream_res.tg->funcs->set_drr) + pipe_ctx->stream_res.tg->funcs->set_drr( + pipe_ctx->stream_res.tg, ¶ms); + + // DRR should set trigger event to monitor surface update event + if (stream->adjust.v_total_min != 0 && + stream->adjust.v_total_max != 0) + event_triggers = 0x80; + if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) + pipe_ctx->stream_res.tg->funcs->set_static_screen_control( + pipe_ctx->stream_res.tg, event_triggers); } if (!pipe_ctx_old->stream) { @@ -1328,8 +1340,6 @@ static enum dc_status dce110_enable_stream_timing( } } - - return DC_OK; } @@ -1719,16 +1729,24 @@ static void set_drr(struct pipe_ctx **pipe_ctx, { int i = 0; struct drr_params params = {0}; + // DRR should set trigger event to monitor surface update event + unsigned int event_triggers = 0x80; params.vertical_total_max = vmax; params.vertical_total_min = vmin; /* TODO: If multiple pipes are to be supported, you need - * some GSL stuff + * some GSL stuff. Static screen triggers may be programmed differently + * as well. */ - for (i = 0; i < num_pipes; i++) { - pipe_ctx[i]->stream_res.tg->funcs->set_drr(pipe_ctx[i]->stream_res.tg, ¶ms); + pipe_ctx[i]->stream_res.tg->funcs->set_drr( + pipe_ctx[i]->stream_res.tg, ¶ms); + + if (vmax != 0 && vmin != 0) + pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( + pipe_ctx[i]->stream_res.tg, + event_triggers); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 6d27f1db3c69..4b8bedb625b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -631,6 +631,8 @@ static enum dc_status dcn10_enable_stream_timing( struct dc_stream_state *stream = pipe_ctx->stream; enum dc_color_space color_space; struct tg_color black_color = {0}; + struct drr_params params = {0}; + unsigned int event_triggers = 0; /* by upper caller loop, pipe0 is parent pipe and be called first. * back end is set up by for pipe0. 
Other children pipe share back end @@ -698,6 +700,19 @@ static enum dc_status dcn10_enable_stream_timing( return DC_ERROR_UNEXPECTED; } + params.vertical_total_min = stream->adjust.v_total_min; + params.vertical_total_max = stream->adjust.v_total_max; + if (pipe_ctx->stream_res.tg->funcs->set_drr) + pipe_ctx->stream_res.tg->funcs->set_drr( + pipe_ctx->stream_res.tg, ¶ms); + + // DRR should set trigger event to monitor surface update event + if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) + event_triggers = 0x80; + if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) + pipe_ctx->stream_res.tg->funcs->set_static_screen_control( + pipe_ctx->stream_res.tg, event_triggers); + /* TODO program crtc source select for non-virtual signal*/ /* TODO program FMT */ /* TODO setup link_enc */ @@ -2399,15 +2414,23 @@ static void set_drr(struct pipe_ctx **pipe_ctx, { int i = 0; struct drr_params params = {0}; + // DRR should set trigger event to monitor surface update event + unsigned int event_triggers = 0x80; params.vertical_total_max = vmax; params.vertical_total_min = vmin; /* TODO: If multiple pipes are to be supported, you need - * some GSL stuff + * some GSL stuff. Static screen triggers may be programmed differently + * as well. */ for (i = 0; i < num_pipes; i++) { - pipe_ctx[i]->stream_res.tg->funcs->set_drr(pipe_ctx[i]->stream_res.tg, ¶ms); + pipe_ctx[i]->stream_res.tg->funcs->set_drr( + pipe_ctx[i]->stream_res.tg, ¶ms); + if (vmax != 0 && vmin != 0) + pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( + pipe_ctx[i]->stream_res.tg, + event_triggers); } } diff --git a/drivers/gpu/drm/amd/display/include/set_mode_types.h b/drivers/gpu/drm/amd/display/include/set_mode_types.h index fee2b6ffcfc1..2b836e582c08 100644 --- a/drivers/gpu/drm/amd/display/include/set_mode_types.h +++ b/drivers/gpu/drm/amd/display/include/set_mode_types.h @@ -90,18 +90,6 @@ union hdmi_info_packet { struct info_packet_raw_data packet_raw_data; }; -struct info_packet { - enum info_frame_flag flags; - union hdmi_info_packet info_packet_hdmi; -}; - -struct info_frame { - struct info_packet avi_info_packet; - struct info_packet gamut_packet; - struct info_packet vendor_info_packet; - struct info_packet spd_info_packet; -}; - #pragma pack(pop) #endif /* __DAL_SET_MODE_TYPES_H__ */ diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index fa344ceafc17..5e12e463c06a 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -30,6 +30,7 @@ #define MOD_FREESYNC_MAX_CONCURRENT_STREAMS 32 +#define MIN_REFRESH_RANGE_IN_US 10000000 /* Refresh rate ramp at a fixed rate of 65 Hz/second */ #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) /* Number of elements in the render times cache array */ @@ -40,103 +41,9 @@ #define FIXED_REFRESH_ENTER_FRAME_COUNT 5 #define FIXED_REFRESH_EXIT_FRAME_COUNT 5 -#define FREESYNC_REGISTRY_NAME "freesync_v1" - -#define FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY "DalFreeSyncNoStaticForExternalDp" - -#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal" - -#define FREESYNC_DEFAULT_REGKEY "LCDFreeSyncDefault" - -struct gradual_static_ramp { - bool ramp_is_active; - bool ramp_direction_is_up; - unsigned int ramp_current_frame_duration_in_ns; -}; - -struct freesync_time { - /* video (48Hz feature) related */ - unsigned int update_duration_in_ns; - - /* BTR/fixed refresh 
related */ - unsigned int prev_time_stamp_in_us; - - unsigned int min_render_time_in_us; - unsigned int max_render_time_in_us; - - unsigned int render_times_index; - unsigned int render_times[RENDER_TIMES_MAX_COUNT]; - - unsigned int min_window; - unsigned int max_window; -}; - -struct below_the_range { - bool btr_active; - bool program_btr; - - unsigned int mid_point_in_us; - - unsigned int inserted_frame_duration_in_us; - unsigned int frames_to_insert; - unsigned int frame_counter; -}; - -struct fixed_refresh { - bool fixed_active; - bool program_fixed; - unsigned int frame_counter; -}; - -struct freesync_range { - unsigned int min_refresh; - unsigned int max_frame_duration; - unsigned int vmax; - - unsigned int max_refresh; - unsigned int min_frame_duration; - unsigned int vmin; -}; - -struct freesync_state { - bool fullscreen; - bool static_screen; - bool video; - - unsigned int vmin; - unsigned int vmax; - - struct freesync_time time; - - unsigned int nominal_refresh_rate_in_micro_hz; - bool windowed_fullscreen; - - struct gradual_static_ramp static_ramp; - struct below_the_range btr; - struct fixed_refresh fixed_refresh; - struct freesync_range freesync_range; -}; - -struct freesync_entity { - struct dc_stream_state *stream; - struct mod_freesync_caps *caps; - struct freesync_state state; - struct mod_freesync_user_enable user_enable; -}; - -struct freesync_registry_options { - bool drr_external_supported; - bool drr_internal_supported; - bool lcd_freesync_default_set; - int lcd_freesync_default_value; -}; - struct core_freesync { struct mod_freesync public; struct dc *dc; - struct freesync_registry_options opts; - struct freesync_entity *map; - int num_entities; }; #define MOD_FREESYNC_TO_CORE(mod_freesync)\ @@ -147,69 +54,16 @@ struct mod_freesync *mod_freesync_create(struct dc *dc) struct core_freesync *core_freesync = kzalloc(sizeof(struct core_freesync), GFP_KERNEL); - - struct persistent_data_flag flag; - - int i, data = 0; - if (core_freesync == NULL) goto fail_alloc_context; - core_freesync->map = kcalloc(MOD_FREESYNC_MAX_CONCURRENT_STREAMS, - sizeof(struct freesync_entity), - GFP_KERNEL); - - if (core_freesync->map == NULL) - goto fail_alloc_map; - - for (i = 0; i < MOD_FREESYNC_MAX_CONCURRENT_STREAMS; i++) - core_freesync->map[i].stream = NULL; - - core_freesync->num_entities = 0; - if (dc == NULL) goto fail_construct; core_freesync->dc = dc; - - /* Create initial module folder in registry for freesync enable data */ - flag.save_per_edid = true; - flag.save_per_link = false; - dm_write_persistent_data(dc->ctx, NULL, FREESYNC_REGISTRY_NAME, - NULL, NULL, 0, &flag); - flag.save_per_edid = false; - flag.save_per_link = false; - - if (dm_read_persistent_data(dc->ctx, NULL, NULL, - FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY, - &data, sizeof(data), &flag)) { - core_freesync->opts.drr_internal_supported = - (data & 1) ? false : true; - } - - if (dm_read_persistent_data(dc->ctx, NULL, NULL, - FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY, - &data, sizeof(data), &flag)) { - core_freesync->opts.drr_external_supported = - (data & 1) ? 
false : true; - } - - if (dm_read_persistent_data(dc->ctx, NULL, NULL, - FREESYNC_DEFAULT_REGKEY, - &data, sizeof(data), &flag)) { - core_freesync->opts.lcd_freesync_default_set = true; - core_freesync->opts.lcd_freesync_default_value = data; - } else { - core_freesync->opts.lcd_freesync_default_set = false; - core_freesync->opts.lcd_freesync_default_value = 0; - } - return &core_freesync->public; fail_construct: - kfree(core_freesync->map); - -fail_alloc_map: kfree(core_freesync); fail_alloc_context: @@ -218,968 +72,396 @@ struct mod_freesync *mod_freesync_create(struct dc *dc) void mod_freesync_destroy(struct mod_freesync *mod_freesync) { - if (mod_freesync != NULL) { - int i; - struct core_freesync *core_freesync = - MOD_FREESYNC_TO_CORE(mod_freesync); - - for (i = 0; i < core_freesync->num_entities; i++) - if (core_freesync->map[i].stream) - dc_stream_release(core_freesync->map[i].stream); - - kfree(core_freesync->map); - - kfree(core_freesync); - } -} - -/* Given a specific dc_stream* this function finds its equivalent - * on the core_freesync->map and returns the corresponding index - */ -static unsigned int map_index_from_stream(struct core_freesync *core_freesync, - struct dc_stream_state *stream) -{ - unsigned int index = 0; - - for (index = 0; index < core_freesync->num_entities; index++) { - if (core_freesync->map[index].stream == stream) { - return index; - } - } - /* Could not find stream requested */ - ASSERT(false); - return index; -} - -bool mod_freesync_add_stream(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, struct mod_freesync_caps *caps) -{ - struct dc *dc = NULL; struct core_freesync *core_freesync = NULL; - int persistent_freesync_enable = 0; - struct persistent_data_flag flag; - unsigned int nom_refresh_rate_uhz; - unsigned long long temp; - if (mod_freesync == NULL) - return false; - + return; core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - dc = core_freesync->dc; - - flag.save_per_edid = true; - flag.save_per_link = false; - - if (core_freesync->num_entities < MOD_FREESYNC_MAX_CONCURRENT_STREAMS) { - - dc_stream_retain(stream); - - temp = stream->timing.pix_clk_khz; - temp *= 1000ULL * 1000ULL * 1000ULL; - temp = div_u64(temp, stream->timing.h_total); - temp = div_u64(temp, stream->timing.v_total); - - nom_refresh_rate_uhz = (unsigned int) temp; - - core_freesync->map[core_freesync->num_entities].stream = stream; - core_freesync->map[core_freesync->num_entities].caps = caps; - - core_freesync->map[core_freesync->num_entities].state. - fullscreen = false; - core_freesync->map[core_freesync->num_entities].state. - static_screen = false; - core_freesync->map[core_freesync->num_entities].state. - video = false; - core_freesync->map[core_freesync->num_entities].state.time. - update_duration_in_ns = 0; - core_freesync->map[core_freesync->num_entities].state. - static_ramp.ramp_is_active = false; - - /* get persistent data from registry */ - if (dm_read_persistent_data(dc->ctx, stream->sink, - FREESYNC_REGISTRY_NAME, - "userenable", &persistent_freesync_enable, - sizeof(int), &flag)) { - core_freesync->map[core_freesync->num_entities].user_enable. - enable_for_gaming = - (persistent_freesync_enable & 1) ? true : false; - core_freesync->map[core_freesync->num_entities].user_enable. - enable_for_static = - (persistent_freesync_enable & 2) ? true : false; - core_freesync->map[core_freesync->num_entities].user_enable. - enable_for_video = - (persistent_freesync_enable & 4) ? 
true : false; - /* If FreeSync display and LCDFreeSyncDefault is set, use as default values write back to userenable */ - } else if (caps->supported && (core_freesync->opts.lcd_freesync_default_set)) { - core_freesync->map[core_freesync->num_entities].user_enable.enable_for_gaming = - (core_freesync->opts.lcd_freesync_default_value & 1) ? true : false; - core_freesync->map[core_freesync->num_entities].user_enable.enable_for_static = - (core_freesync->opts.lcd_freesync_default_value & 2) ? true : false; - core_freesync->map[core_freesync->num_entities].user_enable.enable_for_video = - (core_freesync->opts.lcd_freesync_default_value & 4) ? true : false; - dm_write_persistent_data(dc->ctx, stream->sink, - FREESYNC_REGISTRY_NAME, - "userenable", &core_freesync->opts.lcd_freesync_default_value, - sizeof(int), &flag); - } else { - core_freesync->map[core_freesync->num_entities].user_enable. - enable_for_gaming = false; - core_freesync->map[core_freesync->num_entities].user_enable. - enable_for_static = false; - core_freesync->map[core_freesync->num_entities].user_enable. - enable_for_video = false; - } - - if (caps->supported && - nom_refresh_rate_uhz >= caps->min_refresh_in_micro_hz && - nom_refresh_rate_uhz <= caps->max_refresh_in_micro_hz) - stream->ignore_msa_timing_param = 1; - - core_freesync->num_entities++; - return true; - } - return false; + kfree(core_freesync); } -bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream) +#if 0 /* unused currently */ +static unsigned int calc_refresh_in_uhz_from_duration( + unsigned int duration_in_ns) { - int i = 0; - struct core_freesync *core_freesync = NULL; - unsigned int index = 0; - - if (mod_freesync == NULL) - return false; + unsigned int refresh_in_uhz = + ((unsigned int)(div64_u64((1000000000ULL * 1000000), + duration_in_ns))); + return refresh_in_uhz; +} +#endif - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - index = map_index_from_stream(core_freesync, stream); - - dc_stream_release(core_freesync->map[index].stream); - core_freesync->map[index].stream = NULL; - /* To remove this entity, shift everything after down */ - for (i = index; i < core_freesync->num_entities - 1; i++) - core_freesync->map[i] = core_freesync->map[i + 1]; - core_freesync->num_entities--; - return true; +static unsigned int calc_duration_in_us_from_refresh_in_uhz( + unsigned int refresh_in_uhz) +{ + unsigned int duration_in_us = + ((unsigned int)(div64_u64((1000000000ULL * 1000), + refresh_in_uhz))); + return duration_in_us; } -static void adjust_vmin_vmax(struct core_freesync *core_freesync, - struct dc_stream_state **streams, - int num_streams, - int map_index, - unsigned int v_total_min, - unsigned int v_total_max) +static unsigned int calc_duration_in_us_from_v_total( + const struct dc_stream_state *stream, + const struct mod_vrr_params *in_vrr, + unsigned int v_total) { - if (num_streams == 0 || streams == NULL || num_streams > 1) - return; + unsigned int duration_in_us = + (unsigned int)(div64_u64(((unsigned long long)(v_total) + * 1000) * stream->timing.h_total, + stream->timing.pix_clk_khz)); - core_freesync->map[map_index].state.vmin = v_total_min; - core_freesync->map[map_index].state.vmax = v_total_max; + if (duration_in_us < in_vrr->min_duration_in_us) + duration_in_us = in_vrr->min_duration_in_us; - dc_stream_adjust_vmin_vmax(core_freesync->dc, streams, - num_streams, v_total_min, - v_total_max); -} + if (duration_in_us > in_vrr->max_duration_in_us) + duration_in_us = in_vrr->max_duration_in_us; + 
return duration_in_us; +} -static void update_stream_freesync_context(struct core_freesync *core_freesync, - struct dc_stream_state *stream) +static unsigned int calc_v_total_from_refresh( + const struct dc_stream_state *stream, + unsigned int refresh_in_uhz) { - unsigned int index; - struct freesync_context *ctx; + unsigned int v_total = stream->timing.v_total; + unsigned int frame_duration_in_ns; - ctx = &stream->freesync_ctx; + frame_duration_in_ns = + ((unsigned int)(div64_u64((1000000000ULL * 1000000), + refresh_in_uhz))); - index = map_index_from_stream(core_freesync, stream); + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * stream->timing.pix_clk_khz), + stream->timing.h_total), 1000000); - ctx->supported = core_freesync->map[index].caps->supported; - ctx->enabled = (core_freesync->map[index].user_enable.enable_for_gaming || - core_freesync->map[index].user_enable.enable_for_video || - core_freesync->map[index].user_enable.enable_for_static); - ctx->active = (core_freesync->map[index].state.fullscreen || - core_freesync->map[index].state.video || - core_freesync->map[index].state.static_ramp.ramp_is_active); - ctx->min_refresh_in_micro_hz = - core_freesync->map[index].caps->min_refresh_in_micro_hz; - ctx->nominal_refresh_in_micro_hz = core_freesync-> - map[index].state.nominal_refresh_rate_in_micro_hz; + /* v_total cannot be less than nominal */ + if (v_total < stream->timing.v_total) { + ASSERT(v_total < stream->timing.v_total); + v_total = stream->timing.v_total; + } + return v_total; } -static void update_stream(struct core_freesync *core_freesync, - struct dc_stream_state *stream) +static unsigned int calc_v_total_from_duration( + const struct dc_stream_state *stream, + const struct mod_vrr_params *vrr, + unsigned int duration_in_us) { - unsigned int index = map_index_from_stream(core_freesync, stream); - if (core_freesync->map[index].caps->supported) { - stream->ignore_msa_timing_param = 1; - update_stream_freesync_context(core_freesync, stream); - } -} + unsigned int v_total = 0; -static void calc_freesync_range(struct core_freesync *core_freesync, - struct dc_stream_state *stream, - struct freesync_state *state, - unsigned int min_refresh_in_uhz, - unsigned int max_refresh_in_uhz) -{ - unsigned int min_frame_duration_in_ns = 0, max_frame_duration_in_ns = 0; - unsigned int index = map_index_from_stream(core_freesync, stream); - uint32_t vtotal = stream->timing.v_total; - - if ((min_refresh_in_uhz == 0) || (max_refresh_in_uhz == 0)) { - state->freesync_range.min_refresh = - state->nominal_refresh_rate_in_micro_hz; - state->freesync_range.max_refresh = - state->nominal_refresh_rate_in_micro_hz; + if (duration_in_us < vrr->min_duration_in_us) + duration_in_us = vrr->min_duration_in_us; - state->freesync_range.max_frame_duration = 0; - state->freesync_range.min_frame_duration = 0; + if (duration_in_us > vrr->max_duration_in_us) + duration_in_us = vrr->max_duration_in_us; - state->freesync_range.vmax = vtotal; - state->freesync_range.vmin = vtotal; - - return; - } + v_total = div64_u64(div64_u64(((unsigned long long)( + duration_in_us) * stream->timing.pix_clk_khz), + stream->timing.h_total), 1000); - min_frame_duration_in_ns = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - max_refresh_in_uhz))); - max_frame_duration_in_ns = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - min_refresh_in_uhz))); - - state->freesync_range.min_refresh = min_refresh_in_uhz; - state->freesync_range.max_refresh = max_refresh_in_uhz; - - 
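The conversion helpers added above all come down to the same arithmetic: a refresh rate given in µHz corresponds to a frame duration of 10^12 / refresh µs, and a frame duration corresponds to V_TOTAL = duration * pixel_clock / h_total in consistent units. A quick standalone check of that arithmetic, using plain 64-bit division in place of the kernel's div64_u64 and an assumed 1080p timing (h_total 2200, v_total 1125, 148.5 MHz pixel clock) as the example:

#include <stdint.h>
#include <stdio.h>

static unsigned int duration_us_from_refresh_uhz(unsigned int refresh_in_uhz)
{
	/* e.g. 60.000000 Hz -> 16666 us */
	return (unsigned int)(1000000000ULL * 1000 / refresh_in_uhz);
}

static unsigned int v_total_from_refresh_uhz(unsigned int pix_clk_khz,
					     unsigned int h_total,
					     unsigned int refresh_in_uhz)
{
	uint64_t frame_ns = 1000000000ULL * 1000000 / refresh_in_uhz;

	/* lines per frame = frame time * pixel rate / line length */
	return (unsigned int)(frame_ns * pix_clk_khz / h_total / 1000000);
}

static unsigned int duration_us_from_v_total(unsigned int pix_clk_khz,
					     unsigned int h_total,
					     unsigned int v_total)
{
	return (unsigned int)((uint64_t)v_total * 1000 * h_total / pix_clk_khz);
}

int main(void)
{
	unsigned int pix_clk_khz = 148500, h_total = 2200;

	printf("60 Hz        -> %u us/frame\n",
	       duration_us_from_refresh_uhz(60000000));			/* 16666 */
	printf("48 Hz        -> v_total %u\n",
	       v_total_from_refresh_uhz(pix_clk_khz, h_total, 48000000));	/* 1406 */
	printf("v_total 1125 -> %u us/frame\n",
	       duration_us_from_v_total(pix_clk_khz, h_total, 1125));		/* 16666 */
	return 0;
}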
state->freesync_range.max_frame_duration = max_frame_duration_in_ns; - state->freesync_range.min_frame_duration = min_frame_duration_in_ns; - - state->freesync_range.vmax = div64_u64(div64_u64(((unsigned long long)( - max_frame_duration_in_ns) * stream->timing.pix_clk_khz), - stream->timing.h_total), 1000000); - state->freesync_range.vmin = div64_u64(div64_u64(((unsigned long long)( - min_frame_duration_in_ns) * stream->timing.pix_clk_khz), - stream->timing.h_total), 1000000); - - /* vmin/vmax cannot be less than vtotal */ - if (state->freesync_range.vmin < vtotal) { - /* Error of 1 is permissible */ - ASSERT((state->freesync_range.vmin + 1) >= vtotal); - state->freesync_range.vmin = vtotal; + /* v_total cannot be less than nominal */ + if (v_total < stream->timing.v_total) { + ASSERT(v_total < stream->timing.v_total); + v_total = stream->timing.v_total; } - if (state->freesync_range.vmax < vtotal) { - /* Error of 1 is permissible */ - ASSERT((state->freesync_range.vmax + 1) >= vtotal); - state->freesync_range.vmax = vtotal; - } - - /* Determine whether BTR can be supported */ - if (max_frame_duration_in_ns >= - 2 * min_frame_duration_in_ns) - core_freesync->map[index].caps->btr_supported = true; - else - core_freesync->map[index].caps->btr_supported = false; - - /* Cache the time variables */ - state->time.max_render_time_in_us = - max_frame_duration_in_ns / 1000; - state->time.min_render_time_in_us = - min_frame_duration_in_ns / 1000; - state->btr.mid_point_in_us = - (max_frame_duration_in_ns + - min_frame_duration_in_ns) / 2000; + return v_total; } -static void calc_v_total_from_duration(struct dc_stream_state *stream, - unsigned int duration_in_ns, int *v_total_nominal) +static void update_v_total_for_static_ramp( + struct core_freesync *core_freesync, + const struct dc_stream_state *stream, + struct mod_vrr_params *in_out_vrr) { - *v_total_nominal = div64_u64(div64_u64(((unsigned long long)( - duration_in_ns) * stream->timing.pix_clk_khz), - stream->timing.h_total), 1000000); -} - -static void calc_v_total_for_static_ramp(struct core_freesync *core_freesync, - struct dc_stream_state *stream, - unsigned int index, int *v_total) -{ - unsigned int frame_duration = 0; - - struct gradual_static_ramp *static_ramp_variables = - &core_freesync->map[index].state.static_ramp; + unsigned int v_total = 0; + unsigned int current_duration_in_us = + calc_duration_in_us_from_v_total( + stream, in_out_vrr, + in_out_vrr->adjust.v_total_max); + unsigned int target_duration_in_us = + calc_duration_in_us_from_refresh_in_uhz( + in_out_vrr->fixed.target_refresh_in_uhz); + bool ramp_direction_is_up = (current_duration_in_us > + target_duration_in_us) ? true : false; /* Calc ratio between new and current frame duration with 3 digit */ unsigned int frame_duration_ratio = div64_u64(1000000, (1000 + div64_u64(((unsigned long long)( STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME) * - static_ramp_variables->ramp_current_frame_duration_in_ns), - 1000000000))); + current_duration_in_us), + 1000000))); - /* Calculate delta between new and current frame duration in ns */ + /* Calculate delta between new and current frame duration in us */ unsigned int frame_duration_delta = div64_u64(((unsigned long long)( - static_ramp_variables->ramp_current_frame_duration_in_ns) * + current_duration_in_us) * (1000 - frame_duration_ratio)), 1000); /* Adjust frame duration delta based on ratio between current and * standard frame duration (frame duration at 60 Hz refresh rate). 
*/ unsigned int ramp_rate_interpolated = div64_u64(((unsigned long long)( - frame_duration_delta) * static_ramp_variables-> - ramp_current_frame_duration_in_ns), 16666666); + frame_duration_delta) * current_duration_in_us), 16666); /* Going to a higher refresh rate (lower frame duration) */ - if (static_ramp_variables->ramp_direction_is_up) { + if (ramp_direction_is_up) { /* reduce frame duration */ - static_ramp_variables->ramp_current_frame_duration_in_ns -= - ramp_rate_interpolated; - - /* min frame duration */ - frame_duration = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - core_freesync->map[index].state. - nominal_refresh_rate_in_micro_hz))); + current_duration_in_us -= ramp_rate_interpolated; /* adjust for frame duration below min */ - if (static_ramp_variables->ramp_current_frame_duration_in_ns <= - frame_duration) { - - static_ramp_variables->ramp_is_active = false; - static_ramp_variables-> - ramp_current_frame_duration_in_ns = - frame_duration; + if (current_duration_in_us <= target_duration_in_us) { + in_out_vrr->fixed.ramping_active = false; + in_out_vrr->fixed.ramping_done = true; + current_duration_in_us = + calc_duration_in_us_from_refresh_in_uhz( + in_out_vrr->fixed.target_refresh_in_uhz); } /* Going to a lower refresh rate (larger frame duration) */ } else { /* increase frame duration */ - static_ramp_variables->ramp_current_frame_duration_in_ns += - ramp_rate_interpolated; - - /* max frame duration */ - frame_duration = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - core_freesync->map[index].caps->min_refresh_in_micro_hz))); + current_duration_in_us += ramp_rate_interpolated; /* adjust for frame duration above max */ - if (static_ramp_variables->ramp_current_frame_duration_in_ns >= - frame_duration) { - - static_ramp_variables->ramp_is_active = false; - static_ramp_variables-> - ramp_current_frame_duration_in_ns = - frame_duration; + if (current_duration_in_us >= target_duration_in_us) { + in_out_vrr->fixed.ramping_active = false; + in_out_vrr->fixed.ramping_done = true; + current_duration_in_us = + calc_duration_in_us_from_refresh_in_uhz( + in_out_vrr->fixed.target_refresh_in_uhz); } } - calc_v_total_from_duration(stream, static_ramp_variables-> - ramp_current_frame_duration_in_ns, v_total); -} - -static void reset_freesync_state_variables(struct freesync_state* state) -{ - state->static_ramp.ramp_is_active = false; - if (state->nominal_refresh_rate_in_micro_hz) - state->static_ramp.ramp_current_frame_duration_in_ns = - ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - state->nominal_refresh_rate_in_micro_hz))); - - state->btr.btr_active = false; - state->btr.frame_counter = 0; - state->btr.frames_to_insert = 0; - state->btr.inserted_frame_duration_in_us = 0; - state->btr.program_btr = false; - - state->fixed_refresh.fixed_active = false; - state->fixed_refresh.program_fixed = false; -} -/* - * Sets freesync mode on a stream depending on current freesync state. 
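update_v_total_for_static_ramp() above walks the frame duration toward the fixed-refresh target at roughly 65 Hz per second, recomputing the step from the current duration on every frame and clamping once the target is crossed. The sketch below repeats that arithmetic in isolation, with plain 64-bit division in place of div64_u64 and a made-up 60 Hz to 40 Hz ramp, mainly to show the step size and that the loop terminates:

#include <stdio.h>

/* Same constant as the patch: (1000 / 60) * 65, i.e. ~65 Hz/second */
#define RAMP_DELTA_PER_FRAME ((1000 / 60) * 65)

/* One ramp step, in microseconds; returns the frame duration to program
 * for the next frame. */
static unsigned int ramp_step_us(unsigned int current_duration_in_us,
				 unsigned int target_duration_in_us)
{
	unsigned long long ratio = 1000000ULL /
		(1000 + (unsigned long long)RAMP_DELTA_PER_FRAME *
			current_duration_in_us / 1000000);
	unsigned long long delta = (unsigned long long)current_duration_in_us *
		(1000 - ratio) / 1000;
	unsigned long long step = delta * current_duration_in_us / 16666;

	if (current_duration_in_us < target_duration_in_us) {
		/* going to a lower refresh rate: grow the frame duration */
		current_duration_in_us += (unsigned int)step;
		if (current_duration_in_us >= target_duration_in_us)
			current_duration_in_us = target_duration_in_us;
	} else {
		/* going to a higher refresh rate: shrink the frame duration */
		current_duration_in_us -= (unsigned int)step;
		if (current_duration_in_us <= target_duration_in_us)
			current_duration_in_us = target_duration_in_us;
	}
	return current_duration_in_us;
}

int main(void)
{
	unsigned int cur = 16666, target = 25000, frames = 0;

	while (cur != target) {
		cur = ramp_step_us(cur, target);
		frames++;
	}
	printf("reached %u us after %u frames\n", cur, frames);
	return 0;
}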
- */ -static bool set_freesync_on_streams(struct core_freesync *core_freesync, - struct dc_stream_state **streams, int num_streams) -{ - int v_total_nominal = 0, v_total_min = 0, v_total_max = 0; - unsigned int stream_idx, map_index = 0; - struct freesync_state *state; + v_total = calc_v_total_from_duration(stream, + in_out_vrr, + current_duration_in_us); - if (num_streams == 0 || streams == NULL || num_streams > 1) - return false; - for (stream_idx = 0; stream_idx < num_streams; stream_idx++) { - - map_index = map_index_from_stream(core_freesync, - streams[stream_idx]); - - state = &core_freesync->map[map_index].state; - - if (core_freesync->map[map_index].caps->supported) { - - /* Fullscreen has the topmost priority. If the - * fullscreen bit is set, we are in a fullscreen - * application where it should not matter if it is - * static screen. We should not check the static_screen - * or video bit. - * - * Special cases of fullscreen include btr and fixed - * refresh. We program btr on every flip and involves - * programming full range right before the last inserted frame. - * However, we do not want to program the full freesync range - * when fixed refresh is active, because we only program - * that logic once and this will override it. - */ - if (core_freesync->map[map_index].user_enable. - enable_for_gaming == true && - state->fullscreen == true && - state->fixed_refresh.fixed_active == false) { - /* Enable freesync */ - - v_total_min = state->freesync_range.vmin; - v_total_max = state->freesync_range.vmax; - - /* Update the freesync context for the stream */ - update_stream_freesync_context(core_freesync, - streams[stream_idx]); - - adjust_vmin_vmax(core_freesync, streams, - num_streams, map_index, - v_total_min, - v_total_max); - - return true; - - } else if (core_freesync->map[map_index].user_enable. 
- enable_for_video && state->video == true) { - /* Enable 48Hz feature */ - - calc_v_total_from_duration(streams[stream_idx], - state->time.update_duration_in_ns, - &v_total_nominal); - - /* Program only if v_total_nominal is in range*/ - if (v_total_nominal >= - streams[stream_idx]->timing.v_total) { - - /* Update the freesync context for - * the stream - */ - update_stream_freesync_context( - core_freesync, - streams[stream_idx]); - - adjust_vmin_vmax( - core_freesync, streams, - num_streams, map_index, - v_total_nominal, - v_total_nominal); - } - return true; - - } else { - /* Disable freesync */ - v_total_nominal = streams[stream_idx]-> - timing.v_total; - - /* Update the freesync context for - * the stream - */ - update_stream_freesync_context( - core_freesync, - streams[stream_idx]); - - adjust_vmin_vmax(core_freesync, streams, - num_streams, map_index, - v_total_nominal, - v_total_nominal); - - /* Reset the cached variables */ - reset_freesync_state_variables(state); - - return true; - } - } else { - /* Disable freesync */ - v_total_nominal = streams[stream_idx]-> - timing.v_total; - /* - * we have to reset drr always even sink does - * not support freesync because a former stream has - * be programmed - */ - adjust_vmin_vmax(core_freesync, streams, - num_streams, map_index, - v_total_nominal, - v_total_nominal); - /* Reset the cached variables */ - reset_freesync_state_variables(state); - } - - } - - return false; + in_out_vrr->adjust.v_total_min = v_total; + in_out_vrr->adjust.v_total_max = v_total; } -static void set_static_ramp_variables(struct core_freesync *core_freesync, - unsigned int index, bool enable_static_screen) -{ - unsigned int frame_duration = 0; - unsigned int nominal_refresh_rate = core_freesync->map[index].state. - nominal_refresh_rate_in_micro_hz; - unsigned int min_refresh_rate= core_freesync->map[index].caps-> - min_refresh_in_micro_hz; - struct gradual_static_ramp *static_ramp_variables = - &core_freesync->map[index].state.static_ramp; - - /* If we are ENABLING static screen, refresh rate should go DOWN. - * If we are DISABLING static screen, refresh rate should go UP. - */ - if (enable_static_screen) - static_ramp_variables->ramp_direction_is_up = false; - else - static_ramp_variables->ramp_direction_is_up = true; - - /* If ramp is not active, set initial frame duration depending on - * whether we are enabling/disabling static screen mode. 
If the ramp is - * already active, ramp should continue in the opposite direction - * starting with the current frame duration - */ - if (!static_ramp_variables->ramp_is_active) { - if (enable_static_screen == true) { - /* Going to lower refresh rate, so start from max - * refresh rate (min frame duration) - */ - frame_duration = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - nominal_refresh_rate))); - } else { - /* Going to higher refresh rate, so start from min - * refresh rate (max frame duration) - */ - frame_duration = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - min_refresh_rate))); - } - static_ramp_variables-> - ramp_current_frame_duration_in_ns = frame_duration; - - static_ramp_variables->ramp_is_active = true; - } -} - -void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams) +static void apply_below_the_range(struct core_freesync *core_freesync, + const struct dc_stream_state *stream, + unsigned int last_render_time_in_us, + struct mod_vrr_params *in_out_vrr) { - unsigned int index, v_total, inserted_frame_v_total = 0; - unsigned int min_frame_duration_in_ns, vmax, vmin = 0; - struct freesync_state *state; - struct core_freesync *core_freesync = NULL; - struct dc_static_screen_events triggers = {0}; - - if (mod_freesync == NULL) - return; - - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - - if (core_freesync->num_entities == 0) - return; - - index = map_index_from_stream(core_freesync, - streams[0]); - - if (core_freesync->map[index].caps->supported == false) - return; - - state = &core_freesync->map[index].state; - - /* Below the Range Logic */ - - /* Only execute if in fullscreen mode */ - if (state->fullscreen == true && - core_freesync->map[index].user_enable.enable_for_gaming && - core_freesync->map[index].caps->btr_supported && - state->btr.btr_active) { + unsigned int inserted_frame_duration_in_us = 0; + unsigned int mid_point_frames_ceil = 0; + unsigned int mid_point_frames_floor = 0; + unsigned int frame_time_in_us = 0; + unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF; + unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF; + unsigned int frames_to_insert = 0; + unsigned int min_frame_duration_in_ns = 0; + unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; - /* TODO: pass in flag for Pre-DCE12 ASIC - * in order for frame variable duration to take affect, - * it needs to be done one VSYNC early, which is at - * frameCounter == 1. 
- * For DCE12 and newer updates to V_TOTAL_MIN/MAX - * will take affect on current frame - */ - if (state->btr.frames_to_insert == state->btr.frame_counter) { + min_frame_duration_in_ns = ((unsigned int) (div64_u64( + (1000000000ULL * 1000000), + in_out_vrr->max_refresh_in_uhz))); - min_frame_duration_in_ns = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - state->nominal_refresh_rate_in_micro_hz))); + /* Program BTR */ + if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { + /* Exit Below the Range */ + if (in_out_vrr->btr.btr_active) { + in_out_vrr->btr.frame_counter = 0; + in_out_vrr->btr.btr_active = false; - vmin = state->freesync_range.vmin; + /* Exit Fixed Refresh mode */ + } else if (in_out_vrr->fixed.fixed_active) { - inserted_frame_v_total = vmin; + in_out_vrr->fixed.frame_counter++; - if (min_frame_duration_in_ns / 1000) - inserted_frame_v_total = - state->btr.inserted_frame_duration_in_us * - vmin / (min_frame_duration_in_ns / 1000); + if (in_out_vrr->fixed.frame_counter > + FIXED_REFRESH_EXIT_FRAME_COUNT) { + in_out_vrr->fixed.frame_counter = 0; + in_out_vrr->fixed.fixed_active = false; + } + } + } else if (last_render_time_in_us > max_render_time_in_us) { + /* Enter Below the Range */ + if (!in_out_vrr->btr.btr_active && + in_out_vrr->btr.btr_enabled) { + in_out_vrr->btr.btr_active = true; - /* Set length of inserted frames as v_total_max*/ - vmax = inserted_frame_v_total; - vmin = inserted_frame_v_total; + /* Enter Fixed Refresh mode */ + } else if (!in_out_vrr->fixed.fixed_active && + !in_out_vrr->btr.btr_enabled) { + in_out_vrr->fixed.frame_counter++; - /* Program V_TOTAL */ - adjust_vmin_vmax(core_freesync, streams, - num_streams, index, - vmin, vmax); + if (in_out_vrr->fixed.frame_counter > + FIXED_REFRESH_ENTER_FRAME_COUNT) { + in_out_vrr->fixed.frame_counter = 0; + in_out_vrr->fixed.fixed_active = true; + } } + } - if (state->btr.frame_counter > 0) - state->btr.frame_counter--; + /* BTR set to "not active" so disengage */ + if (!in_out_vrr->btr.btr_active) { + in_out_vrr->btr.btr_active = false; + in_out_vrr->btr.inserted_duration_in_us = 0; + in_out_vrr->btr.frames_to_insert = 0; + in_out_vrr->btr.frame_counter = 0; /* Restore FreeSync */ - if (state->btr.frame_counter == 0) - set_freesync_on_streams(core_freesync, streams, num_streams); - } - - /* If in fullscreen freesync mode or in video, do not program - * static screen ramp values - */ - if (state->fullscreen == true || state->video == true) { + in_out_vrr->adjust.v_total_min = + calc_v_total_from_refresh(stream, + in_out_vrr->max_refresh_in_uhz); + in_out_vrr->adjust.v_total_max = + calc_v_total_from_refresh(stream, + in_out_vrr->min_refresh_in_uhz); + /* BTR set to "active" so engage */ + } else { - state->static_ramp.ramp_is_active = false; + /* Calculate number of midPoint frames that could fit within + * the render time interval- take ceil of this value + */ + mid_point_frames_ceil = (last_render_time_in_us + + in_out_vrr->btr.mid_point_in_us - 1) / + in_out_vrr->btr.mid_point_in_us; - return; - } + if (mid_point_frames_ceil > 0) { + frame_time_in_us = last_render_time_in_us / + mid_point_frames_ceil; + delta_from_mid_point_in_us_1 = + (in_out_vrr->btr.mid_point_in_us > + frame_time_in_us) ? 
+ (in_out_vrr->btr.mid_point_in_us - frame_time_in_us) : + (frame_time_in_us - in_out_vrr->btr.mid_point_in_us); + } - /* Gradual Static Screen Ramping Logic */ + /* Calculate number of midPoint frames that could fit within + * the render time interval- take floor of this value + */ + mid_point_frames_floor = last_render_time_in_us / + in_out_vrr->btr.mid_point_in_us; - /* Execute if ramp is active and user enabled freesync static screen*/ - if (state->static_ramp.ramp_is_active && - core_freesync->map[index].user_enable.enable_for_static) { + if (mid_point_frames_floor > 0) { - calc_v_total_for_static_ramp(core_freesync, streams[0], - index, &v_total); + frame_time_in_us = last_render_time_in_us / + mid_point_frames_floor; + delta_from_mid_point_in_us_2 = + (in_out_vrr->btr.mid_point_in_us > + frame_time_in_us) ? + (in_out_vrr->btr.mid_point_in_us - frame_time_in_us) : + (frame_time_in_us - in_out_vrr->btr.mid_point_in_us); + } - /* Update the freesync context for the stream */ - update_stream_freesync_context(core_freesync, streams[0]); + /* Choose number of frames to insert based on how close it + * can get to the mid point of the variable range. + */ + if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) + frames_to_insert = mid_point_frames_ceil; + else + frames_to_insert = mid_point_frames_floor; - /* Program static screen ramp values */ - adjust_vmin_vmax(core_freesync, streams, - num_streams, index, - v_total, - v_total); + /* Either we've calculated the number of frames to insert, + * or we need to insert min duration frames + */ + if (frames_to_insert > 0) + inserted_frame_duration_in_us = last_render_time_in_us / + frames_to_insert; - triggers.overlay_update = true; - triggers.surface_update = true; + if (inserted_frame_duration_in_us < + (1000000 / in_out_vrr->max_refresh_in_uhz)) + inserted_frame_duration_in_us = + (1000000 / in_out_vrr->max_refresh_in_uhz); - dc_stream_set_static_screen_events(core_freesync->dc, streams, - num_streams, &triggers); + /* Cache the calculated variables */ + in_out_vrr->btr.inserted_duration_in_us = + inserted_frame_duration_in_us; + in_out_vrr->btr.frames_to_insert = frames_to_insert; + in_out_vrr->btr.frame_counter = frames_to_insert; + + in_out_vrr->adjust.v_total_min = + calc_v_total_from_duration(stream, in_out_vrr, + in_out_vrr->btr.inserted_duration_in_us); + in_out_vrr->adjust.v_total_max = + in_out_vrr->adjust.v_total_min; } } -void mod_freesync_update_state(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams, - struct mod_freesync_params *freesync_params) +static void apply_fixed_refresh(struct core_freesync *core_freesync, + const struct dc_stream_state *stream, + unsigned int last_render_time_in_us, + struct mod_vrr_params *in_out_vrr) { - bool freesync_program_required = false; - unsigned int stream_index; - struct freesync_state *state; - struct core_freesync *core_freesync = NULL; - struct dc_static_screen_events triggers = {0}; + bool update = false; + unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; - if (mod_freesync == NULL) - return; - - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - - if (core_freesync->num_entities == 0) - return; + if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { + /* Exit Fixed Refresh mode */ + if (in_out_vrr->fixed.fixed_active) { + in_out_vrr->fixed.frame_counter++; - for(stream_index = 0; stream_index < num_streams; stream_index++) { - - unsigned int map_index = map_index_from_stream(core_freesync, - 
streams[stream_index]); - - bool is_embedded = dc_is_embedded_signal( - streams[stream_index]->sink->sink_signal); - - struct freesync_registry_options *opts = &core_freesync->opts; - - state = &core_freesync->map[map_index].state; - - switch (freesync_params->state){ - case FREESYNC_STATE_FULLSCREEN: - state->fullscreen = freesync_params->enable; - freesync_program_required = true; - state->windowed_fullscreen = - freesync_params->windowed_fullscreen; - break; - case FREESYNC_STATE_STATIC_SCREEN: - /* Static screen ramp is disabled by default, but can - * be enabled through regkey. - */ - if ((is_embedded && opts->drr_internal_supported) || - (!is_embedded && opts->drr_external_supported)) - - if (state->static_screen != - freesync_params->enable) { - - /* Change the state flag */ - state->static_screen = - freesync_params->enable; - - /* Update static screen ramp */ - set_static_ramp_variables(core_freesync, - map_index, - freesync_params->enable); - } - /* We program the ramp starting next VUpdate */ - break; - case FREESYNC_STATE_VIDEO: - /* Change core variables only if there is a change*/ - if(freesync_params->update_duration_in_ns != - state->time.update_duration_in_ns) { - - state->video = freesync_params->enable; - state->time.update_duration_in_ns = - freesync_params->update_duration_in_ns; - - freesync_program_required = true; + if (in_out_vrr->fixed.frame_counter > + FIXED_REFRESH_EXIT_FRAME_COUNT) { + in_out_vrr->fixed.frame_counter = 0; + in_out_vrr->fixed.fixed_active = false; + in_out_vrr->fixed.target_refresh_in_uhz = 0; + update = true; } - break; - case FREESYNC_STATE_NONE: - /* handle here to avoid warning */ - break; } - } - - /* Update mask */ - triggers.overlay_update = true; - triggers.surface_update = true; - - dc_stream_set_static_screen_events(core_freesync->dc, streams, - num_streams, &triggers); - - if (freesync_program_required) - /* Program freesync according to current state*/ - set_freesync_on_streams(core_freesync, streams, num_streams); -} - - -bool mod_freesync_get_state(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, - struct mod_freesync_params *freesync_params) -{ - unsigned int index = 0; - struct core_freesync *core_freesync = NULL; - - if (mod_freesync == NULL) - return false; + } else if (last_render_time_in_us > max_render_time_in_us) { + /* Enter Fixed Refresh mode */ + if (!in_out_vrr->fixed.fixed_active) { + in_out_vrr->fixed.frame_counter++; - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - index = map_index_from_stream(core_freesync, stream); - - if (core_freesync->map[index].state.fullscreen) { - freesync_params->state = FREESYNC_STATE_FULLSCREEN; - freesync_params->enable = true; - } else if (core_freesync->map[index].state.static_screen) { - freesync_params->state = FREESYNC_STATE_STATIC_SCREEN; - freesync_params->enable = true; - } else if (core_freesync->map[index].state.video) { - freesync_params->state = FREESYNC_STATE_VIDEO; - freesync_params->enable = true; - } else { - freesync_params->state = FREESYNC_STATE_NONE; - freesync_params->enable = false; + if (in_out_vrr->fixed.frame_counter > + FIXED_REFRESH_ENTER_FRAME_COUNT) { + in_out_vrr->fixed.frame_counter = 0; + in_out_vrr->fixed.fixed_active = true; + in_out_vrr->fixed.target_refresh_in_uhz = + in_out_vrr->max_refresh_in_uhz; + update = true; + } + } } - freesync_params->update_duration_in_ns = - core_freesync->map[index].state.time.update_duration_in_ns; - - freesync_params->windowed_fullscreen = - core_freesync->map[index].state.windowed_fullscreen; 
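The core of the new apply_below_the_range() shown above is the choice between the ceiling and the floor of last_render_time / mid_point: whichever frame count puts the per-frame duration closer to the mid point of the VRR window wins, and that duration is what gets programmed for the inserted frames. A standalone sketch of just that selection; the 40-144 Hz window and the 45 ms render time are made-up example values:

#include <stdio.h>

/* Pick how many frames to insert so each repeated frame lands as close
 * as possible to the mid point of the variable refresh window. */
static unsigned int btr_frames_to_insert(unsigned int last_render_time_in_us,
					 unsigned int mid_point_in_us)
{
	unsigned int frames_ceil, frames_floor, t;
	unsigned int d_ceil = 0xFFFFFFFF, d_floor = 0xFFFFFFFF;

	frames_ceil = (last_render_time_in_us + mid_point_in_us - 1) /
		      mid_point_in_us;
	if (frames_ceil > 0) {
		t = last_render_time_in_us / frames_ceil;
		d_ceil = (mid_point_in_us > t) ? mid_point_in_us - t
					       : t - mid_point_in_us;
	}

	frames_floor = last_render_time_in_us / mid_point_in_us;
	if (frames_floor > 0) {
		t = last_render_time_in_us / frames_floor;
		d_floor = (mid_point_in_us > t) ? mid_point_in_us - t
						: t - mid_point_in_us;
	}

	return (d_ceil < d_floor) ? frames_ceil : frames_floor;
}

int main(void)
{
	/* 40-144 Hz window: durations 6944..25000 us, mid point 15972 us */
	unsigned int mid_point_in_us = 15972;
	unsigned int last_render_time_in_us = 45000;	/* a ~22 fps frame */
	unsigned int n = btr_frames_to_insert(last_render_time_in_us,
					      mid_point_in_us);

	/* prints "insert 3 frames of 15000 us each" */
	printf("insert %u frames of %u us each\n", n,
	       last_render_time_in_us / n);
	return 0;
}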
- - return true; -} - -bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams, - struct mod_freesync_user_enable *user_enable) -{ - unsigned int stream_index, map_index; - int persistent_data = 0; - struct persistent_data_flag flag; - struct dc *dc = NULL; - struct core_freesync *core_freesync = NULL; - - if (mod_freesync == NULL) - return false; - - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - dc = core_freesync->dc; - - flag.save_per_edid = true; - flag.save_per_link = false; - - for(stream_index = 0; stream_index < num_streams; - stream_index++){ - - map_index = map_index_from_stream(core_freesync, - streams[stream_index]); - - core_freesync->map[map_index].user_enable = *user_enable; - - /* Write persistent data in registry*/ - if (core_freesync->map[map_index].user_enable. - enable_for_gaming) - persistent_data = persistent_data | 1; - if (core_freesync->map[map_index].user_enable. - enable_for_static) - persistent_data = persistent_data | 2; - if (core_freesync->map[map_index].user_enable. - enable_for_video) - persistent_data = persistent_data | 4; - - dm_write_persistent_data(dc->ctx, - streams[stream_index]->sink, - FREESYNC_REGISTRY_NAME, - "userenable", - &persistent_data, - sizeof(int), - &flag); + if (update) { + if (in_out_vrr->fixed.fixed_active) { + in_out_vrr->adjust.v_total_min = + calc_v_total_from_refresh( + stream, in_out_vrr->max_refresh_in_uhz); + in_out_vrr->adjust.v_total_max = + in_out_vrr->adjust.v_total_min; + } else { + in_out_vrr->adjust.v_total_min = + calc_v_total_from_refresh( + stream, in_out_vrr->max_refresh_in_uhz); + in_out_vrr->adjust.v_total_max = + in_out_vrr->adjust.v_total_min; + } } - - set_freesync_on_streams(core_freesync, streams, num_streams); - - return true; } -bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, - struct mod_freesync_user_enable *user_enable) -{ - unsigned int index = 0; - struct core_freesync *core_freesync = NULL; - - if (mod_freesync == NULL) - return false; - - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - index = map_index_from_stream(core_freesync, stream); - - *user_enable = core_freesync->map[index].user_enable; - - return true; -} - -bool mod_freesync_get_static_ramp_active(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, - bool *is_ramp_active) -{ - unsigned int index = 0; - struct core_freesync *core_freesync = NULL; - - if (mod_freesync == NULL) - return false; - - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - index = map_index_from_stream(core_freesync, stream); - - *is_ramp_active = - core_freesync->map[index].state.static_ramp.ramp_is_active; - - return true; -} - -bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync, - struct dc_stream_state *streams, - unsigned int min_refresh, - unsigned int max_refresh, - struct mod_freesync_caps *caps) +static bool vrr_settings_require_update(struct core_freesync *core_freesync, + struct mod_freesync_config *in_config, + unsigned int min_refresh_in_uhz, + unsigned int max_refresh_in_uhz, + struct mod_vrr_params *in_vrr) { - unsigned int index = 0; - struct core_freesync *core_freesync; - struct freesync_state *state; - - if (mod_freesync == NULL) - return false; - - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - index = map_index_from_stream(core_freesync, streams); - state = &core_freesync->map[index].state; - - if (max_refresh == 0) - max_refresh = state->nominal_refresh_rate_in_micro_hz; 
- - if (min_refresh == 0) { - /* Restore defaults */ - calc_freesync_range(core_freesync, streams, state, - core_freesync->map[index].caps-> - min_refresh_in_micro_hz, - state->nominal_refresh_rate_in_micro_hz); - } else { - calc_freesync_range(core_freesync, streams, - state, - min_refresh, - max_refresh); - - /* Program vtotal min/max */ - adjust_vmin_vmax(core_freesync, &streams, 1, index, - state->freesync_range.vmin, - state->freesync_range.vmax); - } - - if (min_refresh != 0 && - dc_is_embedded_signal(streams->sink->sink_signal) && - (max_refresh - min_refresh >= 10000000)) { - caps->supported = true; - caps->min_refresh_in_micro_hz = min_refresh; - caps->max_refresh_in_micro_hz = max_refresh; + if (in_vrr->state != in_config->state) { + return true; + } else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED && + in_vrr->fixed.target_refresh_in_uhz != + in_config->min_refresh_in_uhz) { + return true; + } else if (in_vrr->min_refresh_in_uhz != min_refresh_in_uhz) { + return true; + } else if (in_vrr->max_refresh_in_uhz != max_refresh_in_uhz) { + return true; } - /* Update the stream */ - update_stream(core_freesync, streams); - - return true; -} - -bool mod_freesync_get_min_max(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, - unsigned int *min_refresh, - unsigned int *max_refresh) -{ - unsigned int index = 0; - struct core_freesync *core_freesync = NULL; - - if (mod_freesync == NULL) - return false; - - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - index = map_index_from_stream(core_freesync, stream); - - *min_refresh = - core_freesync->map[index].state.freesync_range.min_refresh; - *max_refresh = - core_freesync->map[index].state.freesync_range.max_refresh; - - return true; + return false; } bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, + const struct dc_stream_state *stream, unsigned int *vmin, unsigned int *vmax) { - unsigned int index = 0; - struct core_freesync *core_freesync = NULL; - - if (mod_freesync == NULL) - return false; - - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - index = map_index_from_stream(core_freesync, stream); - - *vmin = - core_freesync->map[index].state.freesync_range.vmin; - *vmax = - core_freesync->map[index].state.freesync_range.vmax; + *vmin = stream->adjust.v_total_min; + *vmax = stream->adjust.v_total_max; return true; } @@ -1189,7 +471,6 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync, unsigned int *nom_v_pos, unsigned int *v_pos) { - unsigned int index = 0; struct core_freesync *core_freesync = NULL; struct crtc_position position; @@ -1197,7 +478,6 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync, return false; core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - index = map_index_from_stream(core_freesync, stream); if (dc_stream_get_crtc_position(core_freesync->dc, &stream, 1, &position.vertical_count, @@ -1212,310 +492,368 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync, return false; } -void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams) +void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + const struct mod_vrr_params *vrr, + struct dc_info_packet *infopacket) { - unsigned int stream_index, map_index; - struct freesync_state *state; - struct core_freesync *core_freesync = NULL; - struct dc_static_screen_events triggers = {0}; - unsigned long long temp = 0; + /* 
SPD info packet for FreeSync */ + unsigned char checksum = 0; + unsigned int idx, payload_size = 0; - if (mod_freesync == NULL) + /* Check if Freesync is supported. Return if false. If true, + * set the corresponding bit in the info packet + */ + if (!vrr->supported) return; - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); + if (dc_is_hdmi_signal(stream->signal)) { - for (stream_index = 0; stream_index < num_streams; stream_index++) { - map_index = map_index_from_stream(core_freesync, - streams[stream_index]); - - state = &core_freesync->map[map_index].state; - - /* Update the field rate for new timing */ - temp = streams[stream_index]->timing.pix_clk_khz; - temp *= 1000ULL * 1000ULL * 1000ULL; - temp = div_u64(temp, - streams[stream_index]->timing.h_total); - temp = div_u64(temp, - streams[stream_index]->timing.v_total); - state->nominal_refresh_rate_in_micro_hz = - (unsigned int) temp; - - if (core_freesync->map[map_index].caps->supported) { - - /* Update the stream */ - update_stream(core_freesync, streams[stream_index]); - - /* Calculate vmin/vmax and refresh rate for - * current mode - */ - calc_freesync_range(core_freesync, *streams, state, - core_freesync->map[map_index].caps-> - min_refresh_in_micro_hz, - state->nominal_refresh_rate_in_micro_hz); - - /* Update mask */ - triggers.overlay_update = true; - triggers.surface_update = true; - - dc_stream_set_static_screen_events(core_freesync->dc, - streams, num_streams, - &triggers); - } - } + /* HEADER */ - /* Program freesync according to current state*/ - set_freesync_on_streams(core_freesync, streams, num_streams); -} + /* HB0 = Packet Type = 0x83 (Source Product + * Descriptor InfoFrame) + */ + infopacket->hb0 = DC_HDMI_INFOFRAME_TYPE_SPD; -/* Add the timestamps to the cache and determine whether BTR programming - * is required, depending on the times calculated - */ -static void update_timestamps(struct core_freesync *core_freesync, - const struct dc_stream_state *stream, unsigned int map_index, - unsigned int last_render_time_in_us) -{ - struct freesync_state *state = &core_freesync->map[map_index].state; + /* HB1 = Version = 0x01 */ + infopacket->hb1 = 0x01; - state->time.render_times[state->time.render_times_index] = - last_render_time_in_us; - state->time.render_times_index++; + /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */ + infopacket->hb2 = 0x08; - if (state->time.render_times_index >= RENDER_TIMES_MAX_COUNT) - state->time.render_times_index = 0; + payload_size = 0x08; - if (last_render_time_in_us + BTR_EXIT_MARGIN < - state->time.max_render_time_in_us) { + } else if (dc_is_dp_signal(stream->signal)) { - /* Exit Below the Range */ - if (state->btr.btr_active) { + /* HEADER */ - state->btr.program_btr = true; - state->btr.btr_active = false; - state->btr.frame_counter = 0; + /* HB0 = Secondary-data Packet ID = 0 - Only non-zero + * when used to associate audio related info packets + */ + infopacket->hb0 = 0x00; - /* Exit Fixed Refresh mode */ - } else if (state->fixed_refresh.fixed_active) { + /* HB1 = Packet Type = 0x83 (Source Product + * Descriptor InfoFrame) + */ + infopacket->hb1 = DC_HDMI_INFOFRAME_TYPE_SPD; - state->fixed_refresh.frame_counter++; + /* HB2 = [Bits 7:0 = Least significant eight bits - + * For INFOFRAME, the value must be 1Bh] + */ + infopacket->hb2 = 0x1B; - if (state->fixed_refresh.frame_counter > - FIXED_REFRESH_EXIT_FRAME_COUNT) { - state->fixed_refresh.frame_counter = 0; - state->fixed_refresh.program_fixed = true; - state->fixed_refresh.fixed_active = false; - } - } + /* HB3 = [Bits 
7:2 = INFOFRAME SDP Version Number = 0x1] + * [Bits 1:0 = Most significant two bits = 0x00] + */ + infopacket->hb3 = 0x04; - } else if (last_render_time_in_us > state->time.max_render_time_in_us) { + payload_size = 0x1B; + } - /* Enter Below the Range */ - if (!state->btr.btr_active && - core_freesync->map[map_index].caps->btr_supported) { + /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */ + infopacket->sb[1] = 0x1A; - state->btr.program_btr = true; - state->btr.btr_active = true; + /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */ + infopacket->sb[2] = 0x00; - /* Enter Fixed Refresh mode */ - } else if (!state->fixed_refresh.fixed_active && - !core_freesync->map[map_index].caps->btr_supported) { + /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */ + infopacket->sb[3] = 0x00; - state->fixed_refresh.frame_counter++; + /* PB4 = Reserved */ - if (state->fixed_refresh.frame_counter > - FIXED_REFRESH_ENTER_FRAME_COUNT) { - state->fixed_refresh.frame_counter = 0; - state->fixed_refresh.program_fixed = true; - state->fixed_refresh.fixed_active = true; - } - } - } + /* PB5 = Reserved */ - /* When Below the Range is active, must react on every frame */ - if (state->btr.btr_active) - state->btr.program_btr = true; -} + /* PB6 = [Bits 7:3 = Reserved] */ -static void apply_below_the_range(struct core_freesync *core_freesync, - struct dc_stream_state *stream, unsigned int map_index, - unsigned int last_render_time_in_us) -{ - unsigned int inserted_frame_duration_in_us = 0; - unsigned int mid_point_frames_ceil = 0; - unsigned int mid_point_frames_floor = 0; - unsigned int frame_time_in_us = 0; - unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF; - unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF; - unsigned int frames_to_insert = 0; - unsigned int min_frame_duration_in_ns = 0; - struct freesync_state *state = &core_freesync->map[map_index].state; + /* PB6 = [Bit 0 = FreeSync Supported] */ + if (vrr->state != VRR_STATE_UNSUPPORTED) + infopacket->sb[6] |= 0x01; - if (!state->btr.program_btr) - return; + /* PB6 = [Bit 1 = FreeSync Enabled] */ + if (vrr->state != VRR_STATE_DISABLED && + vrr->state != VRR_STATE_UNSUPPORTED) + infopacket->sb[6] |= 0x02; - state->btr.program_btr = false; + /* PB6 = [Bit 2 = FreeSync Active] */ + if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || + vrr->state == VRR_STATE_ACTIVE_FIXED) + infopacket->sb[6] |= 0x04; - min_frame_duration_in_ns = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - state->nominal_refresh_rate_in_micro_hz))); + /* PB7 = FreeSync Minimum refresh rate (Hz) */ + infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000); - /* Program BTR */ + /* PB8 = FreeSync Maximum refresh rate (Hz) + * Note: We should never go above the field rate of the mode timing set. 
+ */ + infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000); - /* BTR set to "not active" so disengage */ - if (!state->btr.btr_active) + /* PB9 - PB27 = Reserved */ - /* Restore FreeSync */ - set_freesync_on_streams(core_freesync, &stream, 1); + /* Calculate checksum */ + checksum += infopacket->hb0; + checksum += infopacket->hb1; + checksum += infopacket->hb2; + checksum += infopacket->hb3; - /* BTR set to "active" so engage */ - else { + for (idx = 1; idx <= payload_size; idx++) + checksum += infopacket->sb[idx]; - /* Calculate number of midPoint frames that could fit within - * the render time interval- take ceil of this value - */ - mid_point_frames_ceil = (last_render_time_in_us + - state->btr.mid_point_in_us- 1) / - state->btr.mid_point_in_us; + /* PB0 = Checksum (one byte complement) */ + infopacket->sb[0] = (unsigned char)(0x100 - checksum); - if (mid_point_frames_ceil > 0) { + infopacket->valid = true; +} - frame_time_in_us = last_render_time_in_us / - mid_point_frames_ceil; - delta_from_mid_point_in_us_1 = - (state->btr.mid_point_in_us > - frame_time_in_us) ? - (state->btr.mid_point_in_us - frame_time_in_us): - (frame_time_in_us - state->btr.mid_point_in_us); - } +void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + struct mod_freesync_config *in_config, + struct mod_vrr_params *in_out_vrr) +{ + struct core_freesync *core_freesync = NULL; + unsigned long long nominal_field_rate_in_uhz = 0; + bool nominal_field_rate_in_range = true; + unsigned int refresh_range = 0; + unsigned int min_refresh_in_uhz = 0; + unsigned int max_refresh_in_uhz = 0; - /* Calculate number of midPoint frames that could fit within - * the render time interval- take floor of this value - */ - mid_point_frames_floor = last_render_time_in_us / - state->btr.mid_point_in_us; + if (mod_freesync == NULL) + return; - if (mid_point_frames_floor > 0) { + core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - frame_time_in_us = last_render_time_in_us / - mid_point_frames_floor; - delta_from_mid_point_in_us_2 = - (state->btr.mid_point_in_us > - frame_time_in_us) ? - (state->btr.mid_point_in_us - frame_time_in_us): - (frame_time_in_us - state->btr.mid_point_in_us); - } + /* Calculate nominal field rate for stream */ + nominal_field_rate_in_uhz = stream->timing.pix_clk_khz; + nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, + stream->timing.h_total); + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, + stream->timing.v_total); + + min_refresh_in_uhz = in_config->min_refresh_in_uhz; + max_refresh_in_uhz = in_config->max_refresh_in_uhz; + + // Don't allow min > max + if (min_refresh_in_uhz > max_refresh_in_uhz) + min_refresh_in_uhz = max_refresh_in_uhz; + + // Full range may be larger than current video timing, so cap at nominal + if (max_refresh_in_uhz > nominal_field_rate_in_uhz) + max_refresh_in_uhz = nominal_field_rate_in_uhz; + + /* Allow for some rounding error of actual video timing by taking ceil. + * For example, 144 Hz mode timing may actually be 143.xxx Hz when + * calculated from pixel rate and vertical/horizontal totals, but + * this should be allowed instead of blocking FreeSync. + */ + if ((min_refresh_in_uhz / 1000000) > + ((nominal_field_rate_in_uhz + 1000000 - 1) / 1000000)) + nominal_field_rate_in_range = false; - /* Choose number of frames to insert based on how close it - * can get to the mid point of the variable range. 
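mod_freesync_build_vrr_infopacket() above closes the SPD packet by summing the four header bytes and the payload and storing the one-byte complement in PB0, so the complete packet sums to zero modulo 256. A small self-contained check of that property; the header bytes follow the DP branch of the hunk, while the payload values (OUI byte, status bits, 40-144 Hz range) are example data:

#include <stdio.h>

#define SPD_PAYLOAD_SIZE 0x1B	/* DP SDP payload length used above */

/* One-byte complement checksum over header plus payload bytes 1..N. */
static unsigned char spd_checksum(const unsigned char hb[4],
				  const unsigned char *sb,
				  unsigned int payload_size)
{
	unsigned char sum = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		sum += hb[i];
	for (i = 1; i <= payload_size; i++)
		sum += sb[i];

	return (unsigned char)(0x100 - sum);
}

int main(void)
{
	/* DP header: SDP id 0x00, SPD type 0x83, length 0x1B, version 0x04 */
	unsigned char hb[4] = { 0x00, 0x83, 0x1B, 0x04 };
	unsigned char sb[SPD_PAYLOAD_SIZE + 1] = { 0 };
	unsigned char sum = 0;
	unsigned int i;

	sb[1] = 0x1A;			/* AMD IEEE OUI, byte 0 */
	sb[6] = 0x01 | 0x02 | 0x04;	/* supported, enabled, active */
	sb[7] = 40;			/* min refresh in Hz */
	sb[8] = 144;			/* max refresh in Hz */
	sb[0] = spd_checksum(hb, sb, SPD_PAYLOAD_SIZE);

	for (i = 0; i < 4; i++)
		sum += hb[i];
	for (i = 0; i <= SPD_PAYLOAD_SIZE; i++)
		sum += sb[i];

	/* prints "PB0 = 0x??, packet sum mod 256 = 0" */
	printf("PB0 = 0x%02x, packet sum mod 256 = %u\n", sb[0], sum);
	return 0;
}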
- */ - if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) - frames_to_insert = mid_point_frames_ceil; - else - frames_to_insert = mid_point_frames_floor; + // Full range may be larger than current video timing, so cap at nominal + if (min_refresh_in_uhz > nominal_field_rate_in_uhz) + min_refresh_in_uhz = nominal_field_rate_in_uhz; - /* Either we've calculated the number of frames to insert, - * or we need to insert min duration frames - */ - if (frames_to_insert > 0) - inserted_frame_duration_in_us = last_render_time_in_us / - frames_to_insert; + if (!vrr_settings_require_update(core_freesync, + in_config, min_refresh_in_uhz, max_refresh_in_uhz, + in_out_vrr)) + return; - if (inserted_frame_duration_in_us < - state->time.min_render_time_in_us) + in_out_vrr->state = in_config->state; - inserted_frame_duration_in_us = - state->time.min_render_time_in_us; + if ((in_config->state == VRR_STATE_UNSUPPORTED) || + (!nominal_field_rate_in_range)) { + in_out_vrr->state = VRR_STATE_UNSUPPORTED; + in_out_vrr->supported = false; + } else { + in_out_vrr->min_refresh_in_uhz = min_refresh_in_uhz; + in_out_vrr->max_duration_in_us = + calc_duration_in_us_from_refresh_in_uhz( + min_refresh_in_uhz); - /* Cache the calculated variables */ - state->btr.inserted_frame_duration_in_us = - inserted_frame_duration_in_us; - state->btr.frames_to_insert = frames_to_insert; - state->btr.frame_counter = frames_to_insert; + in_out_vrr->max_refresh_in_uhz = max_refresh_in_uhz; + in_out_vrr->min_duration_in_us = + calc_duration_in_us_from_refresh_in_uhz( + max_refresh_in_uhz); + refresh_range = in_out_vrr->max_refresh_in_uhz - + in_out_vrr->min_refresh_in_uhz; + + in_out_vrr->supported = true; + } + + in_out_vrr->fixed.ramping_active = in_config->ramping; + + in_out_vrr->btr.btr_enabled = in_config->btr; + if (in_out_vrr->max_refresh_in_uhz < + 2 * in_out_vrr->min_refresh_in_uhz) + in_out_vrr->btr.btr_enabled = false; + in_out_vrr->btr.btr_active = false; + in_out_vrr->btr.inserted_duration_in_us = 0; + in_out_vrr->btr.frames_to_insert = 0; + in_out_vrr->btr.frame_counter = 0; + in_out_vrr->btr.mid_point_in_us = + in_out_vrr->min_duration_in_us + + (in_out_vrr->max_duration_in_us - + in_out_vrr->min_duration_in_us) / 2; + + if (in_out_vrr->state == VRR_STATE_UNSUPPORTED) { + in_out_vrr->adjust.v_total_min = stream->timing.v_total; + in_out_vrr->adjust.v_total_max = stream->timing.v_total; + } else if (in_out_vrr->state == VRR_STATE_DISABLED) { + in_out_vrr->adjust.v_total_min = stream->timing.v_total; + in_out_vrr->adjust.v_total_max = stream->timing.v_total; + } else if (in_out_vrr->state == VRR_STATE_INACTIVE) { + in_out_vrr->adjust.v_total_min = stream->timing.v_total; + in_out_vrr->adjust.v_total_max = stream->timing.v_total; + } else if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE && + refresh_range >= MIN_REFRESH_RANGE_IN_US) { + in_out_vrr->adjust.v_total_min = + calc_v_total_from_refresh(stream, + in_out_vrr->max_refresh_in_uhz); + in_out_vrr->adjust.v_total_max = + calc_v_total_from_refresh(stream, + in_out_vrr->min_refresh_in_uhz); + } else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) { + in_out_vrr->fixed.target_refresh_in_uhz = + in_out_vrr->min_refresh_in_uhz; + if (in_out_vrr->fixed.ramping_active) { + in_out_vrr->fixed.fixed_active = true; + } else { + in_out_vrr->fixed.fixed_active = true; + in_out_vrr->adjust.v_total_min = + calc_v_total_from_refresh(stream, + in_out_vrr->fixed.target_refresh_in_uhz); + in_out_vrr->adjust.v_total_max = + in_out_vrr->adjust.v_total_min; + } + } else { + 
in_out_vrr->state = VRR_STATE_INACTIVE; + in_out_vrr->adjust.v_total_min = stream->timing.v_total; + in_out_vrr->adjust.v_total_max = stream->timing.v_total; } } -static void apply_fixed_refresh(struct core_freesync *core_freesync, - struct dc_stream_state *stream, unsigned int map_index) +void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync, + const struct dc_plane_state *plane, + const struct dc_stream_state *stream, + unsigned int curr_time_stamp_in_us, + struct mod_vrr_params *in_out_vrr) { - unsigned int vmin = 0, vmax = 0; - struct freesync_state *state = &core_freesync->map[map_index].state; + struct core_freesync *core_freesync = NULL; + unsigned int last_render_time_in_us = 0; + unsigned int average_render_time_in_us = 0; - if (!state->fixed_refresh.program_fixed) + if (mod_freesync == NULL) return; - state->fixed_refresh.program_fixed = false; + core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - /* Program Fixed Refresh */ + if (in_out_vrr->supported && + in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE) { + unsigned int i = 0; + unsigned int oldest_index = plane->time.index + 1; - /* Fixed Refresh set to "not active" so disengage */ - if (!state->fixed_refresh.fixed_active) { - set_freesync_on_streams(core_freesync, &stream, 1); + if (oldest_index >= DC_PLANE_UPDATE_TIMES_MAX) + oldest_index = 0; - /* Fixed Refresh set to "active" so engage (fix to max) */ - } else { + last_render_time_in_us = curr_time_stamp_in_us - + plane->time.prev_update_time_in_us; + + // Sum off all entries except oldest one + for (i = 0; i < DC_PLANE_UPDATE_TIMES_MAX; i++) { + average_render_time_in_us += + plane->time.time_elapsed_in_us[i]; + } + average_render_time_in_us -= + plane->time.time_elapsed_in_us[oldest_index]; + + // Add render time for current flip + average_render_time_in_us += last_render_time_in_us; + average_render_time_in_us /= DC_PLANE_UPDATE_TIMES_MAX; + + if (in_out_vrr->btr.btr_enabled) { + apply_below_the_range(core_freesync, + stream, + last_render_time_in_us, + in_out_vrr); + } else { + apply_fixed_refresh(core_freesync, + stream, + last_render_time_in_us, + in_out_vrr); + } - vmin = state->freesync_range.vmin; - vmax = vmin; - adjust_vmin_vmax(core_freesync, &stream, map_index, - 1, vmin, vmax); } } -void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams, - unsigned int curr_time_stamp_in_us) +void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + struct mod_vrr_params *in_out_vrr) { - unsigned int stream_index, map_index, last_render_time_in_us = 0; struct core_freesync *core_freesync = NULL; - if (mod_freesync == NULL) + if ((mod_freesync == NULL) || (stream == NULL) || (in_out_vrr == NULL)) return; core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - for (stream_index = 0; stream_index < num_streams; stream_index++) { - - map_index = map_index_from_stream(core_freesync, - streams[stream_index]); - - if (core_freesync->map[map_index].caps->supported) { - - last_render_time_in_us = curr_time_stamp_in_us - - core_freesync->map[map_index].state.time. - prev_time_stamp_in_us; - - /* Add the timestamps to the cache and determine - * whether BTR program is required - */ - update_timestamps(core_freesync, streams[stream_index], - map_index, last_render_time_in_us); + if (in_out_vrr->supported == false) + return; - if (core_freesync->map[map_index].state.fullscreen && - core_freesync->map[map_index].user_enable. 
- enable_for_gaming) { + /* Below the Range Logic */ - if (core_freesync->map[map_index].caps->btr_supported) { + /* Only execute if in fullscreen mode */ + if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE && + in_out_vrr->btr.btr_active) { + /* TODO: pass in flag for Pre-DCE12 ASIC + * in order for frame variable duration to take affect, + * it needs to be done one VSYNC early, which is at + * frameCounter == 1. + * For DCE12 and newer updates to V_TOTAL_MIN/MAX + * will take affect on current frame + */ + if (in_out_vrr->btr.frames_to_insert == + in_out_vrr->btr.frame_counter) { + in_out_vrr->adjust.v_total_min = + calc_v_total_from_duration(stream, + in_out_vrr, + in_out_vrr->btr.inserted_duration_in_us); + in_out_vrr->adjust.v_total_max = + in_out_vrr->adjust.v_total_min; + } - apply_below_the_range(core_freesync, - streams[stream_index], map_index, - last_render_time_in_us); - } else { - apply_fixed_refresh(core_freesync, - streams[stream_index], map_index); - } - } + if (in_out_vrr->btr.frame_counter > 0) + in_out_vrr->btr.frame_counter--; - core_freesync->map[map_index].state.time. - prev_time_stamp_in_us = curr_time_stamp_in_us; + /* Restore FreeSync */ + if (in_out_vrr->btr.frame_counter == 0) { + in_out_vrr->adjust.v_total_min = + calc_v_total_from_refresh(stream, + in_out_vrr->max_refresh_in_uhz); + in_out_vrr->adjust.v_total_max = + calc_v_total_from_refresh(stream, + in_out_vrr->min_refresh_in_uhz); } + } + + /* If in fullscreen freesync mode or in video, do not program + * static screen ramp values + */ + if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE) + in_out_vrr->fixed.ramping_active = false; + /* Gradual Static Screen Ramping Logic */ + /* Execute if ramp is active and user enabled freesync static screen*/ + if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED && + in_out_vrr->fixed.ramping_active) { + update_v_total_for_static_ramp( + core_freesync, stream, in_out_vrr); } } void mod_freesync_get_settings(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams, + const struct mod_vrr_params *vrr, unsigned int *v_total_min, unsigned int *v_total_max, unsigned int *event_triggers, unsigned int *window_min, unsigned int *window_max, @@ -1523,7 +861,6 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, unsigned int *inserted_frames, unsigned int *inserted_duration_in_us) { - unsigned int stream_index, map_index; struct core_freesync *core_freesync = NULL; if (mod_freesync == NULL) @@ -1531,25 +868,13 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - for (stream_index = 0; stream_index < num_streams; stream_index++) { - - map_index = map_index_from_stream(core_freesync, - streams[stream_index]); - - if (core_freesync->map[map_index].caps->supported) { - struct freesync_state state = - core_freesync->map[map_index].state; - *v_total_min = state.vmin; - *v_total_max = state.vmax; - *event_triggers = 0; - *window_min = state.time.min_window; - *window_max = state.time.max_window; - *lfc_mid_point_in_us = state.btr.mid_point_in_us; - *inserted_frames = state.btr.frames_to_insert; - *inserted_duration_in_us = - state.btr.inserted_frame_duration_in_us; - } - + if (vrr->supported) { + *v_total_min = vrr->adjust.v_total_min; + *v_total_max = vrr->adjust.v_total_max; + *event_triggers = 0; + *lfc_mid_point_in_us = vrr->btr.mid_point_in_us; + *inserted_frames = vrr->btr.frames_to_insert; + *inserted_duration_in_us = vrr->btr.inserted_duration_in_us; } } diff --git 
a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index f083e1619dbe..bd75ca5f1cd3 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -56,96 +56,72 @@ #include "dm_services.h" -struct mod_freesync *mod_freesync_create(struct dc *dc); -void mod_freesync_destroy(struct mod_freesync *mod_freesync); - +// Access structures struct mod_freesync { int dummy; }; -enum mod_freesync_state { - FREESYNC_STATE_NONE, - FREESYNC_STATE_FULLSCREEN, - FREESYNC_STATE_STATIC_SCREEN, - FREESYNC_STATE_VIDEO -}; - -enum mod_freesync_user_enable_mask { - FREESYNC_USER_ENABLE_STATIC = 0x1, - FREESYNC_USER_ENABLE_VIDEO = 0x2, - FREESYNC_USER_ENABLE_GAMING = 0x4 -}; - -struct mod_freesync_user_enable { - bool enable_for_static; - bool enable_for_video; - bool enable_for_gaming; -}; - +// TODO: References to this should be removed struct mod_freesync_caps { bool supported; unsigned int min_refresh_in_micro_hz; unsigned int max_refresh_in_micro_hz; - - bool btr_supported; }; -struct mod_freesync_params { - enum mod_freesync_state state; - bool enable; - unsigned int update_duration_in_ns; - bool windowed_fullscreen; +enum mod_vrr_state { + VRR_STATE_UNSUPPORTED = 0, + VRR_STATE_DISABLED, + VRR_STATE_INACTIVE, + VRR_STATE_ACTIVE_VARIABLE, + VRR_STATE_ACTIVE_FIXED }; -/* - * Add stream to be tracked by module - */ -bool mod_freesync_add_stream(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, struct mod_freesync_caps *caps); +struct mod_freesync_config { + enum mod_vrr_state state; + bool ramping; + bool btr; + unsigned int min_refresh_in_uhz; + unsigned int max_refresh_in_uhz; +}; -/* - * Remove stream to be tracked by module - */ -bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream); +struct mod_vrr_params_btr { + bool btr_enabled; + bool btr_active; + uint32_t mid_point_in_us; + uint32_t inserted_duration_in_us; + uint32_t frames_to_insert; + uint32_t frame_counter; +}; -/* - * Update the freesync state flags for each display and program - * freesync accordingly - */ -void mod_freesync_update_state(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams, - struct mod_freesync_params *freesync_params); +struct mod_vrr_params_fixed_refresh { + bool fixed_active; + bool ramping_active; + bool ramping_done; + uint32_t target_refresh_in_uhz; + uint32_t frame_counter; +}; -bool mod_freesync_get_state(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, - struct mod_freesync_params *freesync_params); +struct mod_vrr_params { + bool supported; + enum mod_vrr_state state; -bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams, - struct mod_freesync_user_enable *user_enable); + uint32_t min_refresh_in_uhz; + uint32_t max_duration_in_us; + uint32_t max_refresh_in_uhz; + uint32_t min_duration_in_us; -bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, - struct mod_freesync_user_enable *user_enable); + struct dc_crtc_timing_adjust adjust; -bool mod_freesync_get_static_ramp_active(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, - bool *is_ramp_active); + struct mod_vrr_params_fixed_refresh fixed; -bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync, - struct dc_stream_state *streams, - unsigned int min_refresh, - unsigned 
int max_refresh, - struct mod_freesync_caps *caps); + struct mod_vrr_params_btr btr; +}; -bool mod_freesync_get_min_max(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, - unsigned int *min_refresh, - unsigned int *max_refresh); +struct mod_freesync *mod_freesync_create(struct dc *dc); +void mod_freesync_destroy(struct mod_freesync *mod_freesync); bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync, - struct dc_stream_state *stream, + const struct dc_stream_state *stream, unsigned int *vmin, unsigned int *vmax); @@ -154,18 +130,8 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync, unsigned int *nom_v_pos, unsigned int *v_pos); -void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams); - -void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams); - -void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams, - unsigned int curr_time_stamp); - void mod_freesync_get_settings(struct mod_freesync *mod_freesync, - struct dc_stream_state **streams, int num_streams, + const struct mod_vrr_params *vrr, unsigned int *v_total_min, unsigned int *v_total_max, unsigned int *event_triggers, unsigned int *window_min, unsigned int *window_max, @@ -173,4 +139,24 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, unsigned int *inserted_frames, unsigned int *inserted_duration_in_us); +void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + const struct mod_vrr_params *vrr, + struct dc_info_packet *infopacket); + +void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + struct mod_freesync_config *in_config, + struct mod_vrr_params *in_out_vrr); + +void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync, + const struct dc_plane_state *plane, + const struct dc_stream_state *stream, + unsigned int curr_time_stamp_in_us, + struct mod_vrr_params *in_out_vrr); + +void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + struct mod_vrr_params *in_out_vrr); + #endif -- GitLab From e80e9446084168c4f186f502dd15e6241bf454a1 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Wed, 4 Apr 2018 20:59:43 -0400 Subject: [PATCH 0451/1692] drm/amd/display: add method to check for supported range Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/modules/freesync/freesync.c | 64 +++++++++++++++++-- .../amd/display/modules/inc/mod_freesync.h | 7 ++ 2 files changed, 65 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 5e12e463c06a..4af73a72b9a9 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -168,6 +168,21 @@ static unsigned int calc_v_total_from_duration( return v_total; } +static unsigned long long calc_nominal_field_rate(const struct dc_stream_state *stream) +{ + unsigned long long nominal_field_rate_in_uhz = 0; + + /* Calculate nominal field rate for stream */ + nominal_field_rate_in_uhz = stream->timing.pix_clk_khz; + nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; + nominal_field_rate_in_uhz = 
div_u64(nominal_field_rate_in_uhz, + stream->timing.h_total); + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, + stream->timing.v_total); + + return nominal_field_rate_in_uhz; +} + static void update_v_total_for_static_ramp( struct core_freesync *core_freesync, const struct dc_stream_state *stream, @@ -623,12 +638,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); /* Calculate nominal field rate for stream */ - nominal_field_rate_in_uhz = stream->timing.pix_clk_khz; - nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.h_total); - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.v_total); + nominal_field_rate_in_uhz = calc_nominal_field_rate(stream); min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; @@ -878,3 +888,45 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, } } +bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + uint32_t min_refresh_cap_in_uhz, + uint32_t max_refresh_cap_in_uhz, + uint32_t min_refresh_request_in_uhz, + uint32_t max_refresh_request_in_uhz) +{ + /* Calculate nominal field rate for stream */ + unsigned long long nominal_field_rate_in_uhz = + calc_nominal_field_rate(stream); + + // Check nominal is within range + if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz || + nominal_field_rate_in_uhz < min_refresh_cap_in_uhz) + return false; + + // If nominal is less than max, limit the max allowed refresh rate + if (nominal_field_rate_in_uhz < max_refresh_cap_in_uhz) + max_refresh_cap_in_uhz = nominal_field_rate_in_uhz; + + // Don't allow min > max + if (min_refresh_request_in_uhz > max_refresh_request_in_uhz) + return false; + + // Check min is within range + if (min_refresh_request_in_uhz > max_refresh_cap_in_uhz || + min_refresh_request_in_uhz < min_refresh_cap_in_uhz) + return false; + + // Check max is within range + if (max_refresh_request_in_uhz > max_refresh_cap_in_uhz || + max_refresh_request_in_uhz < min_refresh_cap_in_uhz) + return false; + + // For variable range, check for at least 10 Hz range + if ((max_refresh_request_in_uhz != min_refresh_request_in_uhz) && + (max_refresh_request_in_uhz - min_refresh_request_in_uhz < 10000000)) + return false; + + return true; +} + diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index bd75ca5f1cd3..e7d77bb6209f 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -159,4 +159,11 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, struct mod_vrr_params *in_out_vrr); +bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + uint32_t min_refresh_cap_in_uhz, + uint32_t max_refresh_cap_in_uhz, + uint32_t min_refresh_request_in_uhz, + uint32_t max_refresh_request_in_uhz); + #endif -- GitLab From ff6014d63a87d9a801ddd9ddd10359b2dead6943 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Wed, 4 Apr 2018 21:01:21 -0400 Subject: [PATCH 0452/1692] drm/amd/display: Fix bug where refresh rate becomes fixed This issue occurs if refresh rate range is very small and lfc is not used. 
When frame spikes occur, refresh rate becomes fixed and will not restore properly Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/modules/freesync/freesync.c | 43 ++++++++++--------- .../amd/display/modules/inc/mod_freesync.h | 3 ++ 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 4af73a72b9a9..be6a6c63b4cc 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -168,21 +168,6 @@ static unsigned int calc_v_total_from_duration( return v_total; } -static unsigned long long calc_nominal_field_rate(const struct dc_stream_state *stream) -{ - unsigned long long nominal_field_rate_in_uhz = 0; - - /* Calculate nominal field rate for stream */ - nominal_field_rate_in_uhz = stream->timing.pix_clk_khz; - nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.h_total); - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.v_total); - - return nominal_field_rate_in_uhz; -} - static void update_v_total_for_static_ramp( struct core_freesync *core_freesync, const struct dc_stream_state *stream, @@ -441,10 +426,11 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync, in_out_vrr->adjust.v_total_min; } else { in_out_vrr->adjust.v_total_min = - calc_v_total_from_refresh( - stream, in_out_vrr->max_refresh_in_uhz); + calc_v_total_from_refresh(stream, + in_out_vrr->max_refresh_in_uhz); in_out_vrr->adjust.v_total_max = - in_out_vrr->adjust.v_total_min; + calc_v_total_from_refresh(stream, + in_out_vrr->min_refresh_in_uhz); } } } @@ -638,7 +624,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); /* Calculate nominal field rate for stream */ - nominal_field_rate_in_uhz = calc_nominal_field_rate(stream); + nominal_field_rate_in_uhz = + mod_freesync_calc_nominal_field_rate(stream); min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; @@ -888,6 +875,22 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, } } +unsigned long long mod_freesync_calc_nominal_field_rate( + const struct dc_stream_state *stream) +{ + unsigned long long nominal_field_rate_in_uhz = 0; + + /* Calculate nominal field rate for stream */ + nominal_field_rate_in_uhz = stream->timing.pix_clk_khz; + nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, + stream->timing.h_total); + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, + stream->timing.v_total); + + return nominal_field_rate_in_uhz; +} + bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, uint32_t min_refresh_cap_in_uhz, @@ -897,7 +900,7 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync, { /* Calculate nominal field rate for stream */ unsigned long long nominal_field_rate_in_uhz = - calc_nominal_field_rate(stream); + mod_freesync_calc_nominal_field_rate(stream); // Check nominal is within range if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz || diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index 
e7d77bb6209f..85c98afe9375 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -159,6 +159,9 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, struct mod_vrr_params *in_out_vrr); +unsigned long long mod_freesync_calc_nominal_field_rate( + const struct dc_stream_state *stream); + bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, uint32_t min_refresh_cap_in_uhz, -- GitLab From 050790cc59732cd99789235cb118df23e9b42911 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Wed, 4 Apr 2018 21:04:42 -0400 Subject: [PATCH 0453/1692] drm/amd/display: Fix bug that causes black screen Ignore MSA bit on DP display is usually set during SetTimings, but there was a case where the module thought refresh rate was not valid and ignore MSA bit was not set. Later, a valid refresh rate range was requested but since ignore MSA bit not set, it caused black screen. Issue if with how the module checked for VRR support. Fix up that logic. DM should call new valid_range function to determine if timing is supported. Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/modules/freesync/freesync.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index be6a6c63b4cc..4887c888bbe7 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -613,7 +613,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, { struct core_freesync *core_freesync = NULL; unsigned long long nominal_field_rate_in_uhz = 0; - bool nominal_field_rate_in_range = true; unsigned int refresh_range = 0; unsigned int min_refresh_in_uhz = 0; unsigned int max_refresh_in_uhz = 0; @@ -638,15 +637,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, if (max_refresh_in_uhz > nominal_field_rate_in_uhz) max_refresh_in_uhz = nominal_field_rate_in_uhz; - /* Allow for some rounding error of actual video timing by taking ceil. - * For example, 144 Hz mode timing may actually be 143.xxx Hz when - * calculated from pixel rate and vertical/horizontal totals, but - * this should be allowed instead of blocking FreeSync. 
- */ - if ((min_refresh_in_uhz / 1000000) > - ((nominal_field_rate_in_uhz + 1000000 - 1) / 1000000)) - nominal_field_rate_in_range = false; - // Full range may be larger than current video timing, so cap at nominal if (min_refresh_in_uhz > nominal_field_rate_in_uhz) min_refresh_in_uhz = nominal_field_rate_in_uhz; @@ -658,10 +648,14 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->state = in_config->state; - if ((in_config->state == VRR_STATE_UNSUPPORTED) || - (!nominal_field_rate_in_range)) { + if (in_config->state == VRR_STATE_UNSUPPORTED) { in_out_vrr->state = VRR_STATE_UNSUPPORTED; in_out_vrr->supported = false; + in_out_vrr->adjust.v_total_min = stream->timing.v_total; + in_out_vrr->adjust.v_total_max = stream->timing.v_total; + + return; + } else { in_out_vrr->min_refresh_in_uhz = min_refresh_in_uhz; in_out_vrr->max_duration_in_us = -- GitLab From be922ff750e40b292824959577aa914388ff6c8b Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 5 Apr 2018 15:20:15 -0400 Subject: [PATCH 0454/1692] drm/amd/display: Add back code to allow for rounding error Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/modules/freesync/freesync.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 4887c888bbe7..abd5c9374eb3 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -896,6 +896,17 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync, unsigned long long nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); + /* Allow for some rounding error of actual video timing by taking ceil. + * For example, 144 Hz mode timing may actually be 143.xxx Hz when + * calculated from pixel rate and vertical/horizontal totals, but + * this should be allowed instead of blocking FreeSync. + */ + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, 1000000); + min_refresh_cap_in_uhz /= 1000000; + max_refresh_cap_in_uhz /= 1000000; + min_refresh_request_in_uhz /= 1000000; + max_refresh_request_in_uhz /= 1000000; + // Check nominal is within range if (nominal_field_rate_in_uhz > max_refresh_cap_in_uhz || nominal_field_rate_in_uhz < min_refresh_cap_in_uhz) @@ -921,7 +932,7 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync, // For variable range, check for at least 10 Hz range if ((max_refresh_request_in_uhz != min_refresh_request_in_uhz) && - (max_refresh_request_in_uhz - min_refresh_request_in_uhz < 10000000)) + (max_refresh_request_in_uhz - min_refresh_request_in_uhz < 10)) return false; return true; -- GitLab From 9410a3776bbf1e172cfdb9f8b771a5ce64081a51 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Fri, 6 Apr 2018 12:12:06 -0400 Subject: [PATCH 0455/1692] drm/amd/display: fix LFC tearing at top of screen Tearing occurred because new VTOTAL MIN/MAX was being programmed too early. The flip can happen within the VUPDATE high region, and the new min/max would take effect immediately. But this means that frame is not variable anymore, and tearing would occur when the flip actually happens. The fixed insert duration should be programmed on the first VUPDATE interrupt instead. 
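In other words, the flip path now only caches the BTR decision (frames_to_insert, frame_counter, inserted_duration_in_us) and the first VUPDATE interrupt after the flip is what actually reprograms V_TOTAL MIN/MAX. Below is a simplified, self-contained sketch of that split; the struct and helper names (v_total_from_duration_us() and friends) are illustrative stand-ins for the freesync.c helpers such as calc_v_total_from_duration() and calc_v_total_from_refresh(), not the driver code itself.

#include <stdbool.h>

struct stream_timing {
	unsigned int pix_clk_khz;
	unsigned int h_total;
};

struct vrr_btr {
	bool btr_active;
	unsigned int frames_to_insert;        /* frames chosen at flip time */
	unsigned int frame_counter;           /* counts down, one per VUPDATE */
	unsigned int inserted_duration_in_us; /* duration of each inserted frame */
};

struct vrr_adjust {
	unsigned int v_total_min;
	unsigned int v_total_max;
};

/* Lines needed to stretch one frame to the given duration:
 * duration_us * pix_clk_khz / (h_total * 1000), the same formula freesync.c uses.
 */
static unsigned int v_total_from_duration_us(const struct stream_timing *t,
					     unsigned int duration_in_us)
{
	return (unsigned int)(((unsigned long long)duration_in_us *
			       t->pix_clk_khz) / (t->h_total * 1000ULL));
}

static unsigned int v_total_from_refresh_uhz(const struct stream_timing *t,
					     unsigned int refresh_in_uhz)
{
	/* refresh in uHz to frame duration in us, then reuse the helper above */
	unsigned int duration_in_us =
		(unsigned int)(1000000000000ULL / refresh_in_uhz);

	return v_total_from_duration_us(t, duration_in_us);
}

/* Flip (preflip) path: cache the BTR decision only. Touching v_total here can
 * land inside the VUPDATE high region and take effect immediately, fixing a
 * frame that is still supposed to be variable, which is the tearing this
 * patch removes.
 */
static void btr_on_flip(struct vrr_btr *btr, unsigned int frames,
			unsigned int inserted_duration_in_us)
{
	btr->btr_active = frames > 0; /* BTR only engages with frames to insert */
	btr->frames_to_insert = frames;
	btr->frame_counter = frames;
	btr->inserted_duration_in_us = inserted_duration_in_us;
}

/* VUPDATE path: apply the cached duration on the first VUPDATE after the flip
 * (frame_counter still equals frames_to_insert), then restore the full
 * FreeSync range once every inserted frame has elapsed.
 */
static void btr_on_vupdate(struct vrr_btr *btr, struct vrr_adjust *adjust,
			   const struct stream_timing *t,
			   unsigned int min_refresh_in_uhz,
			   unsigned int max_refresh_in_uhz)
{
	if (!btr->btr_active)
		return;

	if (btr->frame_counter == btr->frames_to_insert) {
		adjust->v_total_min =
			v_total_from_duration_us(t, btr->inserted_duration_in_us);
		adjust->v_total_max = adjust->v_total_min;
	}

	if (btr->frame_counter > 0)
		btr->frame_counter--;

	if (btr->frame_counter == 0) {
		/* back to the full variable range */
		adjust->v_total_min = v_total_from_refresh_uhz(t, max_refresh_in_uhz);
		adjust->v_total_max = v_total_from_refresh_uhz(t, min_refresh_in_uhz);
	}
}

The diff below is the flip-side half of that split: it removes the early V_TOTAL programming from apply_below_the_range(), while the VUPDATE-side logic already exists in mod_freesync_handle_v_update() earlier in this series.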
Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index abd5c9374eb3..daad60ec1ce3 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -371,12 +371,6 @@ static void apply_below_the_range(struct core_freesync *core_freesync, inserted_frame_duration_in_us; in_out_vrr->btr.frames_to_insert = frames_to_insert; in_out_vrr->btr.frame_counter = frames_to_insert; - - in_out_vrr->adjust.v_total_min = - calc_v_total_from_duration(stream, in_out_vrr, - in_out_vrr->btr.inserted_duration_in_us); - in_out_vrr->adjust.v_total_max = - in_out_vrr->adjust.v_total_min; } } -- GitLab From 953c2901c860da16963b48db8344bf0fd5b03040 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Fri, 6 Apr 2018 13:55:39 -0400 Subject: [PATCH 0456/1692] drm/amd/display: refactor vupdate interrupt registration We only need to register once OS calls the interrupt control. Also, if we are entering static screen mode, disable after ramping is done. Disable shall be done via timer of 2 seconds regardless of ramping complete or not, just to simplify. Also, ramp to mid instead of min, due to better flicker performance... Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/modules/freesync/freesync.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index daad60ec1ce3..349387eb9fe6 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -109,12 +109,6 @@ static unsigned int calc_duration_in_us_from_v_total( * 1000) * stream->timing.h_total, stream->timing.pix_clk_khz)); - if (duration_in_us < in_vrr->min_duration_in_us) - duration_in_us = in_vrr->min_duration_in_us; - - if (duration_in_us > in_vrr->max_duration_in_us) - duration_in_us = in_vrr->max_duration_in_us; - return duration_in_us; } @@ -230,10 +224,9 @@ static void update_v_total_for_static_ramp( } } - v_total = calc_v_total_from_duration(stream, - in_out_vrr, - current_duration_in_us); - + v_total = div64_u64(div64_u64(((unsigned long long)( + current_duration_in_us) * stream->timing.pix_clk_khz), + stream->timing.h_total), 1000); in_out_vrr->adjust.v_total_min = v_total; in_out_vrr->adjust.v_total_max = v_total; @@ -702,7 +695,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, } else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) { in_out_vrr->fixed.target_refresh_in_uhz = in_out_vrr->min_refresh_in_uhz; - if (in_out_vrr->fixed.ramping_active) { + if (in_out_vrr->fixed.ramping_active && + in_out_vrr->fixed.fixed_active) { + /* Do not update vtotals if ramping is already active + * in order to continue ramp from current refresh. 
+ */ in_out_vrr->fixed.fixed_active = true; } else { in_out_vrr->fixed.fixed_active = true; -- GitLab From bf2af91cb3a4ef1c5f9132687b9818f33d6a389f Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 17 Apr 2018 11:40:31 -0400 Subject: [PATCH 0457/1692] drm/amd/display: Correct rounding calcs in mod_freesync_is_valid_range Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/modules/freesync/freesync.c | 39 ++++++++++++++++--- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 349387eb9fe6..769f46777a1d 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -887,12 +887,41 @@ bool mod_freesync_is_valid_range(struct mod_freesync *mod_freesync, unsigned long long nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); - /* Allow for some rounding error of actual video timing by taking ceil. - * For example, 144 Hz mode timing may actually be 143.xxx Hz when - * calculated from pixel rate and vertical/horizontal totals, but - * this should be allowed instead of blocking FreeSync. + /* Typically nominal refresh calculated can have some fractional part. + * Allow for some rounding error of actual video timing by taking floor + * of caps and request. Round the nominal refresh rate. + * + * Dividing will convert everything to units in Hz although input + * variable name is in uHz! + * + * Also note, this takes care of rounding error on the nominal refresh + * so by rounding error we only expect it to be off by a small amount, + * such as < 0.1 Hz. i.e. 143.9xxx or 144.1xxx. + * + * Example 1. Caps Min = 40 Hz, Max = 144 Hz + * Request Min = 40 Hz, Max = 144 Hz + * Nominal = 143.5x Hz rounded to 144 Hz + * This function should allow this as valid request + * + * Example 2. Caps Min = 40 Hz, Max = 144 Hz + * Request Min = 40 Hz, Max = 144 Hz + * Nominal = 144.4x Hz rounded to 144 Hz + * This function should allow this as valid request + * + * Example 3. Caps Min = 40 Hz, Max = 144 Hz + * Request Min = 40 Hz, Max = 144 Hz + * Nominal = 120.xx Hz rounded to 120 Hz + * This function should return NOT valid since the requested + * max is greater than current timing's nominal + * + * Example 4. Caps Min = 40 Hz, Max = 120 Hz + * Request Min = 40 Hz, Max = 120 Hz + * Nominal = 144.xx Hz rounded to 144 Hz + * This function should return NOT valid since the nominal + * is greater than the capability's max refresh */ - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, 1000000); + nominal_field_rate_in_uhz = + div_u64(nominal_field_rate_in_uhz + 500000, 1000000); min_refresh_cap_in_uhz /= 1000000; max_refresh_cap_in_uhz /= 1000000; min_refresh_request_in_uhz /= 1000000; -- GitLab From 4c1fa3630b44f03d5539a778ddcea319e9c8223a Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Sun, 25 Mar 2018 16:28:33 -0400 Subject: [PATCH 0458/1692] drm/amd/display: Don't force UPDATE_TYPE_FULL if stream_update has hdr_static_metadata This was missed when pushing public patch for 3e3a40b03847 (drm/amd/display: Updated HDR Static Metadata to directly take info packet raw) This is currently no problem yet since we're not doing HDR on Linux yet. 
Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index a4df627d6936..0179d2be9866 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1268,9 +1268,6 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->out_transfer_func) return UPDATE_TYPE_FULL; - if (stream_update->hdr_static_metadata) - return UPDATE_TYPE_FULL; - if (stream_update->abm_level) return UPDATE_TYPE_FULL; } -- GitLab From 69ff884526742fcb00b7509461bf8e41c87d9b10 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 8 May 2018 17:09:49 -0400 Subject: [PATCH 0459/1692] drm/amd/display: add config for sending VSIF Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 3 ++- drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 5f5e5ea20d78..f0f1e58b9830 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4680,6 +4680,7 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm, aconnector->min_vfreq * 1000000; config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; + config.vsif_supported = true; } mod_freesync_build_vrr_params(dm->freesync_module, diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 769f46777a1d..e1688902a1b0 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -492,7 +492,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, /* Check if Freesync is supported. Return if false. 
If true, * set the corresponding bit in the info packet */ - if (!vrr->supported) + if (!vrr->supported || !vrr->send_vsif) return; if (dc_is_hdmi_signal(stream->signal)) { @@ -634,6 +634,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, return; in_out_vrr->state = in_config->state; + in_out_vrr->send_vsif = in_config->vsif_supported; if (in_config->state == VRR_STATE_UNSUPPORTED) { in_out_vrr->state = VRR_STATE_UNSUPPORTED; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index 85c98afe9375..a0f32cde721c 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -78,6 +78,7 @@ enum mod_vrr_state { struct mod_freesync_config { enum mod_vrr_state state; + bool vsif_supported; bool ramping; bool btr; unsigned int min_refresh_in_uhz; @@ -103,6 +104,7 @@ struct mod_vrr_params_fixed_refresh { struct mod_vrr_params { bool supported; + bool send_vsif; enum mod_vrr_state state; uint32_t min_refresh_in_uhz; -- GitLab From 1e7e86c43f38d2cc0183ae2a440c70f3c6163883 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Tue, 1 May 2018 10:39:26 -0400 Subject: [PATCH 0460/1692] drm/amd/display: decouple front and backend pgm using dpms_off as backend enable flag Signed-off-by: Samson Tam Reviewed-by: Anthony Koo Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 125 +++++++++++------- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 34 +++++ .../gpu/drm/amd/display/dc/core/dc_resource.c | 6 + drivers/gpu/drm/amd/display/dc/dc_stream.h | 2 + .../display/dc/dce110/dce110_hw_sequencer.c | 38 +----- 5 files changed, 123 insertions(+), 82 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0179d2be9866..2d4a5a85f799 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1270,6 +1270,9 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->abm_level) return UPDATE_TYPE_FULL; + + if (stream_update->dpms_off) + return UPDATE_TYPE_FULL; } for (i = 0 ; i < surface_count; i++) { @@ -1324,6 +1327,71 @@ static struct dc_stream_status *stream_get_status( static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; +static void commit_planes_do_stream_update(struct dc *dc, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type, + struct dc_state *context) +{ + int j; + + // Stream updates + for (j = 0; j < dc->res_pool->pipe_count; j++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; + + if (!pipe_ctx->top_pipe && + pipe_ctx->stream && + pipe_ctx->stream == stream) { + + /* Fast update*/ + // VRR program can be done as part of FAST UPDATE + if (stream_update->adjust) + dc->hwss.set_drr(&pipe_ctx, 1, + stream_update->adjust->v_total_min, + stream_update->adjust->v_total_max); + + /* Full fe update*/ + if (update_type == UPDATE_TYPE_FAST) + continue; + + if (stream_update->dpms_off) { + if (*stream_update->dpms_off) { + core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE); + dc->hwss.pplib_apply_display_requirements( + dc, dc->current_state); + } else { + dc->hwss.pplib_apply_display_requirements( + dc, dc->current_state); + core_link_enable_stream(dc->current_state, pipe_ctx); + } + } + + if (stream_update->abm_level && pipe_ctx->stream_res.abm) { + if 
(pipe_ctx->stream_res.tg->funcs->is_blanked) { + // if otg funcs defined check if blanked before programming + if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + pipe_ctx->stream_res.abm->funcs->set_abm_level( + pipe_ctx->stream_res.abm, stream->abm_level); + } else + pipe_ctx->stream_res.abm->funcs->set_abm_level( + pipe_ctx->stream_res.abm, stream->abm_level); + } + + if (stream_update->periodic_fn_vsync_delta && + pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) + pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, + pipe_ctx->stream->periodic_fn_vsync_delta); + + if (stream_update->hdr_static_metadata || + stream_update->vrr_infopacket) { + resource_build_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); + } + } + } +} + static void commit_planes_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, @@ -1340,15 +1408,20 @@ static void commit_planes_for_stream(struct dc *dc, context_clock_trace(dc, context); } + // Stream updates + if (stream_update) + commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); + if (surface_count == 0) { /* * In case of turning off screen, no need to program front end a second time. - * just return after program front end. + * just return after program blank. */ - dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context); + dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); return; } + // Update Type FULL, Surface updates for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; @@ -1362,13 +1435,6 @@ static void commit_planes_for_stream(struct dc *dc, if (!pipe_ctx->plane_state) continue; - /* Fast update*/ - // VRR program can be done as part of FAST UPDATE - if (stream_update && stream_update->adjust) - dc->hwss.set_drr(&pipe_ctx, 1, - stream_update->adjust->v_total_min, - stream_update->adjust->v_total_max); - /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; @@ -1378,34 +1444,18 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.apply_ctx_for_surface( dc, pipe_ctx->stream, stream_status->plane_count, context); - - if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) { - if (pipe_ctx->stream_res.tg->funcs->is_blanked) { - // if otg funcs defined check if blanked before programming - if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) - pipe_ctx->stream_res.abm->funcs->set_abm_level( - pipe_ctx->stream_res.abm, stream->abm_level); - } else - pipe_ctx->stream_res.abm->funcs->set_abm_level( - pipe_ctx->stream_res.abm, stream->abm_level); - } - - if (stream_update && stream_update->periodic_fn_vsync_delta && - pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) - pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, - pipe_ctx->stream->periodic_fn_vsync_delta); } } if (update_type == UPDATE_TYPE_FULL) context_timing_trace(dc, &context->res_ctx); - /* Lock the top pipe while updating plane addrs, since freesync requires - * plane addr update event triggers to be synchronized. - * top_pipe_to_program is expected to never be NULL - */ + // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { + /* Lock the top pipe while updating plane addrs, since freesync requires + * plane addr update event triggers to be synchronized. 
+ * top_pipe_to_program is expected to never be NULL + */ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); /* Perform requested Updates */ @@ -1428,21 +1478,6 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); } - - if (stream && stream_update) - for (j = 0; j < dc->res_pool->pipe_count; j++) { - struct pipe_ctx *pipe_ctx = - &context->res_ctx.pipe_ctx[j]; - - if (pipe_ctx->stream != stream) - continue; - - if (stream_update->hdr_static_metadata || - (stream_update->vrr_infopacket)) { - resource_build_info_frame(pipe_ctx); - dc->hwss.update_info_frame(pipe_ctx); - } - } } void dc_commit_updates_for_stream(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 1adfcdd588d6..739c6654d849 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2458,9 +2458,43 @@ void core_link_enable_stream( struct pipe_ctx *pipe_ctx) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) { + stream->sink->link->link_enc->funcs->setup( + stream->sink->link->link_enc, + pipe_ctx->stream->signal); + pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync( + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.tg->inst, + stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE); + } + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute( + pipe_ctx->stream_res.stream_enc, + &stream->timing, + stream->output_color_space); + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute( + pipe_ctx->stream_res.stream_enc, + &stream->timing, + stream->phy_pix_clk, + pipe_ctx->stream_res.audio != NULL); + + if (dc_is_dvi_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute( + pipe_ctx->stream_res.stream_enc, + &stream->timing, + (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ? + true : false); + + resource_build_info_frame(pipe_ctx); + core_dc->hwss.update_info_frame(pipe_ctx); + /* eDP lit up by bios already, no need to enable again. 
*/ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && core_dc->apply_edp_fast_boot_optimization) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 4468b240929a..9157f5d83b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1615,6 +1615,9 @@ static bool are_stream_backends_same( if (is_hdr_static_meta_changed(stream_a, stream_b)) return false; + if (stream_a->dpms_off != stream_b->dpms_off) + return false; + return true; } @@ -2716,6 +2719,9 @@ bool pipe_need_reprogram( if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream)) return true; + if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off) + return true; + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 67101a525e3d..97d2dcf2547c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -128,6 +128,8 @@ struct dc_stream_update { unsigned long long *periodic_fn_vsync_delta; struct dc_crtc_timing_adjust *adjust; struct dc_info_packet *vrr_infopacket; + + bool *dpms_off; }; bool dc_is_stream_unchanged( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index ae4792494fe7..ce1e0f6ec3ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1349,8 +1349,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( struct dc *dc) { struct dc_stream_state *stream = pipe_ctx->stream; - struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx. - pipe_ctx[pipe_ctx->pipe_idx]; if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output; @@ -1405,46 +1403,12 @@ static enum dc_status apply_single_controller_ctx_to_hw( stream->timing.display_color_depth, pipe_ctx->stream->signal); - if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) - stream->sink->link->link_enc->funcs->setup( - stream->sink->link->link_enc, - pipe_ctx->stream->signal); - - if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) - pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync( - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.tg->inst, - stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE); - - pipe_ctx->stream_res.opp->funcs->opp_program_fmt( pipe_ctx->stream_res.opp, &stream->bit_depth_params, &stream->clamping); - if (dc_is_dp_signal(pipe_ctx->stream->signal)) - pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute( - pipe_ctx->stream_res.stream_enc, - &stream->timing, - stream->output_color_space); - - if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) - pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute( - pipe_ctx->stream_res.stream_enc, - &stream->timing, - stream->phy_pix_clk, - pipe_ctx->stream_res.audio != NULL); - - if (dc_is_dvi_signal(pipe_ctx->stream->signal)) - pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute( - pipe_ctx->stream_res.stream_enc, - &stream->timing, - (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ? 
- true : false); - - resource_build_info_frame(pipe_ctx); - dce110_update_info_frame(pipe_ctx); - if (!pipe_ctx_old->stream) + if (!stream->dpms_off) core_link_enable_stream(context, pipe_ctx); pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; -- GitLab From 27e2e207747981ca3161dec16cea304f8e46cd65 Mon Sep 17 00:00:00 2001 From: SivapiriyanKumarasamy Date: Fri, 18 May 2018 17:05:52 -0400 Subject: [PATCH 0461/1692] drm/amd/display: Program vline interrupt on FAST update Signed-off-by: SivapiriyanKumarasamy Reviewed-by: Tony Cheng Reviewed-by: Anthony Koo Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2d4a5a85f799..699cb6f51121 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1350,6 +1350,13 @@ static void commit_planes_do_stream_update(struct dc *dc, stream_update->adjust->v_total_min, stream_update->adjust->v_total_max); + if (stream_update->periodic_fn_vsync_delta && + pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) + pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, + pipe_ctx->stream->periodic_fn_vsync_delta); + /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; @@ -1377,12 +1384,6 @@ static void commit_planes_do_stream_update(struct dc *dc, pipe_ctx->stream_res.abm, stream->abm_level); } - if (stream_update->periodic_fn_vsync_delta && - pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) - pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, - pipe_ctx->stream->periodic_fn_vsync_delta); - if (stream_update->hdr_static_metadata || stream_update->vrr_infopacket) { resource_build_info_frame(pipe_ctx); -- GitLab From 1336926f43ccadf2a152ea89a27de725c4d17f62 Mon Sep 17 00:00:00 2001 From: Alvin lee Date: Mon, 4 Jun 2018 17:31:25 -0400 Subject: [PATCH 0462/1692] drm/amd/display: Enable Stereo in Dal3 - program infoframe for Stereo - program stereo flip control registers properly v2: Add missing license headers Signed-off-by: Alvin lee Reviewed-by: Tony Cheng Acked-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Makefile | 3 +- .../gpu/drm/amd/display/dc/core/dc_resource.c | 57 +++++------- drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 + .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 18 +++- .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 4 + .../amd/display/modules/inc/mod_info_packet.h | 40 ++++++++ .../amd/display/modules/info_packet/Makefile | 31 +++++++ .../display/modules/info_packet/info_packet.c | 92 +++++++++++++++++++ 8 files changed, 208 insertions(+), 38 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h create mode 100644 drivers/gpu/drm/amd/display/modules/info_packet/Makefile create mode 100644 drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index a2c5be493555..c97dc9613325 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -31,11 +31,12 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc 
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet #TODO: remove when Timing Sync feature is complete subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 -DAL_LIBS = amdgpu_dm dc modules/freesync modules/color +DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 9157f5d83b0d..07a1dd41666d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1578,6 +1578,20 @@ static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream, return false; } +static bool is_vsc_info_packet_changed(struct dc_stream_state *cur_stream, + struct dc_stream_state *new_stream) +{ + if (cur_stream == NULL) + return true; + + if (memcmp(&cur_stream->vsc_infopacket, + &new_stream->vsc_infopacket, + sizeof(struct dc_info_packet)) != 0) + return true; + + return false; +} + static bool is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream) { @@ -1618,6 +1632,9 @@ static bool are_stream_backends_same( if (stream_a->dpms_off != stream_b->dpms_off) return false; + if (is_vsc_info_packet_changed(stream_a, stream_b)) + return false; + return true; } @@ -2504,43 +2521,10 @@ static void set_vsc_info_packet( struct dc_info_packet *info_packet, struct dc_stream_state *stream) { - unsigned int vscPacketRevision = 0; - unsigned int i; - - /*VSC packet set to 2 when DP revision >= 1.2*/ - if (stream->psr_version != 0) { - vscPacketRevision = 2; - } - - /* VSC packet not needed based on the features - * supported by this DP display - */ - if (vscPacketRevision == 0) + if (!stream->vsc_infopacket.valid) return; - if (vscPacketRevision == 0x2) { - /* Secondary-data Packet ID = 0*/ - info_packet->hb0 = 0x00; - /* 07h - Packet Type Value indicating Video - * Stream Configuration packet - */ - info_packet->hb1 = 0x07; - /* 02h = VSC SDP supporting 3D stereo and PSR - * (applies to eDP v1.3 or higher). - */ - info_packet->hb2 = 0x02; - /* 08h = VSC packet supporting 3D stereo + PSR - * (HB2 = 02h). 
- */ - info_packet->hb3 = 0x08; - - for (i = 0; i < 28; i++) - info_packet->sb[i] = 0; - - info_packet->valid = true; - } - - /*TODO: stereo 3D support and extend pixel encoding colorimetry*/ + *info_packet = stream->vsc_infopacket; } void dc_resource_state_destruct(struct dc_state *context) @@ -2722,6 +2706,9 @@ bool pipe_need_reprogram( if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off) return true; + if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream)) + return true; + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 97d2dcf2547c..8f81133ac0c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -55,6 +55,7 @@ struct dc_stream_state { struct dc_crtc_timing timing; struct dc_crtc_timing_adjust adjust; struct dc_info_packet vrr_infopacket; + struct dc_info_packet vsc_infopacket; struct rect src; /* composition area */ struct rect dst; /* stream addressable area */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index ec4a5f665586..8da2b8a09a12 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -313,10 +313,24 @@ bool hubp1_program_surface_flip_and_addr( { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); - /* program flip type */ - REG_SET(DCSURF_FLIP_CONTROL, 0, + + //program flip type + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, flip_immediate); + + if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) { + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1); + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1); + + } else { + // turn off stereo if not in stereo + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0); + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0); + } + + + /* HW automatically latch rest of address register on write to * DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 48c1907c78c6..7605af9b4837 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -270,6 +270,8 @@ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\ @@ -451,6 +453,8 @@ type H_MIRROR_EN;\ type SURFACE_PIXEL_FORMAT;\ type SURFACE_FLIP_TYPE;\ + type SURFACE_FLIP_MODE_FOR_STEREOSYNC;\ + type SURFACE_FLIP_IN_STEREOSYNC;\ type SURFACE_UPDATE_LOCK;\ type SURFACE_FLIP_PENDING;\ type PRI_VIEWPORT_WIDTH; \ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h new file mode 100644 index 000000000000..786b34380f85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Advanced Micro 
Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef MOD_INFO_PACKET_H_ +#define MOD_INFO_PACKET_H_ + +struct info_packet_inputs { + const struct dc_stream_state *pStream; +}; + +struct info_packets { + struct dc_info_packet *pVscInfoPacket; +}; + +void mod_build_infopackets(struct info_packet_inputs *inputs, + struct info_packets *info_packets); + +#endif diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/Makefile b/drivers/gpu/drm/amd/display/modules/info_packet/Makefile new file mode 100644 index 000000000000..4c382d728536 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/info_packet/Makefile @@ -0,0 +1,31 @@ +# +# Copyright 2017 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'info_packet' sub-module of DAL. +# + +INFO_PACKET = info_packet.o + +AMD_DAL_INFO_PACKET = $(addprefix $(AMDDALPATH)/modules/info_packet/,$(INFO_PACKET)) +#$(info ************ DAL INFO_PACKET MODULE MAKEFILE ************) + +AMD_DISPLAY_FILES += $(AMD_DAL_INFO_PACKET) diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c new file mode 100644 index 000000000000..24b6cc1dfc64 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -0,0 +1,92 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "mod_info_packet.h" +#include "core_types.h" + +static void mod_build_vsc_infopacket(const struct dc_stream_state *stream, + struct dc_info_packet *info_packet) +{ + unsigned int vscPacketRevision = 0; + unsigned int i; + + if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) + vscPacketRevision = 1; + + + /*VSC packet set to 2 when DP revision >= 1.2*/ + if (stream->psr_version != 0) + vscPacketRevision = 2; + + + /* VSC packet not needed based on the features + * supported by this DP display + */ + if (vscPacketRevision == 0) + return; + + if (vscPacketRevision == 0x2) { + /* Secondary-data Packet ID = 0*/ + info_packet->hb0 = 0x00; + /* 07h - Packet Type Value indicating Video + * Stream Configuration packet + */ + info_packet->hb1 = 0x07; + /* 02h = VSC SDP supporting 3D stereo and PSR + * (applies to eDP v1.3 or higher). + */ + info_packet->hb2 = 0x02; + /* 08h = VSC packet supporting 3D stereo + PSR + * (HB2 = 02h). + */ + info_packet->hb3 = 0x08; + + for (i = 0; i < 28; i++) + info_packet->sb[i] = 0; + + info_packet->valid = true; + } + + if (vscPacketRevision == 0x1) { + + info_packet->hb0 = 0x00; // Secondary-data Packet ID = 0 + info_packet->hb1 = 0x07; // 07h = Packet Type Value indicating Video Stream Configuration packet + info_packet->hb2 = 0x01; // 01h = Revision number. VSC SDP supporting 3D stereo only + info_packet->hb3 = 0x01; // 01h = VSC SDP supporting 3D stereo only (HB2 = 01h). 
+ + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_INBAND_FA) + info_packet->sb[0] = 0x1; + + info_packet->valid = true; + } +} + +void mod_build_infopackets(struct info_packet_inputs *inputs, + struct info_packets *info_packets) +{ + if (info_packets->pVscInfoPacket != NULL) + mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket); +} + -- GitLab From 0e4af5f3675bd766dbf11e5f17428ea69b630035 Mon Sep 17 00:00:00 2001 From: Alvin lee Date: Fri, 8 Jun 2018 13:58:36 -0400 Subject: [PATCH 0463/1692] drm/amd/display: Program vsc_infopacket in commit_planes_for_stream Signed-off-by: Alvin lee Reviewed-by: Jun Lei Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 ++- drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 699cb6f51121..eda21868c892 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1385,7 +1385,8 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->hdr_static_metadata || - stream_update->vrr_infopacket) { + stream_update->vrr_infopacket || + stream_update->vsc_infopacket) { resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 8f81133ac0c1..790beb5cb358 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -129,6 +129,7 @@ struct dc_stream_update { unsigned long long *periodic_fn_vsync_delta; struct dc_crtc_timing_adjust *adjust; struct dc_info_packet *vrr_infopacket; + struct dc_info_packet *vsc_infopacket; bool *dpms_off; }; -- GitLab From e71f8ca1a7db244642f9f882f1df003fa567ad43 Mon Sep 17 00:00:00 2001 From: Krunoslav Kovac Date: Thu, 5 Jul 2018 19:23:17 -0400 Subject: [PATCH 0464/1692] drm/amd/display: Handle HDR meta update as fast update [Why] Vesa DPMS tool sends different HDR meta in OS flips without changing output parameters. We don't properly update HDR info frame: - we label HDR meta update as fast update - when updating HW info frame, we only do it if full update [How] It should still be fast update, so when doing HW infoframe update, do it always no matter the update type. Also, don't request passive flip for HDR meta update only without output transfer function or color space changed. 
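As a rough sketch of the resulting control flow, the relevant fragment from inside the per-pipe loop of commit_planes_do_stream_update() looks like this (names are taken from the diff below; only the framing comments are illustrative):

	/* Info frames are now rebuilt before the fast-update early-out, so an
	 * HDR-meta-only (fast) stream update still reprograms the HW info
	 * frame instead of being skipped.
	 */
	if (stream_update->hdr_static_metadata ||
	    stream_update->vrr_infopacket ||
	    stream_update->vsc_infopacket) {
		resource_build_info_frame(pipe_ctx);
		dc->hwss.update_info_frame(pipe_ctx);
	}

	/* Everything past this point remains full-update only. */
	if (update_type == UPDATE_TYPE_FAST)
		continue;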
Signed-off-by: Krunoslav Kovac Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index eda21868c892..9a947f8341bf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1357,6 +1357,13 @@ static void commit_planes_do_stream_update(struct dc *dc, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, pipe_ctx->stream->periodic_fn_vsync_delta); + if (stream_update->hdr_static_metadata || + stream_update->vrr_infopacket || + stream_update->vsc_infopacket) { + resource_build_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); + } + /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; @@ -1383,13 +1390,6 @@ static void commit_planes_do_stream_update(struct dc *dc, pipe_ctx->stream_res.abm->funcs->set_abm_level( pipe_ctx->stream_res.abm, stream->abm_level); } - - if (stream_update->hdr_static_metadata || - stream_update->vrr_infopacket || - stream_update->vsc_infopacket) { - resource_build_info_frame(pipe_ctx); - dc->hwss.update_info_frame(pipe_ctx); - } } } } -- GitLab From 72ac71a7e65260cc9a5f5e7dde5d14892d867c98 Mon Sep 17 00:00:00 2001 From: Krunoslav Kovac Date: Fri, 20 Jul 2018 15:44:08 -0400 Subject: [PATCH 0465/1692] drm/amd/display: HDR dynamic meta should be treated as stream update [Why] Recently we fixed HDR static meta using AFMT registers to be treated as fast stream update. Dynamic meta is still being treated as (full) surface update because it touches HUBP and it travels with pipe data. Here we change it to be (fast) stream update. Note, originally we also wanted to redesign here a bit, but without OS level support for true dynamic meta, it's left the same. We are simply using HW that can do dynamic meta to send HDR static meta, I still prefer keeping it in one static meta type then defining dynamic meta types to hold the same info. Once we know how OS interfaces look like, we can do proper design. [How] Move dyn meta update from update_hubp_dpp to commit_planes_do_stream_update Signed-off-by: Krunoslav Kovac Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 9a947f8341bf..b5c7be1cdd81 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1357,7 +1357,7 @@ static void commit_planes_do_stream_update(struct dc *dc, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, pipe_ctx->stream->periodic_fn_vsync_delta); - if (stream_update->hdr_static_metadata || + if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || stream_update->vsc_infopacket) { resource_build_info_frame(pipe_ctx); -- GitLab From 8ab5617279507044682248f47c3afa9f753d8fe3 Mon Sep 17 00:00:00 2001 From: SivapiriyanKumarasamy Date: Thu, 26 Jul 2018 14:58:35 -0400 Subject: [PATCH 0466/1692] drm/amd/display: Program gamut remap as part of stream update Add gamut remap to dc_stream_update struct, and program if set when updating streams. 
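For illustration, a hypothetical caller of the new interface (apply_gamut_remap() and its remap parameter are assumptions made up for this sketch; dc_stream_update.gamut_remap and dc_stream_set_gamut_remap() are what the patch itself adds):

/* Hypothetical caller sketch: stage a gamut remap through the stream
 * update path added by this patch.
 */
static void apply_gamut_remap(struct dc *dc, struct dc_stream_state *stream,
			      struct colorspace_transform *remap)
{
	struct dc_stream_update update = {0};

	update.gamut_remap = remap;

	/* ... hand 'update' to the driver's stream-commit path; when it
	 * reaches commit_planes_do_stream_update(), the non-NULL
	 * gamut_remap pointer triggers dc_stream_set_gamut_remap(dc, stream),
	 * which walks current_state->res_ctx.pipe_ctx[] and runs
	 * hwss.program_gamut_remap() on every pipe driving this stream.
	 */
}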
Signed-off-by: SivapiriyanKumarasamy Reviewed-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 20 ++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc_stream.h | 8 ++++++++ 2 files changed, 28 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index b5c7be1cdd81..b906b6adc5a8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -359,6 +359,23 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); } +bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) +{ + int i = 0; + bool ret = false; + struct pipe_ctx *pipes; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { + pipes = &dc->current_state->res_ctx.pipe_ctx[i]; + dc->hwss.program_gamut_remap(pipes); + ret = true; + } + } + + return ret; +} + void dc_stream_set_static_screen_events(struct dc *dc, struct dc_stream_state **streams, int num_streams, @@ -1364,6 +1381,9 @@ static void commit_planes_do_stream_update(struct dc *dc, dc->hwss.update_info_frame(pipe_ctx); } + if (stream_update->gamut_remap) + dc_stream_set_gamut_remap(dc, stream); + /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 790beb5cb358..1479b41ec177 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -132,6 +132,11 @@ struct dc_stream_update { struct dc_info_packet *vsc_infopacket; bool *dpms_off; + + struct colorspace_transform *gamut_remap; + enum dc_color_space *output_color_space; + + }; bool dc_is_stream_unchanged( @@ -298,6 +303,9 @@ void dc_stream_set_static_screen_events(struct dc *dc, void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option); +bool dc_stream_set_gamut_remap(struct dc *dc, + const struct dc_stream_state *stream); + bool dc_stream_get_crtc_position(struct dc *dc, struct dc_stream_state **stream, int num_streams, -- GitLab From 1112a46b48b74766bd957742c853c8a582a81991 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 14 Aug 2018 13:32:30 +0800 Subject: [PATCH 0467/1692] drm/amdgpu: Refine function name and function args There are no any logical changes here. 1. change function names: amdgpu_device_ip_late_set_pg/cg_state to amdgpu_device_set_pg/cg_state. 2. add a function argument cg/pg_state, so we can enable/disable cg/pg through those functions Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 39 +++++++++++----------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0b4815c1e181..04fbc63a83b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1690,24 +1690,26 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) } /** - * amdgpu_device_ip_late_set_cg_state - late init for clockgating + * amdgpu_device_set_cg_state - set clockgating for amdgpu device * * @adev: amdgpu_device pointer * - * Late initialization pass enabling clockgating for hardware IPs. 
* The list of all the hardware IPs that make up the asic is walked and the - * set_clockgating_state callbacks are run. This stage is run late - * in the init process. + * set_clockgating_state callbacks are run. + * Late initialization pass enabling clockgating for hardware IPs. + * Fini or suspend, pass disabling clockgating for hardware IPs. * Returns 0 on success, negative error code on failure. */ -static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) +static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, + enum amd_clockgating_state state) { - int i = 0, r; + int i, j, r; if (amdgpu_emu_mode == 1) return 0; - for (i = 0; i < adev->num_ip_blocks; i++) { + for (j = 0; j < adev->num_ip_blocks; j++) { + i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.valid) continue; /* skip CG for VCE/UVD, it's handled specially */ @@ -1717,7 +1719,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* enable clockgating to save power */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_GATE); + state); if (r) { DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -1729,14 +1731,15 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) return 0; } -static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev) +static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state) { - int i = 0, r; + int i, j, r; if (amdgpu_emu_mode == 1) return 0; - for (i = 0; i < adev->num_ip_blocks; i++) { + for (j = 0; j < adev->num_ip_blocks; j++) { + i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; if (!adev->ip_blocks[i].status.valid) continue; /* skip CG for VCE/UVD, it's handled specially */ @@ -1746,7 +1749,7 @@ static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev) adev->ip_blocks[i].version->funcs->set_powergating_state) { /* enable powergating to save power */ r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, - AMD_PG_STATE_GATE); + state); if (r) { DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); @@ -1787,8 +1790,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) } } - amdgpu_device_ip_late_set_cg_state(adev); - amdgpu_device_ip_late_set_pg_state(adev); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); queue_delayed_work(system_wq, &adev->late_init_work, msecs_to_jiffies(AMDGPU_RESUME_MS)); @@ -1906,13 +1909,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) } /** - * amdgpu_device_ip_late_init_func_handler - work handler for clockgating - * - * @work: work_struct + * amdgpu_device_ip_late_init_func_handler - work handler for ib test * - * Work handler for amdgpu_device_ip_late_set_cg_state. We put the - * clockgating setup into a worker thread to speed up driver init and - * resume from suspend. + * @work: work_struct. 
*/ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) { -- GitLab From 05df1f01b2924d2e1737deeb63ad115c1df19021 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 14 Aug 2018 16:54:15 +0800 Subject: [PATCH 0468/1692] drm/amdgpu: Set power ungate state when suspend/fini Unify to set power ungate state at the begin of suspend/fini. Remove the workaround code for gfx off feature in amdgpu_device.c. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 11 +++++------ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ---- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 19 ++++++++++++------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 04fbc63a83b7..13ea4da9f23a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1817,6 +1817,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) int i, r; amdgpu_amdkfd_device_fini(adev); + + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); /* need to disable SMC first */ for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.hw) @@ -1831,8 +1833,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) adev->ip_blocks[i].version->funcs->name, r); return r; } - amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); + r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { @@ -1955,6 +1956,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) amdgpu_virt_request_full_gpu(adev, false); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; @@ -2010,10 +2013,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r); } - /* call smu to disable gfx off feature first when suspend */ - amdgpu_gfx_off_ctrl(adev, false); - cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); - for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..282dba6cce86 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5164,10 +5164,6 @@ static int gfx_v8_0_hw_fini(void *handle) gfx_v8_0_cp_enable(adev, false); gfx_v8_0_rlc_stop(adev); - amdgpu_device_ip_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_UNGATE); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 76d979e276a0..4e1e1a0dd681 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3242,9 +3242,6 @@ static int gfx_v9_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_UNGATE); - amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -3763,6 +3760,10 @@ static int gfx_v9_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: + if (!enable) { + amdgpu_gfx_off_ctrl(adev, false); + 
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); + } if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); @@ -3782,12 +3783,16 @@ static int gfx_v9_0_set_powergating_state(void *handle, /* update mgcg state */ gfx_v9_0_update_gfx_mg_power_gating(adev, enable); - /* set gfx off through smu */ - amdgpu_gfx_off_ctrl(adev, true); + if (enable) + amdgpu_gfx_off_ctrl(adev, true); break; case CHIP_VEGA12: - /* set gfx off through smu */ - amdgpu_gfx_off_ctrl(adev, true); + if (!enable) { + amdgpu_gfx_off_ctrl(adev, false); + cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); + } else { + amdgpu_gfx_off_ctrl(adev, true); + } break; default: break; -- GitLab From fdd34271a321e20358ba8825c59d367e75f313fa Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 14 Aug 2018 17:28:46 +0800 Subject: [PATCH 0469/1692] drm/amdgpu: Set clock ungate state when suspend/fini After set power ungate state, set clock ungate state before when suspend or fini. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 56 ++-------------------- 1 file changed, 5 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 13ea4da9f23a..f623c71977e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1700,6 +1700,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) * Fini or suspend, pass disabling clockgating for hardware IPs. * Returns 0 on success, negative error code on failure. */ + static int amdgpu_device_set_cg_state(struct amdgpu_device *adev, enum amd_clockgating_state state) { @@ -1819,21 +1820,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_amdkfd_device_fini(adev); amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + /* need to disable SMC first */ for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.hw) continue; - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC && - adev->ip_blocks[i].version->funcs->set_clockgating_state) { - /* ungate blocks before hw fini so that we can shutdown the blocks safely */ - r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { @@ -1849,20 +1842,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; - if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && - adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && - adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && - adev->ip_blocks[i].version->funcs->set_clockgating_state) { - /* ungate blocks before hw fini so that we can shutdown the blocks safely */ - r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } - } - r = 
adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { @@ -1957,21 +1936,13 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) amdgpu_virt_request_full_gpu(adev, false); amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; /* displays are handled separately */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) { - /* ungate blocks so that suspend can properly shut them down */ - if (adev->ip_blocks[i].version->funcs->set_clockgating_state) { - r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - } /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ @@ -2006,29 +1977,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) amdgpu_virt_request_full_gpu(adev, false); - /* ungate SMC block first */ - r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r); - } - for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; /* displays are handled in phase1 */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) continue; - /* ungate blocks so that suspend can properly shut them down */ - if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC && - adev->ip_blocks[i].version->funcs->set_clockgating_state) { - r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); - if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - } /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ -- GitLab From 09b6f25b55d9c66af7302e1f09ad90aa5b1dfbcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 15 Aug 2018 14:04:47 +0200 Subject: [PATCH 0470/1692] drm/amdgpu: fix VM size reporting on Raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Raven doesn't have an VCE block and so also no buggy VCE firmware. 
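The guarded clamp, as a standalone sketch (the wrapper function and its parameters are illustrative; the comparison itself mirrors the diff below):

/* Sketch: only clamp the reported GPU VM size when VCE firmware is
 * actually present (fw_version != 0) and older than the first release
 * known to handle more than 40 bits of address.
 */
static u64 clamp_vm_size_for_vce(u64 vm_size, u32 vce_fw_version)
{
	if (vce_fw_version && vce_fw_version < AMDGPU_VCE_FW_53_45)
		return min(vm_size, 1ULL << 40);	/* old VCE: 40-bit limit */

	return vm_size;		/* Raven: fw_version == 0, no clamp */
}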
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a1043b421e3e..0c5d59b89849 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -619,7 +619,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file vm_size -= AMDGPU_VA_RESERVED_SIZE; /* Older VCE FW versions are buggy and can handle only 40bits */ - if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45) + if (adev->vce.fw_version && + adev->vce.fw_version < AMDGPU_VCE_FW_53_45) vm_size = min(vm_size, 1ULL << 40); dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; -- GitLab From 02b29caf18b3b0f6ff7b7d0b639060893cddf930 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Wed, 15 Aug 2018 15:39:33 +0800 Subject: [PATCH 0471/1692] drm/ttm: remove dead codes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These codes are not used. Signed-off-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo_util.c | 5 +---- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 8 +------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 046a6dda690a..ba80150d1052 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -629,10 +629,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, return -EINVAL; if (start_page > bo->num_pages) return -EINVAL; -#if 0 - if (num_pages > 1 && !capable(CAP_SYS_ADMIN)) - return -EPERM; -#endif + (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); ttm_mem_io_unlock(man); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 507be7ac1165..d594f7520b7b 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -410,13 +410,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; -#if 0 - if (nr_free > 1) { - pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n", - pool->dev_name, pool->name, current->pid, - npages_to_free, nr_free); - } -#endif + if (use_static) pages_to_free = static_buf; else -- GitLab From 28a160277aef92b606c869960f40704493d30b42 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Sun, 5 Aug 2018 12:45:35 +0800 Subject: [PATCH 0472/1692] drm/amdgpu: add status checking after fw is loaded The status field must be 0 after FW is loaded. 
Signed-off-by: Huang Rui Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 58e20385eab5..bd397d2916fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -134,6 +134,13 @@ psp_cmd_submit_buf(struct psp_context *psp, msleep(1); } + /* the status field must be 0 after FW is loaded */ + if (ucode && psp->cmd_buf_mem->resp.status) { + DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n", + psp->cmd_buf_mem->resp.status, ucode->ucode_id); + return -EINVAL; + } + if (ucode) { ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; -- GitLab From 2c1e9bca448ab3c49f0bfc687ae79b8123237f4d Mon Sep 17 00:00:00 2001 From: Paul Menzel Date: Wed, 25 Jul 2018 12:54:19 +0200 Subject: [PATCH 0473/1692] drm/amdgpu: Do not evict VRAM on APUs with disabled HIBERNATE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Improve commit d796d844 (drm/radeon/kms: make hibernate work on IGPs) to only migrate VRAM objects if the Linux kernel is actually built with support for hibernation (suspend to disk). The better solution is to get the information, if this is suspend or hibernate, from `amdgpu_device_suspend()`, but that’s more involved, so apply the simple solution first. Link: https://bugs.freedesktop.org/show_bug.cgi?id=107277 Signed-off-by: Paul Menzel Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b0e14a3d54ef..5ddd4e87480b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1019,10 +1019,12 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) int amdgpu_bo_evict_vram(struct amdgpu_device *adev) { /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ - if (0 && (adev->flags & AMD_IS_APU)) { +#ifndef CONFIG_HIBERNATION + if (adev->flags & AMD_IS_APU) { /* Useless to evict on IGP chips */ return 0; } +#endif return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); } -- GitLab From d6257ab531ba0053d6f7c98ef949ab53f9a8ff0a Mon Sep 17 00:00:00 2001 From: Paul Menzel Date: Tue, 31 Jul 2018 18:48:41 +0200 Subject: [PATCH 0474/1692] drm/radeon: Do not evict VRAM on APUs with disabled HIBERNATE Improve commit d796d844 (drm/radeon/kms: make hibernate work on IGPs) to only migrate VRAM objects if the Linux kernel is actually built with support for hibernation (suspend to disk). 
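The invariant can be read as a small post-condition on the submit/poll loop; a generic sketch (psp_check_resp_status() is a hypothetical helper, not part of the patch):

/* Sketch: a PSP response is only accepted when its status field is zero;
 * anything else is reported and mapped to -EINVAL by the caller.
 */
static int psp_check_resp_status(u32 status, u32 ucode_id)
{
	if (!status)
		return 0;

	DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n",
		  status, ucode_id);
	return -EINVAL;
}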
Link: https://bugs.freedesktop.org/show_bug.cgi?id=100941 Signed-off-by: Paul Menzel Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_object.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index ba2fd295697f..92f6d4002eea 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -421,11 +421,13 @@ int radeon_bo_unpin(struct radeon_bo *bo) int radeon_bo_evict_vram(struct radeon_device *rdev) { /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ - if (0 && (rdev->flags & RADEON_IS_IGP)) { +#ifndef CONFIG_HIBERNATION + if (rdev->flags & RADEON_IS_IGP) { if (rdev->mc.igp_sideport_enabled == false) /* Useless to evict on IGP chips */ return 0; } +#endif return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); } -- GitLab From c85e6e546edd7e362693218a33a6f63217802fd3 Mon Sep 17 00:00:00 2001 From: David Francis Date: Mon, 23 Jul 2018 14:12:10 -0400 Subject: [PATCH 0475/1692] drm/amd/display: Create new i2c resource [Why] I2C code did not match dc resource model and was generally unpleasant [How] Move code into new svelte dce_i2c files, replacing various i2c objects with two structs: dce_i2c_sw and dce_i2c_hw. Fully split sw and hw code paths. Remove all redundant declarations. Use address lists to distinguish between versions. Change dce80 code to newer register access macros. Signed-off-by: David Francis Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 +- .../gpu/drm/amd/display/dc/bios/bios_parser.c | 10 +- drivers/gpu/drm/amd/display/dc/core/dc.c | 8 +- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +- drivers/gpu/drm/amd/display/dc/dce/Makefile | 4 +- drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c | 60 ++ drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h | 71 ++ .../gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 951 ++++++++++++++++++ .../gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 335 ++++++ .../gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 602 +++++++++++ .../gpu/drm/amd/display/dc/dce/dce_i2c_sw.h | 57 ++ .../amd/display/dc/dce100/dce100_resource.c | 51 +- .../amd/display/dc/dce110/dce110_resource.c | 51 +- .../amd/display/dc/dce112/dce112_resource.c | 51 +- .../amd/display/dc/dce120/dce120_resource.c | 65 +- .../drm/amd/display/dc/dce80/dce80_resource.c | 99 ++ .../drm/amd/display/dc/dcn10/dcn10_resource.c | 51 +- .../gpu/drm/amd/display/dc/inc/core_types.h | 3 + 18 files changed, 2452 insertions(+), 30 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f0f1e58b9830..1a6b303c8379 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -71,8 +71,6 @@ #include "modules/inc/mod_freesync.h" -#include "i2caux_interface.h" - /* basic init/fini API */ static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); @@ -3610,9 +3608,9 @@ static int amdgpu_dm_i2c_xfer(struct 
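Both the amdgpu and radeon variants follow the same compile-time pattern; a minimal sketch of the radeon side (the function and fields match the driver code in the diff below; the comment wording is illustrative):

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* Eviction only matters when the kernel can actually hibernate:
	 * suspend-to-disk powers down the GPU, so VRAM contents must be
	 * migrated to system RAM first.  Without CONFIG_HIBERNATION the
	 * old "useless to evict on IGP chips" shortcut is kept.
	 */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->mc.igp_sideport_enabled)
			return 0;	/* nothing gained by evicting on IGPs */
	}
#endif
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}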
i2c_adapter *i2c_adap, cmd.payloads[i].data = msgs[i].buf; } - if (dal_i2caux_submit_i2c_command( - ddc_service->ctx->i2caux, - ddc_service->ddc_pin, + if (dc_submit_i2c( + ddc_service->ctx->dc, + ddc_service->ddc_pin->hw_info.ddc_channel, &cmd)) result = num; @@ -3648,6 +3646,7 @@ create_i2c(struct ddc_service *ddc_service, snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); i2c_set_adapdata(&i2c->base, i2c); i2c->ddc_service = ddc_service; + i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; return i2c; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index be8a2494355a..bfa5816cfc92 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -42,7 +42,7 @@ #include "bios_parser_interface.h" #include "bios_parser_common.h" -/* TODO remove - only needed for default i2c speed */ + #include "dc.h" #define THREE_PERCENT_OF_10000 300 @@ -2671,11 +2671,9 @@ static bool i2c_read( cmd.payloads = payloads; cmd.number_of_payloads = ARRAY_SIZE(payloads); - - /* TODO route this through drm i2c_adapter */ - result = dal_i2caux_submit_i2c_command( - ddc->ctx->i2caux, - ddc, + result = dc_submit_i2c( + ddc->ctx->dc, + ddc->hw_info.ddc_channel, &cmd); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index b906b6adc5a8..99450293a1c5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -54,6 +54,9 @@ #include "hubp.h" #include "dc_link_dp.h" + +#include "dce/dce_i2c.h" + #define DC_LOGGER \ dc->ctx->logger @@ -1673,9 +1676,8 @@ bool dc_submit_i2c( struct dc_link *link = dc->links[link_index]; struct ddc_service *ddc = link->ddc; - - return dal_i2caux_submit_i2c_command( - ddc->ctx->i2caux, + return dce_i2c_submit_command( + dc->res_pool, ddc->ddc_pin, cmd); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 739c6654d849..6638251162b0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1530,8 +1530,8 @@ static bool i2c_write(struct pipe_ctx *pipe_ctx, payload.write = true; cmd.payloads = &payload; - if (dc_submit_i2c(pipe_ctx->stream->ctx->dc, - pipe_ctx->stream->sink->link->link_index, &cmd)) + if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx, + pipe_ctx->stream->sink->link, &cmd)) return true; return false; diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index 825537bd4545..8f7f0e8b341f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile @@ -28,8 +28,8 @@ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ -dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o - +dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ +dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c new file mode 100644 index 000000000000..35a75398fcb4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c @@ -0,0 +1,60 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "dce_i2c.h" +#include "reg_helper.h" + +bool dce_i2c_submit_command( + struct resource_pool *pool, + struct ddc *ddc, + struct i2c_command *cmd) +{ + struct dce_i2c_hw *dce_i2c_hw; + struct dce_i2c_sw *dce_i2c_sw; + + if (!ddc) { + BREAK_TO_DEBUGGER(); + return false; + } + + if (!cmd) { + BREAK_TO_DEBUGGER(); + return false; + } + + /* The software engine is only available on dce8 */ + dce_i2c_sw = dce_i2c_acquire_i2c_sw_engine(pool, ddc); + + if (!dce_i2c_sw) { + dce_i2c_hw = acquire_i2c_hw_engine(pool, ddc); + + if (!dce_i2c_hw) + return false; + + return dce_i2c_submit_command_hw(pool, ddc, cmd, dce_i2c_hw); + } + + return dce_i2c_submit_command_sw(pool, ddc, cmd, dce_i2c_sw); + +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h new file mode 100644 index 000000000000..d655f89578ca --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h @@ -0,0 +1,71 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCE_I2C_H__ +#define __DCE_I2C_H__ + +#include "inc/core_types.h" +#include "dce_i2c_hw.h" +#include "dce_i2c_sw.h" + +enum dce_i2c_transaction_status { + DCE_I2C_TRANSACTION_STATUS_UNKNOWN = (-1L), + DCE_I2C_TRANSACTION_STATUS_SUCCEEDED, + DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY, + DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT, + DCE_I2C_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR, + DCE_I2C_TRANSACTION_STATUS_FAILED_NACK, + DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE, + DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION, + DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION, + DCE_I2C_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW, + DCE_I2C_TRANSACTION_STATUS_FAILED_HPD_DISCON +}; + +enum dce_i2c_transaction_operation { + DCE_I2C_TRANSACTION_READ, + DCE_I2C_TRANSACTION_WRITE +}; + +struct dce_i2c_transaction_payload { + enum dce_i2c_transaction_address_space address_space; + uint32_t address; + uint32_t length; + uint8_t *data; +}; + +struct dce_i2c_transaction_request { + enum dce_i2c_transaction_operation operation; + struct dce_i2c_transaction_payload payload; + enum dce_i2c_transaction_status status; +}; + + +bool dce_i2c_submit_command( + struct resource_pool *pool, + struct ddc *ddc, + struct i2c_command *cmd); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c new file mode 100644 index 000000000000..6a57c4874e6b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -0,0 +1,951 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "dce_i2c.h" +#include "dce_i2c_hw.h" +#include "reg_helper.h" +#include "include/gpio_service_interface.h" + +#define CTX \ + dce_i2c_hw->ctx +#define REG(reg)\ + dce_i2c_hw->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + dce_i2c_hw->shifts->field_name, dce_i2c_hw->masks->field_name + + +static inline void reset_hw_engine(struct dce_i2c_hw *dce_i2c_hw) +{ + REG_UPDATE_2(DC_I2C_CONTROL, + DC_I2C_SW_STATUS_RESET, 1, + DC_I2C_SW_STATUS_RESET, 1); +} + +static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t i2c_sw_status = 0; + + REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); + if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE) + return false; + + reset_hw_engine(dce_i2c_hw); + + REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); + return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE; +} + +static void set_speed_hw_dce80( + struct dce_i2c_hw *dce_i2c_hw, + uint32_t speed) +{ + + if (speed) { + REG_UPDATE_N(SPEED, 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); + } +} +static void set_speed_hw_dce100( + struct dce_i2c_hw *dce_i2c_hw, + uint32_t speed) +{ + + if (speed) { + if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) + REG_UPDATE_N(SPEED, 3, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1); + else + REG_UPDATE_N(SPEED, 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); + } +} +bool dce_i2c_hw_engine_acquire_engine( + struct dce_i2c_hw *dce_i2c_hw, + struct ddc *ddc) +{ + + enum gpio_result result; + uint32_t current_speed; + + result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, + GPIO_DDC_CONFIG_TYPE_MODE_I2C); + + if (result != GPIO_RESULT_OK) + return false; + + dce_i2c_hw->ddc = ddc; + + + current_speed = dce_i2c_hw->funcs->get_speed(dce_i2c_hw); + + if (current_speed) + dce_i2c_hw->original_speed = current_speed; + + return true; +} +bool dce_i2c_engine_acquire_hw( + struct dce_i2c_hw *dce_i2c_hw, + struct ddc *ddc_handle) +{ + + uint32_t counter = 0; + bool result; + + do { + result = dce_i2c_hw_engine_acquire_engine( + dce_i2c_hw, ddc_handle); + + if (result) + break; + + /* i2c_engine is busy by VBios, lets wait and retry */ + + udelay(10); + + ++counter; + } while (counter < 2); + + if (result) { + if (!dce_i2c_hw->funcs->setup_engine(dce_i2c_hw)) { + dce_i2c_hw->funcs->release_engine(dce_i2c_hw); + result = false; + } + } + + return result; +} +struct dce_i2c_hw *acquire_i2c_hw_engine( + struct resource_pool *pool, + struct ddc *ddc) +{ + + struct dce_i2c_hw *engine = NULL; + + if (!ddc) + return NULL; + + if (ddc->hw_info.hw_supported) { + enum gpio_ddc_line line = dal_ddc_get_line(ddc); + + if (line < pool->pipe_count) + engine = pool->hw_i2cs[line]; + } + + if (!engine) + return NULL; + + + if (!pool->i2c_hw_buffer_in_use && + dce_i2c_engine_acquire_hw(engine, ddc)) { + pool->i2c_hw_buffer_in_use = true; + return engine; + } + + + return NULL; +} + +static bool setup_engine_hw_dce100( + struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; + + if (dce_i2c_hw->setup_limit != 0) + i2c_setup_limit = dce_i2c_hw->setup_limit; + /* Program pin select */ + REG_UPDATE_6(DC_I2C_CONTROL, + DC_I2C_GO, 
0, + DC_I2C_SOFT_RESET, 0, + DC_I2C_SEND_RESET, 0, + DC_I2C_SW_STATUS_RESET, 1, + DC_I2C_TRANSACTION_COUNT, 0, + DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id); + + /* Program time limit */ + if (dce_i2c_hw->send_reset_length == 0) { + /*pre-dcn*/ + REG_UPDATE_N(SETUP, 2, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); + } + /* Program HW priority + * set to High - interrupt software I2C at any time + * Enable restart of SW I2C that was interrupted by HW + * disable queuing of software while I2C is in use by HW + */ + REG_UPDATE_2(DC_I2C_ARBITRATION, + DC_I2C_NO_QUEUED_SW_GO, 0, + DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL); + + return true; +} +static bool setup_engine_hw_dce80( + struct dce_i2c_hw *dce_i2c_hw) +{ + + /* Program pin select */ + { + REG_UPDATE_6(DC_I2C_CONTROL, + DC_I2C_GO, 0, + DC_I2C_SOFT_RESET, 0, + DC_I2C_SEND_RESET, 0, + DC_I2C_SW_STATUS_RESET, 1, + DC_I2C_TRANSACTION_COUNT, 0, + DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id); + } + + /* Program time limit */ + { + REG_UPDATE_2(SETUP, + DC_I2C_DDC1_TIME_LIMIT, I2C_SETUP_TIME_LIMIT_DCE, + DC_I2C_DDC1_ENABLE, 1); + } + + /* Program HW priority + * set to High - interrupt software I2C at any time + * Enable restart of SW I2C that was interrupted by HW + * disable queuing of software while I2C is in use by HW + */ + { + REG_UPDATE_2(DC_I2C_ARBITRATION, + DC_I2C_NO_QUEUED_SW_GO, 0, + DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL); + } + + return true; +} + + + +static void process_channel_reply_hw_dce80( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_reply_transaction_data *reply) +{ + uint32_t length = reply->length; + uint8_t *buffer = reply->data; + + REG_SET_3(DC_I2C_DATA, 0, + DC_I2C_INDEX, length - 1, + DC_I2C_DATA_RW, 1, + DC_I2C_INDEX_WRITE, 1); + + while (length) { + /* after reading the status, + * if the I2C operation executed successfully + * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller + * should read data bytes from I2C circular data buffer + */ + + uint32_t i2c_data; + + REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data); + *buffer++ = i2c_data; + + --length; + } +} +static void process_channel_reply_hw_dce100( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_reply_transaction_data *reply) +{ + uint32_t length = reply->length; + uint8_t *buffer = reply->data; + + REG_SET_3(DC_I2C_DATA, 0, + DC_I2C_INDEX, dce_i2c_hw->buffer_used_write, + DC_I2C_DATA_RW, 1, + DC_I2C_INDEX_WRITE, 1); + + while (length) { + /* after reading the status, + * if the I2C operation executed successfully + * (i.e. 
DC_I2C_STATUS_DONE = 1) then the I2C controller + * should read data bytes from I2C circular data buffer + */ + + uint32_t i2c_data; + + REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data); + *buffer++ = i2c_data; + + --length; + } +} +enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result( + struct dce_i2c_hw *dce_i2c_hw, + uint32_t timeout, + enum i2c_channel_operation_result expected_result) +{ + enum i2c_channel_operation_result result; + uint32_t i = 0; + + if (!timeout) + return I2C_CHANNEL_OPERATION_SUCCEEDED; + + do { + + result = dce_i2c_hw->funcs->get_channel_status( + dce_i2c_hw, NULL); + + if (result != expected_result) + break; + + udelay(1); + + ++i; + } while (i < timeout); + return result; +} +static enum i2c_channel_operation_result get_channel_status_hw( + struct dce_i2c_hw *dce_i2c_hw, + uint8_t *returned_bytes) +{ + uint32_t i2c_sw_status = 0; + uint32_t value = + REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); + if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return I2C_CHANNEL_OPERATION_ENGINE_BUSY; + else if (value & dce_i2c_hw->masks->DC_I2C_SW_STOPPED_ON_NACK) + return I2C_CHANNEL_OPERATION_NO_RESPONSE; + else if (value & dce_i2c_hw->masks->DC_I2C_SW_TIMEOUT) + return I2C_CHANNEL_OPERATION_TIMEOUT; + else if (value & dce_i2c_hw->masks->DC_I2C_SW_ABORTED) + return I2C_CHANNEL_OPERATION_FAILED; + else if (value & dce_i2c_hw->masks->DC_I2C_SW_DONE) + return I2C_CHANNEL_OPERATION_SUCCEEDED; + + /* + * this is the case when HW used for communication, I2C_SW_STATUS + * could be zero + */ + return I2C_CHANNEL_OPERATION_SUCCEEDED; +} + +static void submit_channel_request_hw( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_request_transaction_data *request) +{ + request->status = I2C_CHANNEL_OPERATION_SUCCEEDED; + + if (!dce_i2c_hw->funcs->process_transaction(dce_i2c_hw, request)) + return; + + if (dce_i2c_hw->funcs->is_hw_busy(dce_i2c_hw)) { + request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY; + return; + } + + dce_i2c_hw->funcs->execute_transaction(dce_i2c_hw); + + +} +uint32_t get_reference_clock( + struct dc_bios *bios) +{ + struct dc_firmware_info info = { { 0 } }; + + if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK) + return 0; + + return info.pll_info.crystal_frequency; +} + +static void execute_transaction_hw( + struct dce_i2c_hw *dce_i2c_hw) +{ + REG_UPDATE_N(SETUP, 5, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0); + + + REG_UPDATE_5(DC_I2C_CONTROL, + DC_I2C_SOFT_RESET, 0, + DC_I2C_SW_STATUS_RESET, 0, + DC_I2C_SEND_RESET, 0, + DC_I2C_GO, 0, + DC_I2C_TRANSACTION_COUNT, dce_i2c_hw->transaction_count - 1); + + /* start I2C transfer */ + REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1); + + /* all transactions were executed and HW buffer became empty + * (even though it actually happens when status becomes DONE) + */ + dce_i2c_hw->transaction_count = 0; + dce_i2c_hw->buffer_used_bytes = 0; +} +static bool process_transaction_hw_dce80( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_request_transaction_data *request) +{ + uint32_t length = request->length; + uint8_t *buffer = request->data; + + bool last_transaction = false; + uint32_t value = 0; + + { + + last_transaction = ((dce_i2c_hw->transaction_count == 3) || + (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || + 
(request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)); + + + switch (dce_i2c_hw->transaction_count) { + case 0: + REG_UPDATE_5(DC_I2C_TRANSACTION0, + DC_I2C_STOP_ON_NACK0, 1, + DC_I2C_START0, 1, + DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), + DC_I2C_COUNT0, length, + DC_I2C_STOP0, last_transaction ? 1 : 0); + break; + case 1: + REG_UPDATE_5(DC_I2C_TRANSACTION1, + DC_I2C_STOP_ON_NACK0, 1, + DC_I2C_START0, 1, + DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), + DC_I2C_COUNT0, length, + DC_I2C_STOP0, last_transaction ? 1 : 0); + break; + case 2: + REG_UPDATE_5(DC_I2C_TRANSACTION2, + DC_I2C_STOP_ON_NACK0, 1, + DC_I2C_START0, 1, + DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), + DC_I2C_COUNT0, length, + DC_I2C_STOP0, last_transaction ? 1 : 0); + break; + case 3: + REG_UPDATE_5(DC_I2C_TRANSACTION3, + DC_I2C_STOP_ON_NACK0, 1, + DC_I2C_START0, 1, + DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), + DC_I2C_COUNT0, length, + DC_I2C_STOP0, last_transaction ? 1 : 0); + break; + default: + /* TODO Warning ? */ + break; + } + } + + /* Write the I2C address and I2C data + * into the hardware circular buffer, one byte per entry. + * As an example, the 7-bit I2C slave address for CRT monitor + * for reading DDC/EDID information is 0b1010001. + * For an I2C send operation, the LSB must be programmed to 0; + * for I2C receive operation, the LSB must be programmed to 1. + */ + + { + if (dce_i2c_hw->transaction_count == 0) { + value = REG_SET_4(DC_I2C_DATA, 0, + DC_I2C_DATA_RW, false, + DC_I2C_DATA, request->address, + DC_I2C_INDEX, 0, + DC_I2C_INDEX_WRITE, 1); + } else + value = REG_SET_2(DC_I2C_DATA, 0, + DC_I2C_DATA_RW, false, + DC_I2C_DATA, request->address); + + if (!(request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) { + + while (length) { + REG_SET_2(DC_I2C_DATA, value, + DC_I2C_INDEX_WRITE, 0, + DC_I2C_DATA, *buffer++); + --length; + } + } + } + + ++dce_i2c_hw->transaction_count; + dce_i2c_hw->buffer_used_bytes += length + 1; + + return last_transaction; +} + +#define STOP_TRANS_PREDICAT \ + ((dce_i2c_hw->transaction_count == 3) || \ + (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || \ + (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) + +#define SET_I2C_TRANSACTION(id) \ + do { \ + REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \ + FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \ + FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \ + FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \ + FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)), \ + FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \ + if (STOP_TRANS_PREDICAT) \ + last_transaction = true; \ + } while (false) + +static bool process_transaction_hw_dce100( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_request_transaction_data *request) +{ + uint32_t length = request->length; + uint8_t *buffer = request->data; + uint32_t value = 0; + + bool last_transaction = false; + + switch (dce_i2c_hw->transaction_count) { + case 0: + SET_I2C_TRANSACTION(0); + break; + case 1: + SET_I2C_TRANSACTION(1); + break; + case 2: + SET_I2C_TRANSACTION(2); + break; + case 3: + SET_I2C_TRANSACTION(3); + break; + default: + /* TODO Warning ? */ + break; + } + + + /* Write the I2C address and I2C data + * into the hardware circular buffer, one byte per entry. + * As an example, the 7-bit I2C slave address for CRT monitor + * for reading DDC/EDID information is 0b1010001. 
+ * For an I2C send operation, the LSB must be programmed to 0; + * for I2C receive operation, the LSB must be programmed to 1. + */ + if (dce_i2c_hw->transaction_count == 0) { + value = REG_SET_4(DC_I2C_DATA, 0, + DC_I2C_DATA_RW, false, + DC_I2C_DATA, request->address, + DC_I2C_INDEX, 0, + DC_I2C_INDEX_WRITE, 1); + dce_i2c_hw->buffer_used_write = 0; + } else + value = REG_SET_2(DC_I2C_DATA, 0, + DC_I2C_DATA_RW, false, + DC_I2C_DATA, request->address); + + dce_i2c_hw->buffer_used_write++; + + if (!(request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) { + while (length) { + REG_SET_2(DC_I2C_DATA, value, + DC_I2C_INDEX_WRITE, 0, + DC_I2C_DATA, *buffer++); + dce_i2c_hw->buffer_used_write++; + --length; + } + } + + ++dce_i2c_hw->transaction_count; + dce_i2c_hw->buffer_used_bytes += length + 1; + + return last_transaction; +} +static uint32_t get_transaction_timeout_hw( + const struct dce_i2c_hw *dce_i2c_hw, + uint32_t length) +{ + + uint32_t speed = dce_i2c_hw->funcs->get_speed(dce_i2c_hw); + + + + uint32_t period_timeout; + uint32_t num_of_clock_stretches; + + if (!speed) + return 0; + + period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed; + + num_of_clock_stretches = 1 + (length << 3) + 1; + num_of_clock_stretches += + (dce_i2c_hw->buffer_used_bytes << 3) + + (dce_i2c_hw->transaction_count << 1); + + return period_timeout * num_of_clock_stretches; +} + +static void release_engine_dce_hw( + struct resource_pool *pool, + struct dce_i2c_hw *dce_i2c_hw) +{ + pool->i2c_hw_buffer_in_use = false; + + dce_i2c_hw->funcs->release_engine(dce_i2c_hw); + dal_ddc_close(dce_i2c_hw->ddc); + + dce_i2c_hw->ddc = NULL; +} + +static void release_engine_hw( + struct dce_i2c_hw *dce_i2c_hw) +{ + bool safe_to_reset; + + /* Restore original HW engine speed */ + + dce_i2c_hw->funcs->set_speed(dce_i2c_hw, dce_i2c_hw->original_speed); + + /* Release I2C */ + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1); + + /* Reset HW engine */ + { + uint32_t i2c_sw_status = 0; + + REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); + /* if used by SW, safe to reset */ + safe_to_reset = (i2c_sw_status == 1); + } + + if (safe_to_reset) + REG_UPDATE_2(DC_I2C_CONTROL, + DC_I2C_SOFT_RESET, 1, + DC_I2C_SW_STATUS_RESET, 1); + else + REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1); + /* HW I2c engine - clock gating feature */ + if (!dce_i2c_hw->engine_keep_power_up_count) + dce_i2c_hw->funcs->disable_i2c_hw_engine(dce_i2c_hw); + +} + + +static void disable_i2c_hw_engine( + struct dce_i2c_hw *dce_i2c_hw) +{ + REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0); +} +static uint32_t get_speed_hw( + const struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t pre_scale = 0; + + REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); + + /* [anaumov] it seems following is unnecessary */ + /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ + return pre_scale ? 
+ dce_i2c_hw->reference_frequency / pre_scale : + dce_i2c_hw->default_speed; +} +static uint32_t get_hw_buffer_available_size( + const struct dce_i2c_hw *dce_i2c_hw) +{ + return dce_i2c_hw->buffer_size - + dce_i2c_hw->buffer_used_bytes; +} +bool dce_i2c_hw_engine_submit_request( + struct dce_i2c_hw *dce_i2c_hw, + struct dce_i2c_transaction_request *dce_i2c_request, + bool middle_of_transaction) +{ + + struct i2c_request_transaction_data request; + + uint32_t transaction_timeout; + + enum i2c_channel_operation_result operation_result; + + bool result = false; + + /* We need following: + * transaction length will not exceed + * the number of free bytes in HW buffer (minus one for address) + */ + + if (dce_i2c_request->payload.length >= + get_hw_buffer_available_size(dce_i2c_hw)) { + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW; + return false; + } + + if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ) + request.action = middle_of_transaction ? + DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT : + DCE_I2C_TRANSACTION_ACTION_I2C_READ; + else if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_WRITE) + request.action = middle_of_transaction ? + DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT : + DCE_I2C_TRANSACTION_ACTION_I2C_WRITE; + else { + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION; + /* [anaumov] in DAL2, there was no "return false" */ + return false; + } + + request.address = (uint8_t) dce_i2c_request->payload.address; + request.length = dce_i2c_request->payload.length; + request.data = dce_i2c_request->payload.data; + + /* obtain timeout value before submitting request */ + + transaction_timeout = get_transaction_timeout_hw( + dce_i2c_hw, dce_i2c_request->payload.length + 1); + + submit_channel_request_hw( + dce_i2c_hw, &request); + + if ((request.status == I2C_CHANNEL_OPERATION_FAILED) || + (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) { + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY; + return false; + } + + /* wait until transaction proceed */ + + operation_result = dce_i2c_hw_engine_wait_on_operation_result( + dce_i2c_hw, + transaction_timeout, + I2C_CHANNEL_OPERATION_ENGINE_BUSY); + + /* update transaction status */ + + switch (operation_result) { + case I2C_CHANNEL_OPERATION_SUCCEEDED: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_SUCCEEDED; + result = true; + break; + case I2C_CHANNEL_OPERATION_NO_RESPONSE: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_NACK; + break; + case I2C_CHANNEL_OPERATION_TIMEOUT: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT; + break; + case I2C_CHANNEL_OPERATION_FAILED: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE; + break; + default: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION; + } + + if (result && (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ)) { + struct i2c_reply_transaction_data reply; + + reply.data = dce_i2c_request->payload.data; + reply.length = dce_i2c_request->payload.length; + + dce_i2c_hw->funcs->process_channel_reply(dce_i2c_hw, &reply); + + + } + + return result; +} + +bool dce_i2c_submit_command_hw( + struct resource_pool *pool, + struct ddc *ddc, + struct i2c_command *cmd, + struct dce_i2c_hw *dce_i2c_hw) +{ + uint8_t index_of_payload = 0; + bool result; + + dce_i2c_hw->funcs->set_speed(dce_i2c_hw, cmd->speed); + + result = true; + + while (index_of_payload < cmd->number_of_payloads) { + bool mot = 
(index_of_payload != cmd->number_of_payloads - 1); + + struct i2c_payload *payload = cmd->payloads + index_of_payload; + + struct dce_i2c_transaction_request request = { 0 }; + + request.operation = payload->write ? + DCE_I2C_TRANSACTION_WRITE : + DCE_I2C_TRANSACTION_READ; + + request.payload.address_space = + DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C; + request.payload.address = (payload->address << 1) | + !payload->write; + request.payload.length = payload->length; + request.payload.data = payload->data; + + + if (!dce_i2c_hw_engine_submit_request( + dce_i2c_hw, &request, mot)) { + result = false; + break; + } + + + + ++index_of_payload; + } + + release_engine_dce_hw(pool, dce_i2c_hw); + + return result; +} +static const struct dce_i2c_hw_funcs dce100_i2c_hw_funcs = { + .setup_engine = setup_engine_hw_dce100, + .set_speed = set_speed_hw_dce100, + .get_speed = get_speed_hw, + .release_engine = release_engine_hw, + .process_transaction = process_transaction_hw_dce100, + .process_channel_reply = process_channel_reply_hw_dce100, + .is_hw_busy = is_hw_busy, + .get_channel_status = get_channel_status_hw, + .execute_transaction = execute_transaction_hw, + .disable_i2c_hw_engine = disable_i2c_hw_engine +}; +static const struct dce_i2c_hw_funcs dce80_i2c_hw_funcs = { + .setup_engine = setup_engine_hw_dce80, + .set_speed = set_speed_hw_dce80, + .get_speed = get_speed_hw, + .release_engine = release_engine_hw, + .process_transaction = process_transaction_hw_dce80, + .process_channel_reply = process_channel_reply_hw_dce80, + .is_hw_busy = is_hw_busy, + .get_channel_status = get_channel_status_hw, + .execute_transaction = execute_transaction_hw, + .disable_i2c_hw_engine = disable_i2c_hw_engine +}; + + + +void dce_i2c_hw_construct( + struct dce_i2c_hw *dce_i2c_hw, + struct dc_context *ctx, + uint32_t engine_id, + const struct dce_i2c_registers *regs, + const struct dce_i2c_shift *shifts, + const struct dce_i2c_mask *masks) +{ + dce_i2c_hw->ctx = ctx; + dce_i2c_hw->engine_id = engine_id; + dce_i2c_hw->reference_frequency = get_reference_clock(ctx->dc_bios) >> 1; + dce_i2c_hw->regs = regs; + dce_i2c_hw->shifts = shifts; + dce_i2c_hw->masks = masks; + dce_i2c_hw->buffer_used_bytes = 0; + dce_i2c_hw->transaction_count = 0; + dce_i2c_hw->engine_keep_power_up_count = 1; + dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED; + dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED; + dce_i2c_hw->send_reset_length = 0; + dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; + dce_i2c_hw->funcs = &dce80_i2c_hw_funcs; + dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE; +} + +void dce100_i2c_hw_construct( + struct dce_i2c_hw *dce_i2c_hw, + struct dc_context *ctx, + uint32_t engine_id, + const struct dce_i2c_registers *regs, + const struct dce_i2c_shift *shifts, + const struct dce_i2c_mask *masks) +{ + + uint32_t xtal_ref_div = 0; + + dce_i2c_hw_construct(dce_i2c_hw, + ctx, + engine_id, + regs, + shifts, + masks); + dce_i2c_hw->funcs = &dce100_i2c_hw_funcs; + dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100; + + REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); + + if (xtal_ref_div == 0) + xtal_ref_div = 2; + + /*Calculating Reference Clock by divding original frequency by + * XTAL_REF_DIV. + * At upper level, uint32_t reference_frequency = + * dal_dce_i2c_get_reference_clock(as) >> 1 + * which already divided by 2. 
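+ * (i.e. at this point it holds crystal_frequency / 2.)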
So we need x2 to get original + * reference clock from ppll_info + */ + dce_i2c_hw->reference_frequency = + (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div; +} + +void dce112_i2c_hw_construct( + struct dce_i2c_hw *dce_i2c_hw, + struct dc_context *ctx, + uint32_t engine_id, + const struct dce_i2c_registers *regs, + const struct dce_i2c_shift *shifts, + const struct dce_i2c_mask *masks) +{ + dce100_i2c_hw_construct(dce_i2c_hw, + ctx, + engine_id, + regs, + shifts, + masks); + dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED_100KHZ; +} + +void dcn1_i2c_hw_construct( + struct dce_i2c_hw *dce_i2c_hw, + struct dc_context *ctx, + uint32_t engine_id, + const struct dce_i2c_registers *regs, + const struct dce_i2c_shift *shifts, + const struct dce_i2c_mask *masks) +{ + dce112_i2c_hw_construct(dce_i2c_hw, + ctx, + engine_id, + regs, + shifts, + masks); + dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h new file mode 100644 index 000000000000..8baef3916246 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -0,0 +1,335 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCE_I2C_HW_H__ +#define __DCE_I2C_HW_H__ + +enum dc_i2c_status { + DC_I2C_STATUS__DC_I2C_STATUS_IDLE, + DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW, + DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW +}; + +enum dc_i2c_arbitration { + DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL, + DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH +}; + +enum i2c_channel_operation_result { + I2C_CHANNEL_OPERATION_SUCCEEDED, + I2C_CHANNEL_OPERATION_FAILED, + I2C_CHANNEL_OPERATION_NOT_GRANTED, + I2C_CHANNEL_OPERATION_IS_BUSY, + I2C_CHANNEL_OPERATION_NO_HANDLE_PROVIDED, + I2C_CHANNEL_OPERATION_CHANNEL_IN_USE, + I2C_CHANNEL_OPERATION_CHANNEL_CLIENT_MAX_ALLOWED, + I2C_CHANNEL_OPERATION_ENGINE_BUSY, + I2C_CHANNEL_OPERATION_TIMEOUT, + I2C_CHANNEL_OPERATION_NO_RESPONSE, + I2C_CHANNEL_OPERATION_HW_REQUEST_I2C_BUS, + I2C_CHANNEL_OPERATION_WRONG_PARAMETER, + I2C_CHANNEL_OPERATION_OUT_NB_OF_RETRIES, + I2C_CHANNEL_OPERATION_NOT_STARTED +}; + + +enum dce_i2c_transaction_action { + DCE_I2C_TRANSACTION_ACTION_I2C_WRITE = 0x00, + DCE_I2C_TRANSACTION_ACTION_I2C_READ = 0x10, + DCE_I2C_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20, + + DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40, + DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT = 0x50, + DCE_I2C_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60, + + DCE_I2C_TRANSACTION_ACTION_DP_WRITE = 0x80, + DCE_I2C_TRANSACTION_ACTION_DP_READ = 0x90 +}; + +enum { + I2C_SETUP_TIME_LIMIT_DCE = 255, + I2C_SETUP_TIME_LIMIT_DCN = 3, + I2C_HW_BUFFER_SIZE_DCE100 = 538, + I2C_HW_BUFFER_SIZE_DCE = 144, + I2C_SEND_RESET_LENGTH_9 = 9, + I2C_SEND_RESET_LENGTH_10 = 10, + DEFAULT_I2C_HW_SPEED = 50, + DEFAULT_I2C_HW_SPEED_100KHZ = 100, + TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32, +}; + +#define I2C_HW_ENGINE_COMMON_REG_LIST(id)\ + SRI(SETUP, DC_I2C_DDC, id),\ + SRI(SPEED, DC_I2C_DDC, id),\ + SR(DC_I2C_ARBITRATION),\ + SR(DC_I2C_CONTROL),\ + SR(DC_I2C_SW_STATUS),\ + SR(DC_I2C_TRANSACTION0),\ + SR(DC_I2C_TRANSACTION1),\ + SR(DC_I2C_TRANSACTION2),\ + SR(DC_I2C_TRANSACTION3),\ + SR(DC_I2C_DATA),\ + SR(MICROSECOND_TIME_BASE_DIV) + +#define I2C_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\ + I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\ + I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT, mask_sh),\ + I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN, mask_sh),\ + I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN, mask_sh),\ + I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\ + I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\ + I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\ + I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\ + I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\ + I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\ + I2C_SF(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, mask_sh),\ + I2C_SF(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, mask_sh),\ + I2C_SF(DC_I2C_CONTROL, DC_I2C_GO, mask_sh),\ + I2C_SF(DC_I2C_CONTROL, DC_I2C_SEND_RESET, mask_sh),\ + I2C_SF(DC_I2C_CONTROL, DC_I2C_TRANSACTION_COUNT, mask_sh),\ + I2C_SF(DC_I2C_CONTROL, DC_I2C_DDC_SELECT, mask_sh),\ + I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE, mask_sh),\ + I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD, mask_sh),\ + I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STOPPED_ON_NACK, mask_sh),\ + I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_TIMEOUT, mask_sh),\ + I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_ABORTED, mask_sh),\ + I2C_SF(DC_I2C_SW_STATUS, 
DC_I2C_SW_DONE, mask_sh),\ + I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, mask_sh),\ + I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, mask_sh),\ + I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_START0, mask_sh),\ + I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_RW0, mask_sh),\ + I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP0, mask_sh),\ + I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_COUNT0, mask_sh),\ + I2C_SF(DC_I2C_DATA, DC_I2C_DATA_RW, mask_sh),\ + I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\ + I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\ + I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\ + I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh) + +#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\ + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\ + I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL, mask_sh) + +struct dce_i2c_shift { + uint8_t DC_I2C_DDC1_ENABLE; + uint8_t DC_I2C_DDC1_TIME_LIMIT; + uint8_t DC_I2C_DDC1_DATA_DRIVE_EN; + uint8_t DC_I2C_DDC1_CLK_DRIVE_EN; + uint8_t DC_I2C_DDC1_DATA_DRIVE_SEL; + uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY; + uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY; + uint8_t DC_I2C_SW_DONE_USING_I2C_REG; + uint8_t DC_I2C_NO_QUEUED_SW_GO; + uint8_t DC_I2C_SW_PRIORITY; + uint8_t DC_I2C_SOFT_RESET; + uint8_t DC_I2C_SW_STATUS_RESET; + uint8_t DC_I2C_GO; + uint8_t DC_I2C_SEND_RESET; + uint8_t DC_I2C_TRANSACTION_COUNT; + uint8_t DC_I2C_DDC_SELECT; + uint8_t DC_I2C_DDC1_PRESCALE; + uint8_t DC_I2C_DDC1_THRESHOLD; + uint8_t DC_I2C_DDC1_START_STOP_TIMING_CNTL; + uint8_t DC_I2C_SW_STOPPED_ON_NACK; + uint8_t DC_I2C_SW_TIMEOUT; + uint8_t DC_I2C_SW_ABORTED; + uint8_t DC_I2C_SW_DONE; + uint8_t DC_I2C_SW_STATUS; + uint8_t DC_I2C_STOP_ON_NACK0; + uint8_t DC_I2C_START0; + uint8_t DC_I2C_RW0; + uint8_t DC_I2C_STOP0; + uint8_t DC_I2C_COUNT0; + uint8_t DC_I2C_DATA_RW; + uint8_t DC_I2C_DATA; + uint8_t DC_I2C_INDEX; + uint8_t DC_I2C_INDEX_WRITE; + uint8_t XTAL_REF_DIV; +}; + +struct dce_i2c_mask { + uint32_t DC_I2C_DDC1_ENABLE; + uint32_t DC_I2C_DDC1_TIME_LIMIT; + uint32_t DC_I2C_DDC1_DATA_DRIVE_EN; + uint32_t DC_I2C_DDC1_CLK_DRIVE_EN; + uint32_t DC_I2C_DDC1_DATA_DRIVE_SEL; + uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY; + uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY; + uint32_t DC_I2C_SW_DONE_USING_I2C_REG; + uint32_t DC_I2C_NO_QUEUED_SW_GO; + uint32_t DC_I2C_SW_PRIORITY; + uint32_t DC_I2C_SOFT_RESET; + uint32_t DC_I2C_SW_STATUS_RESET; + uint32_t DC_I2C_GO; + uint32_t DC_I2C_SEND_RESET; + uint32_t DC_I2C_TRANSACTION_COUNT; + uint32_t DC_I2C_DDC_SELECT; + uint32_t DC_I2C_DDC1_PRESCALE; + uint32_t DC_I2C_DDC1_THRESHOLD; + uint32_t DC_I2C_DDC1_START_STOP_TIMING_CNTL; + uint32_t DC_I2C_SW_STOPPED_ON_NACK; + uint32_t DC_I2C_SW_TIMEOUT; + uint32_t DC_I2C_SW_ABORTED; + uint32_t DC_I2C_SW_DONE; + uint32_t DC_I2C_SW_STATUS; + uint32_t DC_I2C_STOP_ON_NACK0; + uint32_t DC_I2C_START0; + uint32_t DC_I2C_RW0; + uint32_t DC_I2C_STOP0; + uint32_t DC_I2C_COUNT0; + uint32_t DC_I2C_DATA_RW; + uint32_t DC_I2C_DATA; + uint32_t DC_I2C_INDEX; + uint32_t DC_I2C_INDEX_WRITE; + uint32_t XTAL_REF_DIV; +}; + +struct dce_i2c_registers { + uint32_t SETUP; + uint32_t SPEED; + uint32_t DC_I2C_ARBITRATION; + uint32_t DC_I2C_CONTROL; + uint32_t DC_I2C_SW_STATUS; + uint32_t DC_I2C_TRANSACTION0; + uint32_t DC_I2C_TRANSACTION1; + uint32_t DC_I2C_TRANSACTION2; + uint32_t DC_I2C_TRANSACTION3; + uint32_t DC_I2C_DATA; + uint32_t MICROSECOND_TIME_BASE_DIV; +}; + +enum dce_i2c_transaction_address_space { + DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C = 1, + DCE_I2C_TRANSACTION_ADDRESS_SPACE_DPCD +}; + +struct i2c_request_transaction_data { + enum 
dce_i2c_transaction_action action; + enum i2c_channel_operation_result status; + uint8_t address; + uint32_t length; + uint8_t *data; +}; + +struct i2c_reply_transaction_data { + uint32_t length; + uint8_t *data; +}; + +struct dce_i2c_hw { + struct ddc *ddc; + uint32_t original_speed; + uint32_t engine_keep_power_up_count; + uint32_t transaction_count; + uint32_t buffer_used_bytes; + uint32_t buffer_used_write; + uint32_t reference_frequency; + uint32_t default_speed; + uint32_t engine_id; + uint32_t setup_limit; + uint32_t send_reset_length; + uint32_t buffer_size; + struct dc_context *ctx; + + const struct dce_i2c_hw_funcs *funcs; + const struct dce_i2c_registers *regs; + const struct dce_i2c_shift *shifts; + const struct dce_i2c_mask *masks; +}; + + +struct dce_i2c_hw_funcs { + bool (*setup_engine)( + struct dce_i2c_hw *dce_i2c_hw); + void (*set_speed)( + struct dce_i2c_hw *dce_i2c_hw, + uint32_t speed); + uint32_t (*get_speed)( + const struct dce_i2c_hw *dce_i2c_hw); + void (*release_engine)( + struct dce_i2c_hw *dce_i2c_hw); + bool (*process_transaction)( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_request_transaction_data *request); + void (*process_channel_reply)( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_reply_transaction_data *reply); + bool (*is_hw_busy)( + struct dce_i2c_hw *dce_i2c_hw); + enum i2c_channel_operation_result (*get_channel_status)( + struct dce_i2c_hw *dce_i2c_hw, + uint8_t *returned_bytes); + void (*execute_transaction)( + struct dce_i2c_hw *dce_i2c_hw); + void (*disable_i2c_hw_engine)( + struct dce_i2c_hw *dce_i2c_hw); +}; + +void dce_i2c_hw_construct( + struct dce_i2c_hw *dce_i2c_hw, + struct dc_context *ctx, + uint32_t engine_id, + const struct dce_i2c_registers *regs, + const struct dce_i2c_shift *shifts, + const struct dce_i2c_mask *masks); + +void dce100_i2c_hw_construct( + struct dce_i2c_hw *dce_i2c_hw, + struct dc_context *ctx, + uint32_t engine_id, + const struct dce_i2c_registers *regs, + const struct dce_i2c_shift *shifts, + const struct dce_i2c_mask *masks); + +void dce112_i2c_hw_construct( + struct dce_i2c_hw *dce_i2c_hw, + struct dc_context *ctx, + uint32_t engine_id, + const struct dce_i2c_registers *regs, + const struct dce_i2c_shift *shifts, + const struct dce_i2c_mask *masks); + +void dcn1_i2c_hw_construct( + struct dce_i2c_hw *dce_i2c_hw, + struct dc_context *ctx, + uint32_t engine_id, + const struct dce_i2c_registers *regs, + const struct dce_i2c_shift *shifts, + const struct dce_i2c_mask *masks); + +bool dce_i2c_submit_command_hw( + struct resource_pool *pool, + struct ddc *ddc, + struct i2c_command *cmd, + struct dce_i2c_hw *dce_i2c_hw); + +struct dce_i2c_hw *acquire_i2c_hw_engine( + struct resource_pool *pool, + struct ddc *ddc); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c new file mode 100644 index 000000000000..ab11129ea425 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -0,0 +1,602 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "dce_i2c.h" +#include "dce_i2c_sw.h" +#include "include/gpio_service_interface.h" +#define SCL false +#define SDA true + +void dce_i2c_sw_construct( + struct dce_i2c_sw *dce_i2c_sw, + struct dc_context *ctx) +{ + dce_i2c_sw->ctx = ctx; +} + +static inline bool read_bit_from_ddc( + struct ddc *ddc, + bool data_nor_clock) +{ + uint32_t value = 0; + + if (data_nor_clock) + dal_gpio_get_value(ddc->pin_data, &value); + else + dal_gpio_get_value(ddc->pin_clock, &value); + + return (value != 0); +} + +static inline void write_bit_to_ddc( + struct ddc *ddc, + bool data_nor_clock, + bool bit) +{ + uint32_t value = bit ? 1 : 0; + + if (data_nor_clock) + dal_gpio_set_value(ddc->pin_data, value); + else + dal_gpio_set_value(ddc->pin_clock, value); +} + +static void release_engine_dce_sw( + struct resource_pool *pool, + struct dce_i2c_sw *dce_i2c_sw) +{ + dal_ddc_close(dce_i2c_sw->ddc); + dce_i2c_sw->ddc = NULL; +} + +enum i2c_channel_operation_result dce_i2c_sw_engine_get_channel_status( + struct dce_i2c_sw *engine, + uint8_t *returned_bytes) +{ + /* No arbitration with VBIOS is performed since DCE 6.0 */ + return I2C_CHANNEL_OPERATION_SUCCEEDED; +} +static bool get_hw_supported_ddc_line( + struct ddc *ddc, + enum gpio_ddc_line *line) +{ + enum gpio_ddc_line line_found; + + *line = GPIO_DDC_LINE_UNKNOWN; + + if (!ddc) { + BREAK_TO_DEBUGGER(); + return false; + } + + if (!ddc->hw_info.hw_supported) + return false; + + line_found = dal_ddc_get_line(ddc); + + if (line_found >= GPIO_DDC_LINE_COUNT) + return false; + + *line = line_found; + + return true; +} +static bool wait_for_scl_high_sw( + struct dc_context *ctx, + struct ddc *ddc, + uint16_t clock_delay_div_4) +{ + uint32_t scl_retry = 0; + uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4; + + udelay(clock_delay_div_4); + + do { + if (read_bit_from_ddc(ddc, SCL)) + return true; + + udelay(clock_delay_div_4); + + ++scl_retry; + } while (scl_retry <= scl_retry_max); + + return false; +} +static bool write_byte_sw( + struct dc_context *ctx, + struct ddc *ddc_handle, + uint16_t clock_delay_div_4, + uint8_t byte) +{ + int32_t shift = 7; + bool ack; + + /* bits are transmitted serially, starting from MSB */ + + do { + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1); + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SCL, true); + + if (!wait_for_scl_high_sw(ctx, ddc_handle, 
clock_delay_div_4)) + return false; + + write_bit_to_ddc(ddc_handle, SCL, false); + + --shift; + } while (shift >= 0); + + /* The display sends ACK by preventing the SDA from going high + * after the SCL pulse we use to send our last data bit. + * If the SDA goes high after that bit, it's a NACK + */ + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SDA, true); + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SCL, true); + + if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) + return false; + + /* read ACK bit */ + + ack = !read_bit_from_ddc(ddc_handle, SDA); + + udelay(clock_delay_div_4 << 1); + + write_bit_to_ddc(ddc_handle, SCL, false); + + udelay(clock_delay_div_4 << 1); + + return ack; +} + +static bool read_byte_sw( + struct dc_context *ctx, + struct ddc *ddc_handle, + uint16_t clock_delay_div_4, + uint8_t *byte, + bool more) +{ + int32_t shift = 7; + + uint8_t data = 0; + + /* The data bits are read from MSB to LSB; + * bit is read while SCL is high + */ + + do { + write_bit_to_ddc(ddc_handle, SCL, true); + + if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) + return false; + + if (read_bit_from_ddc(ddc_handle, SDA)) + data |= (1 << shift); + + write_bit_to_ddc(ddc_handle, SCL, false); + + udelay(clock_delay_div_4 << 1); + + --shift; + } while (shift >= 0); + + /* read only whole byte */ + + *byte = data; + + udelay(clock_delay_div_4); + + /* send the acknowledge bit: + * SDA low means ACK, SDA high means NACK + */ + + write_bit_to_ddc(ddc_handle, SDA, !more); + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SCL, true); + + if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) + return false; + + write_bit_to_ddc(ddc_handle, SCL, false); + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SDA, true); + + udelay(clock_delay_div_4); + + return true; +} +static bool stop_sync_sw( + struct dc_context *ctx, + struct ddc *ddc_handle, + uint16_t clock_delay_div_4) +{ + uint32_t retry = 0; + + /* The I2C communications stop signal is: + * the SDA going high from low, while the SCL is high. 
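+ * (This is the mirror of the START condition generated in start_sync_sw(),
+ * where the SDA goes low from high while the SCL stays high.)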
+ */ + + write_bit_to_ddc(ddc_handle, SCL, false); + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SDA, false); + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SCL, true); + + if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) + return false; + + write_bit_to_ddc(ddc_handle, SDA, true); + + do { + udelay(clock_delay_div_4); + + if (read_bit_from_ddc(ddc_handle, SDA)) + return true; + + ++retry; + } while (retry <= 2); + + return false; +} +static bool i2c_write_sw( + struct dc_context *ctx, + struct ddc *ddc_handle, + uint16_t clock_delay_div_4, + uint8_t address, + uint32_t length, + const uint8_t *data) +{ + uint32_t i = 0; + + if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, address)) + return false; + + while (i < length) { + if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, data[i])) + return false; + ++i; + } + + return true; +} + +static bool i2c_read_sw( + struct dc_context *ctx, + struct ddc *ddc_handle, + uint16_t clock_delay_div_4, + uint8_t address, + uint32_t length, + uint8_t *data) +{ + uint32_t i = 0; + + if (!write_byte_sw(ctx, ddc_handle, clock_delay_div_4, address)) + return false; + + while (i < length) { + if (!read_byte_sw(ctx, ddc_handle, clock_delay_div_4, data + i, + i < length - 1)) + return false; + ++i; + } + + return true; +} + + + +static bool start_sync_sw( + struct dc_context *ctx, + struct ddc *ddc_handle, + uint16_t clock_delay_div_4) +{ + uint32_t retry = 0; + + /* The I2C communications start signal is: + * the SDA going low from high, while the SCL is high. + */ + + write_bit_to_ddc(ddc_handle, SCL, true); + + udelay(clock_delay_div_4); + + do { + write_bit_to_ddc(ddc_handle, SDA, true); + + if (!read_bit_from_ddc(ddc_handle, SDA)) { + ++retry; + continue; + } + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SCL, true); + + if (!wait_for_scl_high_sw(ctx, ddc_handle, clock_delay_div_4)) + break; + + write_bit_to_ddc(ddc_handle, SDA, false); + + udelay(clock_delay_div_4); + + write_bit_to_ddc(ddc_handle, SCL, false); + + udelay(clock_delay_div_4); + + return true; + } while (retry <= I2C_SW_RETRIES); + + return false; +} + +void dce_i2c_sw_engine_set_speed( + struct dce_i2c_sw *engine, + uint32_t speed) +{ + ASSERT(speed); + + engine->speed = speed ? 
speed : DCE_I2C_DEFAULT_I2C_SW_SPEED; + + engine->clock_delay = 1000 / engine->speed; + + if (engine->clock_delay < 12) + engine->clock_delay = 12; +} + +bool dce_i2c_sw_engine_acquire_engine( + struct dce_i2c_sw *engine, + struct ddc *ddc) +{ + enum gpio_result result; + + result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT, + GPIO_DDC_CONFIG_TYPE_MODE_I2C); + + if (result != GPIO_RESULT_OK) + return false; + + engine->ddc = ddc; + + return true; +} +bool dce_i2c_engine_acquire_sw( + struct dce_i2c_sw *dce_i2c_sw, + struct ddc *ddc_handle) +{ + uint32_t counter = 0; + bool result; + + do { + + result = dce_i2c_sw_engine_acquire_engine( + dce_i2c_sw, ddc_handle); + + if (result) + break; + + /* i2c_engine is busy by VBios, lets wait and retry */ + + udelay(10); + + ++counter; + } while (counter < 2); + + return result; +} + + + + +void dce_i2c_sw_engine_submit_channel_request( + struct dce_i2c_sw *engine, + struct i2c_request_transaction_data *req) +{ + struct ddc *ddc = engine->ddc; + uint16_t clock_delay_div_4 = engine->clock_delay >> 2; + + /* send sync (start / repeated start) */ + + bool result = start_sync_sw(engine->ctx, ddc, clock_delay_div_4); + + /* process payload */ + + if (result) { + switch (req->action) { + case DCE_I2C_TRANSACTION_ACTION_I2C_WRITE: + case DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT: + result = i2c_write_sw(engine->ctx, ddc, clock_delay_div_4, + req->address, req->length, req->data); + break; + case DCE_I2C_TRANSACTION_ACTION_I2C_READ: + case DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT: + result = i2c_read_sw(engine->ctx, ddc, clock_delay_div_4, + req->address, req->length, req->data); + break; + default: + result = false; + break; + } + } + + /* send stop if not 'mot' or operation failed */ + + if (!result || + (req->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || + (req->action == DCE_I2C_TRANSACTION_ACTION_I2C_READ)) + if (!stop_sync_sw(engine->ctx, ddc, clock_delay_div_4)) + result = false; + + req->status = result ? + I2C_CHANNEL_OPERATION_SUCCEEDED : + I2C_CHANNEL_OPERATION_FAILED; +} +bool dce_i2c_sw_engine_submit_request( + struct dce_i2c_sw *engine, + struct dce_i2c_transaction_request *dce_i2c_request, + bool middle_of_transaction) +{ + struct i2c_request_transaction_data request; + bool operation_succeeded = false; + + if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ) + request.action = middle_of_transaction ? + DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT : + DCE_I2C_TRANSACTION_ACTION_I2C_READ; + else if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_WRITE) + request.action = middle_of_transaction ? 
+ DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT : + DCE_I2C_TRANSACTION_ACTION_I2C_WRITE; + else { + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION; + /* in DAL2, there was no "return false" */ + return false; + } + + request.address = (uint8_t)dce_i2c_request->payload.address; + request.length = dce_i2c_request->payload.length; + request.data = dce_i2c_request->payload.data; + + dce_i2c_sw_engine_submit_channel_request(engine, &request); + + if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) || + (request.status == I2C_CHANNEL_OPERATION_FAILED)) + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY; + else { + enum i2c_channel_operation_result operation_result; + + do { + operation_result = + dce_i2c_sw_engine_get_channel_status(engine, NULL); + + switch (operation_result) { + case I2C_CHANNEL_OPERATION_SUCCEEDED: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_SUCCEEDED; + operation_succeeded = true; + break; + case I2C_CHANNEL_OPERATION_NO_RESPONSE: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_NACK; + break; + case I2C_CHANNEL_OPERATION_TIMEOUT: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT; + break; + case I2C_CHANNEL_OPERATION_FAILED: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE; + break; + default: + dce_i2c_request->status = + DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION; + break; + } + } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY); + } + + return operation_succeeded; +} +bool dce_i2c_submit_command_sw( + struct resource_pool *pool, + struct ddc *ddc, + struct i2c_command *cmd, + struct dce_i2c_sw *dce_i2c_sw) +{ + uint8_t index_of_payload = 0; + bool result; + + dce_i2c_sw_engine_set_speed(dce_i2c_sw, cmd->speed); + + result = true; + + while (index_of_payload < cmd->number_of_payloads) { + bool mot = (index_of_payload != cmd->number_of_payloads - 1); + + struct i2c_payload *payload = cmd->payloads + index_of_payload; + + struct dce_i2c_transaction_request request = { 0 }; + + request.operation = payload->write ? + DCE_I2C_TRANSACTION_WRITE : + DCE_I2C_TRANSACTION_READ; + + request.payload.address_space = + DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C; + request.payload.address = (payload->address << 1) | + !payload->write; + request.payload.length = payload->length; + request.payload.data = payload->data; + + + if (!dce_i2c_sw_engine_submit_request( + dce_i2c_sw, &request, mot)) { + result = false; + break; + } + + ++index_of_payload; + } + + release_engine_dce_sw(pool, dce_i2c_sw); + + return result; +} +struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( + struct resource_pool *pool, + struct ddc *ddc) +{ + enum gpio_ddc_line line; + struct dce_i2c_sw *engine = NULL; + + if (get_hw_supported_ddc_line(ddc, &line)) + engine = pool->sw_i2cs[line]; + + if (!engine) + return NULL; + + if (!dce_i2c_engine_acquire_sw(engine, ddc)) + return NULL; + + return engine; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h new file mode 100644 index 000000000000..5bbcdd455614 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCE_I2C_SW_H__ +#define __DCE_I2C_SW_H__ + +enum { + DCE_I2C_DEFAULT_I2C_SW_SPEED = 50, + I2C_SW_RETRIES = 10, + I2C_SW_TIMEOUT_DELAY = 3000, +}; + +struct dce_i2c_sw { + struct ddc *ddc; + struct dc_context *ctx; + uint32_t clock_delay; + uint32_t speed; +}; + +void dce_i2c_sw_construct( + struct dce_i2c_sw *dce_i2c_sw, + struct dc_context *ctx); + +bool dce_i2c_submit_command_sw( + struct resource_pool *pool, + struct ddc *ddc, + struct i2c_command *cmd, + struct dce_i2c_sw *dce_i2c_sw); + +struct dce_i2c_sw *dce_i2c_acquire_i2c_sw_engine( + struct resource_pool *pool, + struct ddc *ddc); + +#endif + diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 3f76e6019546..ae613b025756 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -54,6 +54,7 @@ #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" +#include "dce/dce_i2c.h" #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_8_2_d.h" @@ -602,7 +603,40 @@ struct aux_engine *dce100_aux_engine_create( return &aux_engine->base; } +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +struct dce_i2c_hw *dce100_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + return dce_i2c_hw; +} struct clock_source *dce100_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, @@ -658,7 +692,14 @@ static void destruct(struct dce110_resource_pool *pool) if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); - + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + 
pool->base.sw_i2cs[i] = NULL; + } } for (i = 0; i < pool->base.stream_enc_count; i++) { @@ -970,6 +1011,14 @@ static bool construct( "DC:failed to create aux engine!!\n"); goto res_create_fail; } + pool->base.hw_i2cs[i] = dce100_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = NULL; } dc->caps.max_planes = pool->base.pipe_count; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e5e9e92521e9..49c5c7037be2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -52,6 +52,7 @@ #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" +#include "dce/dce_i2c.h" #define DC_LOGGER \ dc->ctx->logger @@ -620,7 +621,40 @@ struct aux_engine *dce110_aux_engine_create( return &aux_engine->base; } +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +struct dce_i2c_hw *dce110_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + return dce_i2c_hw; +} struct clock_source *dce110_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, @@ -687,7 +721,14 @@ static void destruct(struct dce110_resource_pool *pool) if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); - + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } } for (i = 0; i < pool->base.stream_enc_count; i++) { @@ -1303,6 +1344,14 @@ static bool construct( "DC:failed to create aux engine!!\n"); goto res_create_fail; } + pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = NULL; } dc->fbc_compressor = dce110_compressor_create(ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 288129343c77..d35dc730e01c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -50,6 +50,7 @@ #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" +#include "dce/dce_i2c.h" #include "reg_helper.h" @@ -620,7 +621,40 @@ struct aux_engine *dce112_aux_engine_create( return &aux_engine->base; } +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + 
I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +struct dce_i2c_hw *dce112_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + return dce_i2c_hw; +} struct clock_source *dce112_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, @@ -676,7 +710,14 @@ static void destruct(struct dce110_resource_pool *pool) kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } - + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } } for (i = 0; i < pool->base.stream_enc_count; i++) { @@ -1252,6 +1293,14 @@ static bool construct( "DC:failed to create aux engine!!\n"); goto res_create_fail; } + pool->base.hw_i2cs[i] = dce112_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = NULL; } if (!resource_construct(num_virtual_links, dc, &pool->base, diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index d43f37d99c7d..b2fb06f37648 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -54,6 +54,7 @@ #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" +#include "dce/dce_i2c.h" #include "dce/dce_12_0_offset.h" #include "dce/dce_12_0_sh_mask.h" @@ -392,7 +393,40 @@ struct aux_engine *dce120_aux_engine_create( return &aux_engine->base; } +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +struct dce_i2c_hw *dce120_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} static const struct bios_registers bios_regs = { .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; @@ -501,7 +535,14 @@ static void destruct(struct dce110_resource_pool *pool) if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); - + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } } for (i = 0; i < pool->base.audio_count; i++) { @@ -957,6 +998,7 @@ static bool construct( goto res_create_fail; } + irq_init_data.ctx = dc->ctx; pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data); if (!pool->base.irqs) @@ -1021,13 +1063,20 @@ static bool construct( "DC: failed 
to create output pixel processor!\n"); } pool->base.engines[i] = dce120_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto res_create_fail; - } - + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = NULL; /* check next valid pipe */ j++; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 604c62969ead..4eae859e6383 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -56,6 +56,7 @@ #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" #include "dce/dce_abm.h" +#include "dce/dce_i2c.h" /* TODO remove this include */ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT @@ -480,7 +481,54 @@ struct aux_engine *dce80_aux_engine_create( return &aux_engine->base; } +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +struct dce_i2c_hw *dce80_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + return dce_i2c_hw; +} + +struct dce_i2c_sw *dce80_i2c_sw_create( + struct dc_context *ctx) +{ + struct dce_i2c_sw *dce_i2c_sw = + kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL); + + if (!dce_i2c_sw) + return NULL; + + dce_i2c_sw_construct(dce_i2c_sw, ctx); + + return dce_i2c_sw; +} static struct stream_encoder *dce80_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) @@ -691,6 +739,14 @@ static void destruct(struct dce110_resource_pool *pool) if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } } for (i = 0; i < pool->base.stream_enc_count; i++) { @@ -887,6 +943,7 @@ static bool dce80_construct( BREAK_TO_DEBUGGER(); goto res_create_fail; } + if (dm_pp_get_static_clocks(ctx, &static_clk_info)) pool->base.dccg->max_clks_state = static_clk_info.max_clocks_state; @@ -943,6 +1000,20 @@ static bool dce80_construct( "DC:failed to create aux engine!!\n"); goto res_create_fail; } + pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } } dc->caps.max_planes = 
pool->base.pipe_count; @@ -1129,6 +1200,20 @@ static bool dce81_construct( dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } + pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } } dc->caps.max_planes = pool->base.pipe_count; @@ -1311,6 +1396,20 @@ static bool dce83_construct( dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } + pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } } dc->caps.max_planes = pool->base.pipe_count; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 6b44ed3697a4..28ebad8c3ec4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -65,6 +65,7 @@ #include "dce/dce_abm.h" #include "dce/dce_dmcu.h" #include "dce/dce_aux.h" +#include "dce/dce_i2c.h" const struct _vcs_dpi_ip_params_st dcn1_0_ip = { .rob_buffer_size_kbytes = 64, @@ -610,7 +611,40 @@ struct aux_engine *dcn10_aux_engine_create( return &aux_engine->base; } +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +struct dce_i2c_hw *dcn10_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + return dce_i2c_hw; +} static struct mpc *dcn10_mpc_create(struct dc_context *ctx) { struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc), @@ -862,6 +896,14 @@ static void destruct(struct dcn10_resource_pool *pool) if (pool->base.engines[i] != NULL) pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } } for (i = 0; i < pool->base.stream_enc_count; i++) @@ -1300,7 +1342,14 @@ static bool construct( "DC:failed to create aux engine!!\n"); goto fail; } - + pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto fail; + } + pool->base.sw_i2cs[i] = NULL; /* check next valid pipe */ j++; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 
c0b9ca13393b..609bff8ed72e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -144,6 +144,9 @@ struct resource_pool { struct mpc *mpc; struct pp_smu_funcs_rv *pp_smu; struct pp_smu_display_requirement_rv pp_smu_req; + struct dce_i2c_hw *hw_i2cs[MAX_PIPES]; + struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; + bool i2c_hw_buffer_in_use; unsigned int pipe_count; unsigned int underlay_pipe_index; -- GitLab From eb385204b2416f88aea6e1d1cfbbf5b8dbddcaa5 Mon Sep 17 00:00:00 2001 From: SivapiriyanKumarasamy Date: Thu, 26 Jul 2018 14:58:35 -0400 Subject: [PATCH 0476/1692] drm/amd/display: Program csc matrix as part of stream update Add csc_transform struct to dc_stream_update, and program if set when updating streams Signed-off-by: SivapiriyanKumarasamy Reviewed-by: Anthony Koo Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 24 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc_stream.h | 4 ++++ 2 files changed, 28 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 99450293a1c5..32318b4e0d1e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -379,6 +379,27 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre return ret; } +bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) +{ + int i = 0; + bool ret = false; + struct pipe_ctx *pipes; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + + pipes = &dc->current_state->res_ctx.pipe_ctx[i]; + dc->hwss.program_csc_matrix(pipes, + stream->output_color_space, + stream->csc_color_matrix.matrix); + ret = true; + } + } + + return ret; +} + void dc_stream_set_static_screen_events(struct dc *dc, struct dc_stream_state **streams, int num_streams, @@ -1387,6 +1408,9 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->gamut_remap) dc_stream_set_gamut_remap(dc, stream); + if (stream_update->output_csc_transform) + dc_stream_program_csc_matrix(dc, stream); + /* Full fe update*/ if (update_type == UPDATE_TYPE_FAST) continue; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 1479b41ec177..4a9f7e5daccf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -136,6 +136,7 @@ struct dc_stream_update { struct colorspace_transform *gamut_remap; enum dc_color_space *output_color_space; + struct dc_csc_transform *output_csc_transform; }; @@ -306,6 +307,9 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream); +bool dc_stream_program_csc_matrix(struct dc *dc, + struct dc_stream_state *stream); + bool dc_stream_get_crtc_position(struct dc *dc, struct dc_stream_state **stream, int num_streams, -- GitLab From d75de8ac943beef397375d96d01be15aed3fdf27 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Fri, 13 Jul 2018 18:19:07 -0400 Subject: [PATCH 0477/1692] drm/amd/display: Define registers for dcn10 Define register for dcn10 for future changes Signed-off-by: Nikola Cornij Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 6b3e4ded155b..67f3e4dd95c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -260,6 +260,7 @@ struct dcn10_stream_enc_registers { SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\ SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\ SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\ @@ -364,6 +365,7 @@ struct dcn10_stream_enc_registers { type DP_SEC_GSP5_ENABLE;\ type DP_SEC_GSP6_ENABLE;\ type DP_SEC_GSP7_ENABLE;\ + type DP_SEC_GSP7_SEND;\ type DP_SEC_MPG_ENABLE;\ type DP_VID_STREAM_DIS_DEFER;\ type DP_VID_STREAM_ENABLE;\ -- GitLab From 728098352ea493584feea20be114006c30d76bca Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 9 Aug 2018 13:15:36 -0400 Subject: [PATCH 0478/1692] drm/amd/display: Combine dce80 and dce100 i2c hw functions [Why] There are two versions of the hw function pointers: one for dce80 and one for all other versions. These paired functions are nearly identical. dce80 and dce100 should not require different i2c access functions. [How] Combine each pair of functions into a single function. Mostly the new functions are based on the dce100 versions as those versions are newer, support more features, and were more maintained. Signed-off-by: David Francis Reviewed-by: Sun peng Li Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 237 +++--------------- 1 file changed, 40 insertions(+), 197 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 6a57c4874e6b..3a63e3cbb91d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -58,18 +58,7 @@ static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw) return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE; } -static void set_speed_hw_dce80( - struct dce_i2c_hw *dce_i2c_hw, - uint32_t speed) -{ - - if (speed) { - REG_UPDATE_N(SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); - } -} -static void set_speed_hw_dce100( +static void set_speed( struct dce_i2c_hw *dce_i2c_hw, uint32_t speed) { @@ -86,6 +75,7 @@ static void set_speed_hw_dce100( FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } } + bool dce_i2c_hw_engine_acquire_engine( struct dce_i2c_hw *dce_i2c_hw, struct ddc *ddc) @@ -172,7 +162,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( return NULL; } -static bool setup_engine_hw_dce100( +static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) { uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; @@ -206,72 +196,11 @@ static bool setup_engine_hw_dce100( return true; } -static bool setup_engine_hw_dce80( - struct dce_i2c_hw *dce_i2c_hw) -{ - - /* Program pin select */ - { - REG_UPDATE_6(DC_I2C_CONTROL, - DC_I2C_GO, 0, - DC_I2C_SOFT_RESET, 0, - DC_I2C_SEND_RESET, 0, - DC_I2C_SW_STATUS_RESET, 1, - DC_I2C_TRANSACTION_COUNT, 0, - DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id); - } - - /* Program time limit */ - { - REG_UPDATE_2(SETUP, - DC_I2C_DDC1_TIME_LIMIT, I2C_SETUP_TIME_LIMIT_DCE, - DC_I2C_DDC1_ENABLE, 
1); - } - - /* Program HW priority - * set to High - interrupt software I2C at any time - * Enable restart of SW I2C that was interrupted by HW - * disable queuing of software while I2C is in use by HW - */ - { - REG_UPDATE_2(DC_I2C_ARBITRATION, - DC_I2C_NO_QUEUED_SW_GO, 0, - DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL); - } - return true; -} -static void process_channel_reply_hw_dce80( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_reply_transaction_data *reply) -{ - uint32_t length = reply->length; - uint8_t *buffer = reply->data; - - REG_SET_3(DC_I2C_DATA, 0, - DC_I2C_INDEX, length - 1, - DC_I2C_DATA_RW, 1, - DC_I2C_INDEX_WRITE, 1); - - while (length) { - /* after reading the status, - * if the I2C operation executed successfully - * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller - * should read data bytes from I2C circular data buffer - */ - - uint32_t i2c_data; - - REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data); - *buffer++ = i2c_data; - - --length; - } -} -static void process_channel_reply_hw_dce100( +static void process_channel_reply( struct dce_i2c_hw *dce_i2c_hw, struct i2c_reply_transaction_data *reply) { @@ -404,7 +333,7 @@ static void execute_transaction_hw( dce_i2c_hw->transaction_count = 0; dce_i2c_hw->buffer_used_bytes = 0; } -static bool process_transaction_hw_dce80( +static bool process_transaction( struct dce_i2c_hw *dce_i2c_hw, struct i2c_request_transaction_data *request) { @@ -414,135 +343,49 @@ static bool process_transaction_hw_dce80( bool last_transaction = false; uint32_t value = 0; - { - - last_transaction = ((dce_i2c_hw->transaction_count == 3) || - (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || - (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)); + last_transaction = ((dce_i2c_hw->transaction_count == 3) || + (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || + (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)); - switch (dce_i2c_hw->transaction_count) { - case 0: - REG_UPDATE_5(DC_I2C_TRANSACTION0, - DC_I2C_STOP_ON_NACK0, 1, - DC_I2C_START0, 1, - DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), - DC_I2C_COUNT0, length, - DC_I2C_STOP0, last_transaction ? 1 : 0); - break; - case 1: - REG_UPDATE_5(DC_I2C_TRANSACTION1, - DC_I2C_STOP_ON_NACK0, 1, - DC_I2C_START0, 1, - DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), - DC_I2C_COUNT0, length, - DC_I2C_STOP0, last_transaction ? 1 : 0); - break; - case 2: - REG_UPDATE_5(DC_I2C_TRANSACTION2, - DC_I2C_STOP_ON_NACK0, 1, - DC_I2C_START0, 1, - DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), - DC_I2C_COUNT0, length, - DC_I2C_STOP0, last_transaction ? 1 : 0); - break; - case 3: - REG_UPDATE_5(DC_I2C_TRANSACTION3, - DC_I2C_STOP_ON_NACK0, 1, - DC_I2C_START0, 1, - DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), - DC_I2C_COUNT0, length, - DC_I2C_STOP0, last_transaction ? 1 : 0); - break; - default: - /* TODO Warning ? */ - break; - } - } - - /* Write the I2C address and I2C data - * into the hardware circular buffer, one byte per entry. - * As an example, the 7-bit I2C slave address for CRT monitor - * for reading DDC/EDID information is 0b1010001. - * For an I2C send operation, the LSB must be programmed to 0; - * for I2C receive operation, the LSB must be programmed to 1. 
- */ - - { - if (dce_i2c_hw->transaction_count == 0) { - value = REG_SET_4(DC_I2C_DATA, 0, - DC_I2C_DATA_RW, false, - DC_I2C_DATA, request->address, - DC_I2C_INDEX, 0, - DC_I2C_INDEX_WRITE, 1); - } else - value = REG_SET_2(DC_I2C_DATA, 0, - DC_I2C_DATA_RW, false, - DC_I2C_DATA, request->address); - - if (!(request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) { - - while (length) { - REG_SET_2(DC_I2C_DATA, value, - DC_I2C_INDEX_WRITE, 0, - DC_I2C_DATA, *buffer++); - --length; - } - } - } - - ++dce_i2c_hw->transaction_count; - dce_i2c_hw->buffer_used_bytes += length + 1; - - return last_transaction; -} - -#define STOP_TRANS_PREDICAT \ - ((dce_i2c_hw->transaction_count == 3) || \ - (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) || \ - (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)) - -#define SET_I2C_TRANSACTION(id) \ - do { \ - REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \ - FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \ - FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \ - FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \ - FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ)), \ - FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \ - if (STOP_TRANS_PREDICAT) \ - last_transaction = true; \ - } while (false) - -static bool process_transaction_hw_dce100( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_request_transaction_data *request) -{ - uint32_t length = request->length; - uint8_t *buffer = request->data; - uint32_t value = 0; - - bool last_transaction = false; - switch (dce_i2c_hw->transaction_count) { case 0: - SET_I2C_TRANSACTION(0); + REG_UPDATE_5(DC_I2C_TRANSACTION0, + DC_I2C_STOP_ON_NACK0, 1, + DC_I2C_START0, 1, + DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), + DC_I2C_COUNT0, length, + DC_I2C_STOP0, last_transaction ? 1 : 0); break; case 1: - SET_I2C_TRANSACTION(1); + REG_UPDATE_5(DC_I2C_TRANSACTION1, + DC_I2C_STOP_ON_NACK0, 1, + DC_I2C_START0, 1, + DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), + DC_I2C_COUNT0, length, + DC_I2C_STOP0, last_transaction ? 1 : 0); break; case 2: - SET_I2C_TRANSACTION(2); + REG_UPDATE_5(DC_I2C_TRANSACTION2, + DC_I2C_STOP_ON_NACK0, 1, + DC_I2C_START0, 1, + DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), + DC_I2C_COUNT0, length, + DC_I2C_STOP0, last_transaction ? 1 : 0); break; case 3: - SET_I2C_TRANSACTION(3); + REG_UPDATE_5(DC_I2C_TRANSACTION3, + DC_I2C_STOP_ON_NACK0, 1, + DC_I2C_START0, 1, + DC_I2C_RW0, 0 != (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ), + DC_I2C_COUNT0, length, + DC_I2C_STOP0, last_transaction ? 1 : 0); break; default: /* TODO Warning ? */ break; } - /* Write the I2C address and I2C data * into the hardware circular buffer, one byte per entry. 
* As an example, the 7-bit I2C slave address for CRT monitor @@ -828,24 +671,24 @@ bool dce_i2c_submit_command_hw( return result; } static const struct dce_i2c_hw_funcs dce100_i2c_hw_funcs = { - .setup_engine = setup_engine_hw_dce100, - .set_speed = set_speed_hw_dce100, + .setup_engine = setup_engine, + .set_speed = set_speed, .get_speed = get_speed_hw, .release_engine = release_engine_hw, - .process_transaction = process_transaction_hw_dce100, - .process_channel_reply = process_channel_reply_hw_dce100, + .process_transaction = process_transaction, + .process_channel_reply = process_channel_reply, .is_hw_busy = is_hw_busy, .get_channel_status = get_channel_status_hw, .execute_transaction = execute_transaction_hw, .disable_i2c_hw_engine = disable_i2c_hw_engine }; static const struct dce_i2c_hw_funcs dce80_i2c_hw_funcs = { - .setup_engine = setup_engine_hw_dce80, - .set_speed = set_speed_hw_dce80, + .setup_engine = setup_engine, + .set_speed = set_speed, .get_speed = get_speed_hw, .release_engine = release_engine_hw, - .process_transaction = process_transaction_hw_dce80, - .process_channel_reply = process_channel_reply_hw_dce80, + .process_transaction = process_transaction, + .process_channel_reply = process_channel_reply, .is_hw_busy = is_hw_busy, .get_channel_status = get_channel_status_hw, .execute_transaction = execute_transaction_hw, -- GitLab From d82f99422b21c0e9d174be453d0a5062da40568e Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 21 Aug 2018 14:28:05 -0500 Subject: [PATCH 0479/1692] drm/amd/display: move edp fast boot optimization flag to stream [Why] During S4/S3 stress test it is possible to resume from S4 without calling mode set on eDP, meaning high level optimization flag is not reset. If this is followed by an S3 resume call, driver will see optimization flag is set and consume it and think backend is powered on when in fact it is not. This results in PHY being off in sequence where S4->Resume->S3->Resume->ApplyOpt->black screen. [How] Move optimization flag to stream instead of a DC flag. Signed-off-by: Anthony Koo Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +-- drivers/gpu/drm/amd/display/dc/dc.h | 2 -- drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 + .../display/dc/dce110/dce110_hw_sequencer.c | 28 ++++++++++++------- 4 files changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 6638251162b0..53ce2a9b7eed 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2497,8 +2497,8 @@ void core_link_enable_stream( /* eDP lit up by bios already, no need to enable again. 
*/ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && - core_dc->apply_edp_fast_boot_optimization) { - core_dc->apply_edp_fast_boot_optimization = false; + pipe_ctx->stream->apply_edp_fast_boot_optimization) { + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; pipe_ctx->stream->dpms_off = false; return; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1cf4ec68e741..3564f4fe420a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -311,8 +311,6 @@ struct dc { bool optimized_required; - bool apply_edp_fast_boot_optimization; - /* FBC compressor */ struct compressor *fbc_compressor; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 4a9f7e5daccf..c5bd1fbb6982 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -102,6 +102,7 @@ struct dc_stream_state { int phy_pix_clk; enum signal_type signal; bool dpms_off; + bool apply_edp_fast_boot_optimization; struct dc_stream_status status; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index ce1e0f6ec3ca..dc1eed5ba996 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1557,32 +1557,40 @@ static struct dc_link *get_link_for_edp_not_in_use( */ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) { + int i; struct dc_link *edp_link_to_turnoff = NULL; struct dc_link *edp_link = get_link_for_edp(dc); - bool can_eDP_fast_boot_optimize = false; + bool can_edp_fast_boot_optimize = false; + bool apply_edp_fast_boot_optimization = false; if (edp_link) { /* this seems to cause blank screens on DCE8 */ if ((dc->ctx->dce_version == DCE_VERSION_8_0) || (dc->ctx->dce_version == DCE_VERSION_8_1) || (dc->ctx->dce_version == DCE_VERSION_8_3)) - can_eDP_fast_boot_optimize = false; + can_edp_fast_boot_optimize = false; else - can_eDP_fast_boot_optimize = + can_edp_fast_boot_optimize = edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc); } - if (can_eDP_fast_boot_optimize) { + if (can_edp_fast_boot_optimize) edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context); - /* if OS doesn't light up eDP and eDP link is available, we want to disable - * If resume from S4/S5, should optimization. - */ - if (!edp_link_to_turnoff) - dc->apply_edp_fast_boot_optimization = true; + /* if OS doesn't light up eDP and eDP link is available, we want to disable + * If resume from S4/S5, should optimization. + */ + if (can_edp_fast_boot_optimize && !edp_link_to_turnoff) { + /* Find eDP stream and set optimization flag */ + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { + context->streams[i]->apply_edp_fast_boot_optimization = true; + apply_edp_fast_boot_optimization = true; + } + } } - if (!dc->apply_edp_fast_boot_optimization) { + if (!apply_edp_fast_boot_optimization) { if (edp_link_to_turnoff) { /*turn off backlight before DP_blank and encoder powered down*/ dc->hwss.edp_backlight_control(edp_link_to_turnoff, false); -- GitLab From dd73043534515c1b8bf31f78f0e9945f5d95e0e6 Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Wed, 8 Aug 2018 11:53:39 -0400 Subject: [PATCH 0480/1692] drm/amd/display: implement DPMS DTN test v2 [why] Existing DTN infrastructure in driver is hacky. 
It uses implicit log names and an incorrect escape ID. [how] - Implement using generic DTN escape ID. - Move file logging functionality from driver to script; driver now outputs to string/buffer - Move HWSS debug functionality to a separate C file - Add debug functionality for per-block logging as CSV - Add pretty printing in Python Signed-off-by: Jun Lei Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2 +- .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +- .../amd/display/dc/dcn10/dcn10_hw_sequencer.h | 5 + .../dc/dcn10/dcn10_hw_sequencer_debug.c | 510 ++++++++++++++++++ .../gpu/drm/amd/display/dc/inc/hw_sequencer.h | 1 + 5 files changed, 518 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 84f52c63d95c..032f872be89c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -22,7 +22,7 @@ # # Makefile for DCN. -DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ +DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 4b8bedb625b4..051f427868ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -71,7 +71,6 @@ void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle) us_x10 % frac); } - static void log_mpc_crc(struct dc *dc) { struct dc_context *dc_ctx = dc->ctx; @@ -2712,6 +2711,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .setup_stereo = dcn10_setup_stereo, .set_avmute = dce110_set_avmute, .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, .ready_shared_resources = ready_shared_resources, .optimize_shared_resources = optimize_shared_resources, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 7139fb73e966..84d461e0ed3e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -46,4 +46,9 @@ void dcn10_program_pipe( struct pipe_ctx *pipe_ctx, struct dc_state *context); +void dcn10_get_hw_state( + struct dc *dc, + char *pBuf, unsigned int bufSize, + unsigned int mask); + #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c new file mode 100644 index 000000000000..9288b00e49b4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -0,0 +1,510 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "core_types.h" +#include "resource.h" +#include "custom_float.h" +#include "dcn10_hw_sequencer.h" +#include "dce110/dce110_hw_sequencer.h" +#include "dce/dce_hwseq.h" +#include "abm.h" +#include "dmcu.h" +#include "dcn10_optc.h" +#include "dcn10/dcn10_dpp.h" +#include "dcn10/dcn10_mpc.h" +#include "timing_generator.h" +#include "opp.h" +#include "ipp.h" +#include "mpc.h" +#include "reg_helper.h" +#include "custom_float.h" +#include "dcn10_hubp.h" +#include "dcn10_hubbub.h" +#include "dcn10_cm_common.h" + +static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) +{ + unsigned int ret_vsnprintf; + unsigned int chars_printed; + + va_list args; + va_start(args, fmt); + + ret_vsnprintf = vsnprintf(pBuf, bufSize, fmt, args); + + va_end(args); + + if (ret_vsnprintf > 0) { + if (ret_vsnprintf < bufSize) + chars_printed = ret_vsnprintf; + else + chars_printed = bufSize - 1; + } else + chars_printed = 0; + + return chars_printed; +} + +static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + struct dc_context *dc_ctx = dc->ctx; + struct dcn_hubbub_wm wm = {0}; + int i; + + unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; + + const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000; + static const unsigned int frac = 1000; + + hubbub1_wm_read_state(dc->res_pool->hubbub, &wm); + + chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n"); + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + for (i = 0; i < 4; i++) { + struct dcn_hubbub_wm_set *s; + + s = &wm.sets[i]; + + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%d.%03d,%d.%03d,%d.%03d,%d.%03d,%d.%03d\n", + s->wm_set, + (s->data_urgent * frac) / ref_clk_mhz / frac, (s->data_urgent * frac) / ref_clk_mhz % frac, + (s->pte_meta_urgent * frac) / ref_clk_mhz / frac, (s->pte_meta_urgent * frac) / ref_clk_mhz % frac, + (s->sr_enter * frac) / ref_clk_mhz / frac, (s->sr_enter * frac) / ref_clk_mhz % frac, + (s->sr_exit * frac) / ref_clk_mhz / frac, (s->sr_exit * frac) / ref_clk_mhz % frac, + (s->dram_clk_chanage * frac) / ref_clk_mhz / frac, (s->dram_clk_chanage * frac) / ref_clk_mhz % frac); + remaining_buffer -= chars_printed; + pBuf += chars_printed; + } + + return bufSize - remaining_buffer; +} + 
+static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + struct dc_context *dc_ctx = dc->ctx; + struct resource_pool *pool = dc->res_pool; + int i; + + unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; + + const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000; + static const unsigned int frac = 1000; + + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow," + "min_ttu_vblank,qos_low_wm,qos_high_wm" + "\n"); + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + for (i = 0; i < pool->pipe_count; i++) { + struct hubp *hubp = pool->hubps[i]; + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state); + + hubp->funcs->hubp_read_state(hubp); + + if (!s->blank_en) { + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x," + "%d.%03d,%d.%03d,%d.%03d" + "\n", + hubp->inst, + s->pixel_format, + s->inuse_addr_hi, + s->viewport_width, + s->viewport_height, + s->rotation_angle, + s->h_mirror_en, + s->sw_mode, + s->dcc_en, + s->blank_en, + s->ttu_disable, + s->underflow_status, + (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac, + (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac, + (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac); + + remaining_buffer -= chars_printed; + pBuf += chars_printed; + } + } + + return bufSize - remaining_buffer; +} + +static unsigned int dcn10_get_rq_states(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + struct resource_pool *pool = dc->res_pool; + int i; + + unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; + + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,drq_exp_m,prq_exp_m,mrq_exp_m,crq_exp_m,plane1_ba," + "luma_chunk_s,luma_min_chu_s,luma_meta_ch_s,luma_min_m_c_s,luma_dpte_gr_s,luma_mpte_gr_s,luma_swath_hei,luma_pte_row_h," + "chroma_chunk_s,chroma_min_chu_s,chroma_meta_ch_s,chroma_min_m_c_s,chroma_dpte_gr_s,chroma_mpte_gr_s,chroma_swath_hei,chroma_pte_row_h" + "\n"); + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; + + if (!s->blank_en) { + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x,%x,%x" + "\n", + pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode, + rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size, + rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size, + rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size, + rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height, + rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size, + rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size, + rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size, + rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear); + + remaining_buffer -= chars_printed; + pBuf += chars_printed; + } + } + + return bufSize - remaining_buffer; +} + +static 
unsigned int dcn10_get_dlg_states(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + struct resource_pool *pool = dc->res_pool; + int i; + + unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; + + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,rc_hbe,dlg_vbe,min_d_y_n,rc_per_ht,rc_x_a_s," + "dst_y_a_s,dst_y_pf,dst_y_vvb,dst_y_rvb,dst_y_vfl,dst_y_rfl,rf_pix_fq," + "vratio_pf,vrat_pf_c,rc_pg_vbl,rc_pg_vbc,rc_mc_vbl,rc_mc_vbc,rc_pg_fll," + "rc_pg_flc,rc_mc_fll,rc_mc_flc,pr_nom_l,pr_nom_c,rc_pg_nl,rc_pg_nc," + "mr_nom_l,mr_nom_c,rc_mc_nl,rc_mc_nc,rc_ld_pl,rc_ld_pc,rc_ld_l," + "rc_ld_c,cha_cur0,ofst_cur1,cha_cur1,vr_af_vc0,ddrq_limt,x_rt_dlay,x_rp_dlay,x_rr_sfl" + "\n"); + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr; + + if (!s->blank_en) { + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x,%x,%x,%x,%x" + "\n", + pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, + dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler, + dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank, + dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq, + dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l, + dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l, + dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l, + dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l, + dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l, + dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l, + dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l, + dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l, + dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l, + dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l, + dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1, + dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit, + dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay, + dlg_regs->xfc_reg_remote_surface_flip_latency); + + remaining_buffer -= chars_printed; + pBuf += chars_printed; + } + } + + return bufSize - remaining_buffer; +} + +static unsigned int dcn10_get_ttu_states(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + struct resource_pool *pool = dc->res_pool; + int i; + + unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; + + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,qos_ll_wm,qos_lh_wm,mn_ttu_vb,qos_l_flp,rc_rd_p_l,rc_rd_l,rc_rd_p_c," + "rc_rd_c,rc_rd_c0,rc_rd_pc0,rc_rd_c1,rc_rd_pc1,qos_lf_l,qos_rds_l," + "qos_lf_c,qos_rds_c,qos_lf_c0,qos_rds_c0,qos_lf_c1,qos_rds_c1" + "\n"); + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct 
_vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr; + + if (!s->blank_en) { + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x,%x," + "%x,%x,%x,%x,%x,%x" + "\n", + pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank, + ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l, + ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0, + ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1, + ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l, + ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0, + ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1); + + remaining_buffer -= chars_printed; + pBuf += chars_printed; + } + } + + return bufSize - remaining_buffer; +} + +static unsigned int dcn10_get_cm_states(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + struct resource_pool *pool = dc->res_pool; + int i; + + unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; + + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,igam_format,igam_mode,dgam_mode,rgam_mode,gamut_mode," + "c11_c12,c13_c14,c21_c22,c23_c24,c31_c32,c33_c34" + "\n"); + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + for (i = 0; i < pool->pipe_count; i++) { + struct dpp *dpp = pool->dpps[i]; + struct dcn_dpp_state s = {0}; + + dpp->funcs->dpp_read_state(dpp, &s); + + if (s.is_enabled) { + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x," + "%08x,%08x,%08x,%08x,%08x,%08x" + "\n", + dpp->inst, s.igam_input_format, s.igam_lut_mode, s.dgam_lut_mode, + s.rgam_lut_mode, s.gamut_remap_mode, s.gamut_remap_c11_c12, + s.gamut_remap_c13_c14, s.gamut_remap_c21_c22, s.gamut_remap_c23_c24, + s.gamut_remap_c31_c32, s.gamut_remap_c33_c34); + + remaining_buffer -= chars_printed; + pBuf += chars_printed; + } + } + + return bufSize - remaining_buffer; +} + +static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + struct resource_pool *pool = dc->res_pool; + int i; + + unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; + + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,opp,dpp,mpccbot,mode,alpha_mode,premult,overlap_only,idle\n"); + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + for (i = 0; i < pool->pipe_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + + if (s.opp_id != 0xf) { + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x,%x,%x,%x\n", + i, s.opp_id, s.dpp_id, s.bot_mpcc_id, + s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, + s.idle); + + remaining_buffer -= chars_printed; + pBuf += chars_printed; + } + } + + return bufSize - remaining_buffer; +} + +static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + struct resource_pool *pool = dc->res_pool; + int i; + + unsigned int chars_printed = 0; + unsigned int remaining_buffer = bufSize; + + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,v_bs,v_be,v_ss,v_se,vpol,vmax,vmin,vmax_sel,vmin_sel," + "h_bs,h_be,h_ss,h_se,hpol,htot,vtot,underflow\n"); + remaining_buffer -= chars_printed; + pBuf += chars_printed; 
+ + for (i = 0; i < pool->timing_generator_count; i++) { + struct timing_generator *tg = pool->timing_generators[i]; + struct dcn_otg_state s = {0}; + + optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); + + //only print if OTG master is enabled + if (s.otg_enabled & 1) { + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%d,%d,%d,%d,%d,%d,%d,%d,%d," + "%d,%d,%d,%d,%d,%d,%d,%d" + "\n", + tg->inst, + s.v_blank_start, + s.v_blank_end, + s.v_sync_a_start, + s.v_sync_a_end, + s.v_sync_a_pol, + s.v_total_max, + s.v_total_min, + s.v_total_max_sel, + s.v_total_min_sel, + s.h_blank_start, + s.h_blank_end, + s.h_sync_a_start, + s.h_sync_a_end, + s.h_sync_a_pol, + s.h_total, + s.v_total, + s.underflow_occurred_status); + + remaining_buffer -= chars_printed; + pBuf += chars_printed; + + // Clear underflow for debug purposes + // We want to keep underflow sticky bit on for the longevity tests outside of test environment. + // This function is called only from Windows or Diags test environment, hence it's safe to clear + // it from here without affecting the original intent. + tg->funcs->clear_optc_underflow(tg); + } + } + + return bufSize - remaining_buffer; +} + +static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize) +{ + unsigned int chars_printed = 0; + + chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz," + "dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n" + "%d,%d,%d,%d,%d,%d,%d\n", + dc->current_state->bw.dcn.clk.dcfclk_khz, + dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz, + dc->current_state->bw.dcn.clk.dispclk_khz, + dc->current_state->bw.dcn.clk.dppclk_khz, + dc->current_state->bw.dcn.clk.max_supported_dppclk_khz, + dc->current_state->bw.dcn.clk.fclk_khz, + dc->current_state->bw.dcn.clk.socclk_khz); + + return chars_printed; +} + +void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask) +{ + const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1; + const unsigned int DC_HW_STATE_MASK_HUBP = 0x2; + const unsigned int DC_HW_STATE_MASK_RQ = 0x4; + const unsigned int DC_HW_STATE_MASK_DLG = 0x8; + const unsigned int DC_HW_STATE_MASK_TTU = 0x10; + const unsigned int DC_HW_STATE_MASK_CM = 0x20; + const unsigned int DC_HW_STATE_MASK_MPCC = 0x40; + const unsigned int DC_HW_STATE_MASK_OTG = 0x80; + const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100; + + unsigned int chars_printed = 0; + unsigned int remaining_buf_size = bufSize; + + if (mask == 0x0) + mask = 0xFFFF; + + if ((mask & DC_HW_STATE_MASK_HUBBUB) && remaining_buf_size > 0) { + chars_printed = dcn10_get_hubbub_state(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + } + + if ((mask & DC_HW_STATE_MASK_HUBP) && remaining_buf_size > 0) { + chars_printed = dcn10_get_hubp_states(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + } + + if ((mask & DC_HW_STATE_MASK_RQ) && remaining_buf_size > 0) { + chars_printed = dcn10_get_rq_states(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + } + + if ((mask & DC_HW_STATE_MASK_DLG) && remaining_buf_size > 0) { + chars_printed = dcn10_get_dlg_states(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + } + + if ((mask & DC_HW_STATE_MASK_TTU) && remaining_buf_size > 0) { + chars_printed = dcn10_get_ttu_states(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + 
} + + if ((mask & DC_HW_STATE_MASK_CM) && remaining_buf_size > 0) { + chars_printed = dcn10_get_cm_states(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + } + + if ((mask & DC_HW_STATE_MASK_MPCC) && remaining_buf_size > 0) { + chars_printed = dcn10_get_mpcc_states(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + } + + if ((mask & DC_HW_STATE_MASK_OTG) && remaining_buf_size > 0) { + chars_printed = dcn10_get_otg_states(dc, pBuf, remaining_buf_size); + pBuf += chars_printed; + remaining_buf_size -= chars_printed; + } + + if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) + chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size); +} diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index a14ce4de80b2..9a97356923e2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -203,6 +203,7 @@ struct hw_sequencer_funcs { void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); void (*log_hw_state)(struct dc *dc); + void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); void (*wait_for_mpcc_disconnect)(struct dc *dc, struct resource_pool *res_pool, -- GitLab From 39b62541aac396d18108c160ddb956a22483046d Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Fri, 17 Aug 2018 18:26:41 +0800 Subject: [PATCH 0481/1692] drm/amdgpu: Remove the sriov checking and add firmware checking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unify bare metal and sriov, and add firmware checking for reg write and reg wait unify command. Signed-off-by: Emily Deng Acked-by: Christian König Reviewed-and-Tested-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 + drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 59 ++++++++++++++++++++++++- 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 53e9e2a0821e..f172e92c463c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -274,6 +274,8 @@ struct amdgpu_gfx { uint32_t rlc_srls_feature_version; uint32_t mec_feature_version; uint32_t mec2_feature_version; + bool mec_fw_write_wait; + bool me_fw_write_wait; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4e1e1a0dd681..0cba430712d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -482,6 +482,59 @@ static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev) le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); } +static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) +{ + adev->gfx.me_fw_write_wait = false; + adev->gfx.mec_fw_write_wait = false; + + switch (adev->asic_type) { + case CHIP_VEGA10: + if ((adev->gfx.me_fw_version >= 0x0000009c) && + (adev->gfx.me_feature_version >= 42) && + (adev->gfx.pfp_fw_version >= 0x000000b1) && + (adev->gfx.pfp_feature_version >= 42)) + adev->gfx.me_fw_write_wait = true; + + if ((adev->gfx.mec_fw_version >= 0x00000193) && + (adev->gfx.mec_feature_version >= 42)) + adev->gfx.mec_fw_write_wait = true; + break; + case CHIP_VEGA12: + if 
((adev->gfx.me_fw_version >= 0x0000009c) && + (adev->gfx.me_feature_version >= 44) && + (adev->gfx.pfp_fw_version >= 0x000000b2) && + (adev->gfx.pfp_feature_version >= 44)) + adev->gfx.me_fw_write_wait = true; + + if ((adev->gfx.mec_fw_version >= 0x00000196) && + (adev->gfx.mec_feature_version >= 44)) + adev->gfx.mec_fw_write_wait = true; + break; + case CHIP_VEGA20: + if ((adev->gfx.me_fw_version >= 0x0000009c) && + (adev->gfx.me_feature_version >= 44) && + (adev->gfx.pfp_fw_version >= 0x000000b2) && + (adev->gfx.pfp_feature_version >= 44)) + adev->gfx.me_fw_write_wait = true; + + if ((adev->gfx.mec_fw_version >= 0x00000197) && + (adev->gfx.mec_feature_version >= 44)) + adev->gfx.mec_fw_write_wait = true; + break; + case CHIP_RAVEN: + if ((adev->gfx.me_fw_version >= 0x0000009c) && + (adev->gfx.me_feature_version >= 42) && + (adev->gfx.pfp_fw_version >= 0x000000b1) && + (adev->gfx.pfp_feature_version >= 42)) + adev->gfx.me_fw_write_wait = true; + + if ((adev->gfx.mec_fw_version >= 0x00000192) && + (adev->gfx.mec_feature_version >= 42)) + adev->gfx.mec_fw_write_wait = true; + break; + } +} + static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; @@ -716,6 +769,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) } out: + gfx_v9_0_check_fw_write_wait(adev); if (err) { dev_err(adev->dev, "gfx9: Failed to load firmware \"%s\"\n", @@ -4353,8 +4407,11 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, uint32_t ref, uint32_t mask) { int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); + struct amdgpu_device *adev = ring->adev; + bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ? + adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait; - if (amdgpu_sriov_vf(ring->adev)) + if (fw_version_ok) gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, ref, mask, 0x20); else -- GitLab From 3890d11153e20ed48de2adca4261788f72b93f66 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Fri, 17 Aug 2018 18:25:36 +0800 Subject: [PATCH 0482/1692] drm/amdgpu: use kiq to do invalidate tlb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To avoid the tlb flush not interrupted by world switch, use kiq and one command to do tlb invalidate. v2: Refine the invalidate lock position. 
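As an illustration only (not part of the diff below), the per-vmhub flush flow after this change can be sketched as follows; the helper name flush_one_hub is made up, all other names come from the patch, and the timeout handling is trimmed:

/* Sketch: try the single KIQ write+wait packet first, so a world switch
 * cannot split the request/ack sequence; fall back to direct MMIO polling
 * under the invalidate lock when the KIQ ring is not ready.
 */
static void flush_one_hub(struct amdgpu_device *adev, struct amdgpu_vmhub *hub,
			  u32 req, unsigned int vmid)
{
	const unsigned int eng = 17;	/* GART invalidation engine */

	if (!amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
					   hub->vm_inv_eng0_ack + eng,
					   req, 1 << vmid))
		return;	/* KIQ path succeeded, nothing else to do */

	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, req);
	/* ... busy-wait, then wait with delay, on vm_inv_eng0_ack ... */
	spin_unlock(&adev->gmc.invalidate_lock);
}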
Signed-off-by: Emily Deng Reviewed-and-Tested-by: Rex Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 3 - drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 74 +++++++++++++++++++++--- 3 files changed, 71 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6265b88135fc..19ef7711d944 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -212,6 +212,10 @@ enum amdgpu_kiq_irq { AMDGPU_CP_KIQ_IRQ_LAST }; +#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ +#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ +#define MAX_KIQ_REG_TRY 20 + int amdgpu_device_ip_set_clockgating_state(void *dev, enum amd_ip_block_type block_type, enum amd_clockgating_state state); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 21adb1b6e5cb..38856365580d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -22,9 +22,6 @@ */ #include "amdgpu.h" -#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ -#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ -#define MAX_KIQ_REG_TRY 20 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7300be4816a9..46183c7730c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -311,6 +311,58 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid) return req; } +signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + if (!ring->ready) + return -EINVAL; + + spin_lock_irqsave(&kiq->ring_lock, flags); + + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, + ref, mask); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq; + + might_sleep(); + + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq; + + return 0; + +failed_kiq: + pr_err("failed to invalidate tlb with kiq\n"); + return r; +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. 
@@ -332,13 +384,19 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, /* Use register 17 for GART */ const unsigned eng = 17; unsigned i, j; - - spin_lock(&adev->gmc.invalidate_lock); + int r; for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &adev->vmhub[i]; u32 tmp = gmc_v9_0_get_invalidate_req(vmid); + r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng, + hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid); + if (!r) + continue; + + spin_lock(&adev->gmc.invalidate_lock); + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); /* Busy wait for ACK.*/ @@ -349,8 +407,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, break; cpu_relax(); } - if (j < 100) + if (j < 100) { + spin_unlock(&adev->gmc.invalidate_lock); continue; + } /* Wait for ACK with a delay.*/ for (j = 0; j < adev->usec_timeout; j++) { @@ -360,13 +420,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, break; udelay(1); } - if (j < adev->usec_timeout) + if (j < adev->usec_timeout) { + spin_unlock(&adev->gmc.invalidate_lock); continue; - + } + spin_unlock(&adev->gmc.invalidate_lock); DRM_ERROR("Timeout waiting for VM flush ACK!\n"); } - - spin_unlock(&adev->gmc.invalidate_lock); } static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, -- GitLab From 9c70d10ae72a188adb9da83ec760e1d5779bc2ed Mon Sep 17 00:00:00 2001 From: Yintian Tao Date: Thu, 16 Aug 2018 16:17:57 +0800 Subject: [PATCH 0483/1692] drm/amdgpu: remove full access for suspend phase1 There is no need for GPU full access in suspend phase1 because under virtualization there is no HW register access for the DCE block. Signed-off-by: Yintian Tao Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f623c71977e9..c961e781430d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1974,9 +1974,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) { int i, r; - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_request_full_gpu(adev, false); - for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; @@ -1992,9 +1989,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) } } - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_release_full_gpu(adev, false); - return 0; } -- GitLab From 62347a33001c27b22465361aa4adcaa432497bdf Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Fri, 17 Aug 2018 10:32:50 -0400 Subject: [PATCH 0484/1692] drm/scheduler: Add stopped flag to drm_sched_entity The flag will prevent another thread from the same process from reinserting the entity queue into the scheduler's rq after it was already removed from there by another thread during drm_sched_entity_flush.
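For clarity, an illustrative sketch (not part of the diff) of the interleaving this flag closes, using the locking added below:

/* Thread A: process exit                Thread B: same process, pushing a job
 *
 * drm_sched_entity_flush()
 *   spin_lock(&entity->rq_lock);
 *   entity->stopped = true;
 *   drm_sched_rq_remove_entity(rq, entity);
 *   spin_unlock(&entity->rq_lock);
 *                                       drm_sched_entity_push_job()
 *                                         spin_lock(&entity->rq_lock);
 *                                         if (entity->stopped) {
 *                                                 spin_unlock(&entity->rq_lock);
 *                                                 return;   // refused instead of re-adding
 *                                         }
 *                                         drm_sched_rq_add_entity(rq, entity);
 *                                         spin_unlock(&entity->rq_lock);
 */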
Signed-off-by: Andrey Grodzovsky Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/sched_entity.c | 12 +++++++++++- include/drm/gpu_scheduler.h | 2 ++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 1416edb2642a..812e3530ea25 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -177,8 +177,12 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) /* For killed process disable any more IBs enqueue right now */ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); if ((!last_user || last_user == current->group_leader) && - (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) + (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) { + spin_lock(&entity->rq_lock); + entity->stopped = true; drm_sched_rq_remove_entity(entity->rq, entity); + spin_unlock(&entity->rq_lock); + } return ret; } @@ -504,6 +508,12 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, if (first) { /* Add the entity to the run queue */ spin_lock(&entity->rq_lock); + if (entity->stopped) { + spin_unlock(&entity->rq_lock); + + DRM_ERROR("Trying to push to a killed entity\n"); + return; + } drm_sched_rq_add_entity(entity->rq, entity); spin_unlock(&entity->rq_lock); drm_sched_wakeup(entity->rq->sched); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 919ae572f775..daec50f887b3 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -70,6 +70,7 @@ enum drm_sched_priority { * @fini_status: contains the exit status in case the process was signalled. * @last_scheduled: points to the finished fence of the last scheduled job. * @last_user: last group leader pushing a job into the entity. + * @stopped: Marks the enity as removed from rq and destined for termination. * * Entities will emit jobs in order to their corresponding hardware * ring, and the scheduler will alternate between entities based on @@ -92,6 +93,7 @@ struct drm_sched_entity { atomic_t *guilty; struct dma_fence *last_scheduled; struct task_struct *last_user; + bool stopped; }; /** -- GitLab From a245daf3d7a143fb2df16485ad200aa3298eac8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 1 Aug 2018 13:52:25 +0200 Subject: [PATCH 0485/1692] drm/amdgpu: cleanup HW_IP query MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the code into a separate function. 
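To show the other side of this interface, here is a hypothetical userspace sketch (not taken from this patch or from libdrm) of the query that the new amdgpu_hw_ip_info() helper serves; the types and constants come from the amdgpu uAPI header, while the helper name query_gfx_ip and the include path are assumptions:

/* Query GFX hw_ip info via DRM_IOCTL_AMDGPU_INFO on an open render node fd. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* assumed include path for the uAPI header */

static int query_gfx_ip(int fd, struct drm_amdgpu_info_hw_ip *ip)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	memset(ip, 0, sizeof(*ip));
	request.return_pointer = (uint64_t)(uintptr_t)ip;
	request.return_size = sizeof(*ip);
	request.query = AMDGPU_INFO_HW_IP_INFO;
	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;
	request.query_hw_ip.ip_instance = 0;

	/* On success the kernel fills ip->hw_ip_version_major/minor,
	 * ip->available_rings and the IB alignment fields. */
	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}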
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 204 +++++++++++++----------- 1 file changed, 110 insertions(+), 94 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 0c5d59b89849..bdb6362e9556 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -263,6 +263,109 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, return 0; } +static int amdgpu_hw_ip_info(struct amdgpu_device *adev, + struct drm_amdgpu_info *info, + struct drm_amdgpu_info_hw_ip *result) +{ + uint32_t ib_start_alignment = 0; + uint32_t ib_size_alignment = 0; + enum amd_ip_block_type type; + uint32_t ring_mask = 0; + unsigned int i, j; + + if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) + return -EINVAL; + + switch (info->query_hw_ip.type) { + case AMDGPU_HW_IP_GFX: + type = AMD_IP_BLOCK_TYPE_GFX; + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + ring_mask |= adev->gfx.gfx_ring[i].ready << i; + ib_start_alignment = 32; + ib_size_alignment = 32; + break; + case AMDGPU_HW_IP_COMPUTE: + type = AMD_IP_BLOCK_TYPE_GFX; + for (i = 0; i < adev->gfx.num_compute_rings; i++) + ring_mask |= adev->gfx.compute_ring[i].ready << i; + ib_start_alignment = 32; + ib_size_alignment = 32; + break; + case AMDGPU_HW_IP_DMA: + type = AMD_IP_BLOCK_TYPE_SDMA; + for (i = 0; i < adev->sdma.num_instances; i++) + ring_mask |= adev->sdma.instance[i].ring.ready << i; + ib_start_alignment = 256; + ib_size_alignment = 4; + break; + case AMDGPU_HW_IP_UVD: + type = AMD_IP_BLOCK_TYPE_UVD; + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + if (adev->uvd.harvest_config & (1 << i)) + continue; + ring_mask |= adev->uvd.inst[i].ring.ready; + } + ib_start_alignment = 64; + ib_size_alignment = 64; + break; + case AMDGPU_HW_IP_VCE: + type = AMD_IP_BLOCK_TYPE_VCE; + for (i = 0; i < adev->vce.num_rings; i++) + ring_mask |= adev->vce.ring[i].ready << i; + ib_start_alignment = 4; + ib_size_alignment = 1; + break; + case AMDGPU_HW_IP_UVD_ENC: + type = AMD_IP_BLOCK_TYPE_UVD; + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + if (adev->uvd.harvest_config & (1 << i)) + continue; + for (j = 0; j < adev->uvd.num_enc_rings; j++) + ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j; + } + ib_start_alignment = 64; + ib_size_alignment = 64; + break; + case AMDGPU_HW_IP_VCN_DEC: + type = AMD_IP_BLOCK_TYPE_VCN; + ring_mask = adev->vcn.ring_dec.ready; + ib_start_alignment = 16; + ib_size_alignment = 16; + break; + case AMDGPU_HW_IP_VCN_ENC: + type = AMD_IP_BLOCK_TYPE_VCN; + for (i = 0; i < adev->vcn.num_enc_rings; i++) + ring_mask |= adev->vcn.ring_enc[i].ready << i; + ib_start_alignment = 64; + ib_size_alignment = 1; + break; + case AMDGPU_HW_IP_VCN_JPEG: + type = AMD_IP_BLOCK_TYPE_VCN; + ring_mask = adev->vcn.ring_jpeg.ready; + ib_start_alignment = 16; + ib_size_alignment = 16; + break; + default: + return -EINVAL; + } + + for (i = 0; i < adev->num_ip_blocks; i++) + if (adev->ip_blocks[i].version->type == type && + adev->ip_blocks[i].status.valid) + break; + + if (i == adev->num_ip_blocks) + return 0; + + result->hw_ip_version_major = adev->ip_blocks[i].version->major; + result->hw_ip_version_minor = adev->ip_blocks[i].version->minor; + result->capabilities_flags = 0; + result->available_rings = ring_mask; + result->ib_start_alignment = ib_start_alignment; + result->ib_size_alignment = ib_size_alignment; + return 0; +} + /* * Userspace get 
information ioctl */ @@ -288,7 +391,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_crtc *crtc; uint32_t ui32 = 0; uint64_t ui64 = 0; - int i, j, found; + int i, found; int ui32_size = sizeof(ui32); if (!info->return_size || !info->return_pointer) @@ -318,101 +421,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0; case AMDGPU_INFO_HW_IP_INFO: { struct drm_amdgpu_info_hw_ip ip = {}; - enum amd_ip_block_type type; - uint32_t ring_mask = 0; - uint32_t ib_start_alignment = 0; - uint32_t ib_size_alignment = 0; - - if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) - return -EINVAL; + int ret; - switch (info->query_hw_ip.type) { - case AMDGPU_HW_IP_GFX: - type = AMD_IP_BLOCK_TYPE_GFX; - for (i = 0; i < adev->gfx.num_gfx_rings; i++) - ring_mask |= adev->gfx.gfx_ring[i].ready << i; - ib_start_alignment = 32; - ib_size_alignment = 32; - break; - case AMDGPU_HW_IP_COMPUTE: - type = AMD_IP_BLOCK_TYPE_GFX; - for (i = 0; i < adev->gfx.num_compute_rings; i++) - ring_mask |= adev->gfx.compute_ring[i].ready << i; - ib_start_alignment = 32; - ib_size_alignment = 32; - break; - case AMDGPU_HW_IP_DMA: - type = AMD_IP_BLOCK_TYPE_SDMA; - for (i = 0; i < adev->sdma.num_instances; i++) - ring_mask |= adev->sdma.instance[i].ring.ready << i; - ib_start_alignment = 256; - ib_size_alignment = 4; - break; - case AMDGPU_HW_IP_UVD: - type = AMD_IP_BLOCK_TYPE_UVD; - for (i = 0; i < adev->uvd.num_uvd_inst; i++) { - if (adev->uvd.harvest_config & (1 << i)) - continue; - ring_mask |= adev->uvd.inst[i].ring.ready; - } - ib_start_alignment = 64; - ib_size_alignment = 64; - break; - case AMDGPU_HW_IP_VCE: - type = AMD_IP_BLOCK_TYPE_VCE; - for (i = 0; i < adev->vce.num_rings; i++) - ring_mask |= adev->vce.ring[i].ready << i; - ib_start_alignment = 4; - ib_size_alignment = 1; - break; - case AMDGPU_HW_IP_UVD_ENC: - type = AMD_IP_BLOCK_TYPE_UVD; - for (i = 0; i < adev->uvd.num_uvd_inst; i++) { - if (adev->uvd.harvest_config & (1 << i)) - continue; - for (j = 0; j < adev->uvd.num_enc_rings; j++) - ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j; - } - ib_start_alignment = 64; - ib_size_alignment = 64; - break; - case AMDGPU_HW_IP_VCN_DEC: - type = AMD_IP_BLOCK_TYPE_VCN; - ring_mask = adev->vcn.ring_dec.ready; - ib_start_alignment = 16; - ib_size_alignment = 16; - break; - case AMDGPU_HW_IP_VCN_ENC: - type = AMD_IP_BLOCK_TYPE_VCN; - for (i = 0; i < adev->vcn.num_enc_rings; i++) - ring_mask |= adev->vcn.ring_enc[i].ready << i; - ib_start_alignment = 64; - ib_size_alignment = 1; - break; - case AMDGPU_HW_IP_VCN_JPEG: - type = AMD_IP_BLOCK_TYPE_VCN; - ring_mask = adev->vcn.ring_jpeg.ready; - ib_start_alignment = 16; - ib_size_alignment = 16; - break; - default: - return -EINVAL; - } + ret = amdgpu_hw_ip_info(adev, info, &ip); + if (ret) + return ret; - for (i = 0; i < adev->num_ip_blocks; i++) { - if (adev->ip_blocks[i].version->type == type && - adev->ip_blocks[i].status.valid) { - ip.hw_ip_version_major = adev->ip_blocks[i].version->major; - ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor; - ip.capabilities_flags = 0; - ip.available_rings = ring_mask; - ip.ib_start_alignment = ib_start_alignment; - ip.ib_size_alignment = ib_size_alignment; - break; - } - } - return copy_to_user(out, &ip, - min((size_t)size, sizeof(ip))) ? -EFAULT : 0; + ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip))); + return ret ? 
-EFAULT : 0; } case AMDGPU_INFO_HW_IP_COUNT: { enum amd_ip_block_type type; -- GitLab From 1b1f2fecb699bb4ccc3cb2fafe92950e9bdb39de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 1 Aug 2018 16:00:52 +0200 Subject: [PATCH 0486/1692] drm/amdgpu: rework ctx entity creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use a fixed number of entities for each hardware IP. The number of compute entities is reduced to four, SDMA keeps it two entities and all other engines just expose one entity. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 291 ++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 30 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 36 ++- 3 files changed, 190 insertions(+), 167 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 0a6cd1202ee5..987b7f256463 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -27,8 +27,29 @@ #include "amdgpu.h" #include "amdgpu_sched.h" -#define to_amdgpu_ctx_ring(e) \ - container_of((e), struct amdgpu_ctx_ring, entity) +#define to_amdgpu_ctx_entity(e) \ + container_of((e), struct amdgpu_ctx_entity, entity) + +const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { + [AMDGPU_HW_IP_GFX] = 1, + [AMDGPU_HW_IP_COMPUTE] = 4, + [AMDGPU_HW_IP_DMA] = 2, + [AMDGPU_HW_IP_UVD] = 1, + [AMDGPU_HW_IP_VCE] = 1, + [AMDGPU_HW_IP_UVD_ENC] = 1, + [AMDGPU_HW_IP_VCN_DEC] = 1, + [AMDGPU_HW_IP_VCN_ENC] = 1, +}; + +static int amdgput_ctx_total_num_entities(void) +{ + unsigned i, num_entities = 0; + + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) + num_entities += amdgpu_ctx_num_entities[i]; + + return num_entities; +} static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) @@ -51,9 +72,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct drm_file *filp, struct amdgpu_ctx *ctx) { - struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS]; - struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS]; - unsigned i, j, num_sdma_rqs, num_comp_rqs; + unsigned num_entities = amdgput_ctx_total_num_entities(); + unsigned i, j; int r; if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) @@ -65,19 +85,33 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, memset(ctx, 0, sizeof(*ctx)); ctx->adev = adev; - kref_init(&ctx->refcount); - spin_lock_init(&ctx->ring_lock); - ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, + + ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, sizeof(struct dma_fence*), GFP_KERNEL); if (!ctx->fences) return -ENOMEM; - mutex_init(&ctx->lock); + ctx->entities[0] = kcalloc(num_entities, + sizeof(struct amdgpu_ctx_entity), + GFP_KERNEL); + if (!ctx->entities[0]) { + r = -ENOMEM; + goto error_free_fences; + } - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - ctx->rings[i].sequence = 1; - ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; + for (i = 0; i < num_entities; ++i) { + struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; + + entity->sequence = 1; + entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; } + for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) + ctx->entities[i] = ctx->entities[i - 1] + + amdgpu_ctx_num_entities[i - 1]; + + kref_init(&ctx->refcount); + spin_lock_init(&ctx->ring_lock); + mutex_init(&ctx->lock); ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); ctx->reset_counter_query = 
ctx->reset_counter; @@ -85,50 +119,70 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->init_priority = priority; ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; - num_sdma_rqs = 0; - num_comp_rqs = 0; - for (i = 0; i < adev->num_rings; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - struct drm_sched_rq *rq; - - rq = &ring->sched.sched_rq[priority]; - if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) - sdma_rqs[num_sdma_rqs++] = rq; - else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) - comp_rqs[num_comp_rqs++] = rq; - } - - /* create context entity for each ring */ - for (i = 0; i < adev->num_rings; i++) { - struct amdgpu_ring *ring = adev->rings[i]; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; + struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; + unsigned num_rings; + + switch (i) { + case AMDGPU_HW_IP_GFX: + rings[0] = &adev->gfx.gfx_ring[0]; + num_rings = 1; + break; + case AMDGPU_HW_IP_COMPUTE: + for (j = 0; j < adev->gfx.num_compute_rings; ++j) + rings[j] = &adev->gfx.compute_ring[j]; + num_rings = adev->gfx.num_compute_rings; + break; + case AMDGPU_HW_IP_DMA: + for (j = 0; j < adev->sdma.num_instances; ++j) + rings[j] = &adev->sdma.instance[j].ring; + num_rings = adev->sdma.num_instances; + break; + case AMDGPU_HW_IP_UVD: + rings[0] = &adev->uvd.inst[0].ring; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCE: + rings[0] = &adev->vce.ring[0]; + num_rings = 1; + break; + case AMDGPU_HW_IP_UVD_ENC: + rings[0] = &adev->uvd.inst[0].ring_enc[0]; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCN_DEC: + rings[0] = &adev->vcn.ring_dec; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCN_ENC: + rings[0] = &adev->vcn.ring_enc[0]; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCN_JPEG: + rings[0] = &adev->vcn.ring_jpeg; + num_rings = 1; + break; + } - if (ring == &adev->gfx.kiq.ring) - continue; + for (j = 0; j < num_rings; ++j) + rqs[j] = &rings[j]->sched.sched_rq[priority]; - if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) { - r = drm_sched_entity_init(&ctx->rings[i].entity, - sdma_rqs, num_sdma_rqs, - &ctx->guilty); - } else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { - r = drm_sched_entity_init(&ctx->rings[i].entity, - comp_rqs, num_comp_rqs, - &ctx->guilty); - } else { - struct drm_sched_rq *rq; - - rq = &ring->sched.sched_rq[priority]; - r = drm_sched_entity_init(&ctx->rings[i].entity, - &rq, 1, &ctx->guilty); - } + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) + r = drm_sched_entity_init(&ctx->entities[i][j].entity, + rqs, num_rings, &ctx->guilty); if (r) - goto failed; + goto error_cleanup_entities; } return 0; -failed: - for (j = 0; j < i; j++) - drm_sched_entity_destroy(&ctx->rings[j].entity); +error_cleanup_entities: + for (i = 0; i < num_entities; ++i) + drm_sched_entity_destroy(&ctx->entities[0][i].entity); + kfree(ctx->entities[0]); + +error_free_fences: kfree(ctx->fences); ctx->fences = NULL; return r; @@ -137,17 +191,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, static void amdgpu_ctx_fini(struct kref *ref) { struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); + unsigned num_entities = amdgput_ctx_total_num_entities(); struct amdgpu_device *adev = ctx->adev; unsigned i, j; if (!adev) return; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + for (i = 0; i < num_entities; ++i) for (j = 0; j < amdgpu_sched_jobs; ++j) - dma_fence_put(ctx->rings[i].fences[j]); + dma_fence_put(ctx->entities[0][i].fences[j]); kfree(ctx->fences); - ctx->fences = NULL; + kfree(ctx->entities[0]); 
mutex_destroy(&ctx->lock); @@ -157,9 +212,10 @@ static void amdgpu_ctx_fini(struct kref *ref) int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, u32 ring, struct drm_sched_entity **entity) { - struct amdgpu_device *adev = ctx->adev; - unsigned num_rings = 0; - struct amdgpu_ring *out_ring; + if (hw_ip >= AMDGPU_HW_IP_NUM) { + DRM_ERROR("unknown HW IP type: %d\n", hw_ip); + return -EINVAL; + } /* Right now all IPs have only one instance - multiple rings. */ if (instance != 0) { @@ -167,52 +223,12 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, return -EINVAL; } - switch (hw_ip) { - case AMDGPU_HW_IP_GFX: - out_ring = &adev->gfx.gfx_ring[ring]; - num_rings = adev->gfx.num_gfx_rings; - break; - case AMDGPU_HW_IP_COMPUTE: - out_ring = &adev->gfx.compute_ring[ring]; - num_rings = adev->gfx.num_compute_rings; - break; - case AMDGPU_HW_IP_DMA: - out_ring = &adev->sdma.instance[ring].ring; - num_rings = adev->sdma.num_instances; - break; - case AMDGPU_HW_IP_UVD: - out_ring = &adev->uvd.inst[0].ring; - num_rings = adev->uvd.num_uvd_inst; - break; - case AMDGPU_HW_IP_VCE: - out_ring = &adev->vce.ring[ring]; - num_rings = adev->vce.num_rings; - break; - case AMDGPU_HW_IP_UVD_ENC: - out_ring = &adev->uvd.inst[0].ring_enc[ring]; - num_rings = adev->uvd.num_enc_rings; - break; - case AMDGPU_HW_IP_VCN_DEC: - out_ring = &adev->vcn.ring_dec; - num_rings = 1; - break; - case AMDGPU_HW_IP_VCN_ENC: - out_ring = &adev->vcn.ring_enc[ring]; - num_rings = adev->vcn.num_enc_rings; - break; - case AMDGPU_HW_IP_VCN_JPEG: - out_ring = &adev->vcn.ring_jpeg; - num_rings = 1; - break; - default: - DRM_ERROR("unknown HW IP type: %d\n", hw_ip); + if (ring >= amdgpu_ctx_num_entities[hw_ip]) { + DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring); return -EINVAL; } - if (ring > num_rings) - return -EINVAL; - - *entity = &ctx->rings[out_ring->idx].entity; + *entity = &ctx->entities[hw_ip][ring].entity; return 0; } @@ -252,17 +268,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; + unsigned num_entities; u32 i; ctx = container_of(ref, struct amdgpu_ctx, refcount); - for (i = 0; i < ctx->adev->num_rings; i++) { + num_entities = 0; + for (i = 0; i < AMDGPU_HW_IP_NUM; i++) + num_entities += amdgpu_ctx_num_entities[i]; - if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) - continue; - - drm_sched_entity_destroy(&ctx->rings[i].entity); - } + for (i = 0; i < num_entities; i++) + drm_sched_entity_destroy(&ctx->entities[0][i].entity); amdgpu_ctx_fini(ref); } @@ -422,21 +438,21 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, struct dma_fence *fence, uint64_t* handle) { - struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); - uint64_t seq = cring->sequence; + struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); + uint64_t seq = centity->sequence; struct dma_fence *other = NULL; unsigned idx = 0; idx = seq & (amdgpu_sched_jobs - 1); - other = cring->fences[idx]; + other = centity->fences[idx]; if (other) BUG_ON(!dma_fence_is_signaled(other)); dma_fence_get(fence); spin_lock(&ctx->ring_lock); - cring->fences[idx] = fence; - cring->sequence++; + centity->fences[idx] = fence; + centity->sequence++; spin_unlock(&ctx->ring_lock); dma_fence_put(other); @@ -450,26 +466,26 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, uint64_t seq) { - struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); + 
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); struct dma_fence *fence; spin_lock(&ctx->ring_lock); if (seq == ~0ull) - seq = cring->sequence - 1; + seq = centity->sequence - 1; - if (seq >= cring->sequence) { + if (seq >= centity->sequence) { spin_unlock(&ctx->ring_lock); return ERR_PTR(-EINVAL); } - if (seq + amdgpu_sched_jobs < cring->sequence) { + if (seq + amdgpu_sched_jobs < centity->sequence) { spin_unlock(&ctx->ring_lock); return NULL; } - fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); + fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); spin_unlock(&ctx->ring_lock); return fence; @@ -478,23 +494,17 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority) { - int i; - struct amdgpu_device *adev = ctx->adev; - struct drm_sched_entity *entity; - struct amdgpu_ring *ring; + unsigned num_entities = amdgput_ctx_total_num_entities(); enum drm_sched_priority ctx_prio; + unsigned i; ctx->override_priority = priority; ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? ctx->init_priority : ctx->override_priority; - for (i = 0; i < adev->num_rings; i++) { - ring = adev->rings[i]; - entity = &ctx->rings[i].entity; - - if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) - continue; + for (i = 0; i < num_entities; i++) { + struct drm_sched_entity *entity = &ctx->entities[0][i].entity; drm_sched_entity_set_priority(entity, ctx_prio); } @@ -503,9 +513,9 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity) { - struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); - unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1); - struct dma_fence *other = cring->fences[idx]; + struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); + unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); + struct dma_fence *other = centity->fences[idx]; if (other) { signed long r; @@ -529,6 +539,7 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr) { + unsigned num_entities = amdgput_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; uint32_t id, i; @@ -544,13 +555,11 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr) return; } - for (i = 0; i < ctx->adev->num_rings; i++) { + for (i = 0; i < num_entities; i++) { + struct drm_sched_entity *entity; - if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) - continue; - - max_wait = drm_sched_entity_flush(&ctx->rings[i].entity, - max_wait); + entity = &ctx->entities[0][i].entity; + max_wait = drm_sched_entity_flush(entity, max_wait); } } mutex_unlock(&mgr->lock); @@ -558,6 +567,7 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr) void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) { + unsigned num_entities = amdgput_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; uint32_t id, i; @@ -569,16 +579,13 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) if (!ctx->adev) return; - for (i = 0; i < ctx->adev->num_rings; i++) { - - if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) - continue; - - if (kref_read(&ctx->refcount) == 1) - drm_sched_entity_fini(&ctx->rings[i].entity); - else - DRM_ERROR("ctx %p is still alive\n", ctx); + if (kref_read(&ctx->refcount) != 1) { + DRM_ERROR("ctx %p is still alive\n", ctx); + continue; } + + for (i = 0; i < 
num_entities; i++) + drm_sched_entity_fini(&ctx->entities[0][i].entity); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 609f925b076c..d67c1d285a4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -29,26 +29,26 @@ struct drm_device; struct drm_file; struct amdgpu_fpriv; -struct amdgpu_ctx_ring { +struct amdgpu_ctx_entity { uint64_t sequence; struct dma_fence **fences; struct drm_sched_entity entity; }; struct amdgpu_ctx { - struct kref refcount; - struct amdgpu_device *adev; - unsigned reset_counter; - unsigned reset_counter_query; - uint32_t vram_lost_counter; - spinlock_t ring_lock; - struct dma_fence **fences; - struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; - bool preamble_presented; - enum drm_sched_priority init_priority; - enum drm_sched_priority override_priority; - struct mutex lock; - atomic_t guilty; + struct kref refcount; + struct amdgpu_device *adev; + unsigned reset_counter; + unsigned reset_counter_query; + uint32_t vram_lost_counter; + spinlock_t ring_lock; + struct dma_fence **fences; + struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM]; + bool preamble_presented; + enum drm_sched_priority init_priority; + enum drm_sched_priority override_priority; + struct mutex lock; + atomic_t guilty; }; struct amdgpu_ctx_mgr { @@ -58,6 +58,8 @@ struct amdgpu_ctx_mgr { struct idr ctx_handles; }; +extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM]; + struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bdb6362e9556..ad7978bab5fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -270,7 +270,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, uint32_t ib_start_alignment = 0; uint32_t ib_size_alignment = 0; enum amd_ip_block_type type; - uint32_t ring_mask = 0; + unsigned int num_rings = 0; unsigned int i, j; if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) @@ -280,21 +280,24 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_GFX: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_gfx_rings; i++) - ring_mask |= adev->gfx.gfx_ring[i].ready << i; + if (adev->gfx.gfx_ring[i].ready) + ++num_rings; ib_start_alignment = 32; ib_size_alignment = 32; break; case AMDGPU_HW_IP_COMPUTE: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_compute_rings; i++) - ring_mask |= adev->gfx.compute_ring[i].ready << i; + if (adev->gfx.compute_ring[i].ready) + ++num_rings; ib_start_alignment = 32; ib_size_alignment = 32; break; case AMDGPU_HW_IP_DMA: type = AMD_IP_BLOCK_TYPE_SDMA; for (i = 0; i < adev->sdma.num_instances; i++) - ring_mask |= adev->sdma.instance[i].ring.ready << i; + if (adev->sdma.instance[i].ring.ready) + ++num_rings; ib_start_alignment = 256; ib_size_alignment = 4; break; @@ -303,7 +306,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, for (i = 0; i < adev->uvd.num_uvd_inst; i++) { if (adev->uvd.harvest_config & (1 << i)) continue; - ring_mask |= adev->uvd.inst[i].ring.ready; + + if (adev->uvd.inst[i].ring.ready) + ++num_rings; } ib_start_alignment = 64; ib_size_alignment = 64; @@ -311,7 +316,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; for (i = 0; i < adev->vce.num_rings; i++) - ring_mask |= 
adev->vce.ring[i].ready << i; + if (adev->vce.ring[i].ready) + ++num_rings; ib_start_alignment = 4; ib_size_alignment = 1; break; @@ -320,28 +326,33 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, for (i = 0; i < adev->uvd.num_uvd_inst; i++) { if (adev->uvd.harvest_config & (1 << i)) continue; + for (j = 0; j < adev->uvd.num_enc_rings; j++) - ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j; + if (adev->uvd.inst[i].ring_enc[j].ready) + ++num_rings; } ib_start_alignment = 64; ib_size_alignment = 64; break; case AMDGPU_HW_IP_VCN_DEC: type = AMD_IP_BLOCK_TYPE_VCN; - ring_mask = adev->vcn.ring_dec.ready; + if (adev->vcn.ring_dec.ready) + ++num_rings; ib_start_alignment = 16; ib_size_alignment = 16; break; case AMDGPU_HW_IP_VCN_ENC: type = AMD_IP_BLOCK_TYPE_VCN; for (i = 0; i < adev->vcn.num_enc_rings; i++) - ring_mask |= adev->vcn.ring_enc[i].ready << i; + if (adev->vcn.ring_enc[i].ready) + ++num_rings; ib_start_alignment = 64; ib_size_alignment = 1; break; case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; - ring_mask = adev->vcn.ring_jpeg.ready; + if (adev->vcn.ring_jpeg.ready) + ++num_rings; ib_start_alignment = 16; ib_size_alignment = 16; break; @@ -357,10 +368,13 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (i == adev->num_ip_blocks) return 0; + num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type], + num_rings); + result->hw_ip_version_major = adev->ip_blocks[i].version->major; result->hw_ip_version_minor = adev->ip_blocks[i].version->minor; result->capabilities_flags = 0; - result->available_rings = ring_mask; + result->available_rings = (1 << num_rings) - 1; result->ib_start_alignment = ib_start_alignment; result->ib_size_alignment = ib_size_alignment; return 0; -- GitLab From a00ead2b394bd591159261f4e8c08819ff4f45f4 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 20 Aug 2018 20:19:18 +0800 Subject: [PATCH 0487/1692] drm/amdgpu: Fix compile warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In function ‘gfx_v9_0_check_fw_write_wait’: warning: enumeration value ‘CHIP_TAHITI’ not handled in switch [-Wswitch] Always add default case in case there is no match Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 0cba430712d1..44707f94b2c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -532,6 +532,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 42)) adev->gfx.mec_fw_write_wait = true; break; + default: + break; } } -- GitLab From 52de2ea74f36b50e2c2b101306fe892f8e7be6a6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 21 Aug 2018 14:51:53 +0800 Subject: [PATCH 0488/1692] drm/amdgpu: fix sdma doorbell range setting Use the old doorbell range setting until the driver is able to support more sdma queues. 
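The range register is updated with a read-modify-write of its SIZE field through REG_SET_FIELD(); this change only shrinks the programmed SIZE value from 8 to 2 per SDMA instance. As a rough sketch of what such a field update boils down to (the helper name, mask and shift below are illustrative placeholders, not the real BIF_SDMA0_DOORBELL_RANGE layout):

	#include <stdint.h>

	/* illustrative field layout only -- not the actual register definition */
	#define DOORBELL_RANGE_SIZE_SHIFT  16u
	#define DOORBELL_RANGE_SIZE_MASK   (0xFFu << DOORBELL_RANGE_SIZE_SHIFT)

	static inline uint32_t doorbell_range_set_size(uint32_t reg, uint32_t size)
	{
		/* clear the old SIZE field, then insert the new value */
		reg &= ~DOORBELL_RANGE_SIZE_MASK;
		reg |= (size << DOORBELL_RANGE_SIZE_SHIFT) & DOORBELL_RANGE_SIZE_MASK;
		return reg;
	}

The OFFSET field programmed just before it is left untouched; only the size written in the hunk below changes.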
Signed-off-by: Evan Quan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 89ea92075b6b..2e65447637c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -76,7 +76,7 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); - doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 8); + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2); } else doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); -- GitLab From fc0faf04400133393faa0a289aeba5d82069e52e Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Tue, 21 Aug 2018 18:51:38 +0800 Subject: [PATCH 0489/1692] drm/amdgpu/sriov: Only sriov runtime support use kiq MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For sriov, don't use kiq in exclusive mode, as don't know how long time it will take, some times it will occur exclusive timeout. Signed-off-by: Emily Deng Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 46183c7730c4..b6b5ede0ca35 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -321,9 +321,6 @@ signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *ring = &kiq->ring; - if (!ring->ready) - return -EINVAL; - spin_lock_irqsave(&kiq->ring_lock, flags); amdgpu_ring_alloc(ring, 32); @@ -390,10 +387,14 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, struct amdgpu_vmhub *hub = &adev->vmhub[i]; u32 tmp = gmc_v9_0_get_invalidate_req(vmid); - r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng, - hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid); - if (!r) - continue; + if (adev->gfx.kiq.ring.ready && + (amdgpu_sriov_runtime(adev) || + !amdgpu_sriov_vf(adev))) { + r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng, + hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid); + if (!r) + continue; + } spin_lock(&adev->gmc.invalidate_lock); -- GitLab From 35fb7220742b80dadd7218448b4bee5b190c9f07 Mon Sep 17 00:00:00 2001 From: Wen Yang Date: Fri, 17 Aug 2018 11:09:48 +0800 Subject: [PATCH 0490/1692] drm/amd/display: fix a compile warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix comile warning like, CC [M] drivers/gpu/drm/i915/gvt/execlist.o CC [M] drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.o CC [M] drivers/gpu/drm/radeon/btc_dpm.o CC [M] drivers/isdn/hisax/avm_a1p.o CC [M] drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_dpp.o drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_hw_sequencer.c: In function ‘dcn10_update_mpcc’: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_hw_sequencer.c:1903:9: warning: missing braces around initializer [-Wmissing-braces] struct mpcc_blnd_cfg blnd_cfg = {0}; ^ drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_hw_sequencer.c:1903:9: warning: (near initialization for 
‘blnd_cfg.black_color’) [-Wmissing-braces] Acked-by: Randy Dunlap Signed-off-by: Wen Yang Reviewed-by: Jiang Biao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 051f427868ca..1c5bb148efb7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1917,7 +1917,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state) static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; - struct mpcc_blnd_cfg blnd_cfg = {0}; + struct mpcc_blnd_cfg blnd_cfg = {{0}}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; int mpcc_id; struct mpcc *new_mpcc; -- GitLab From 43fbbe89f15b297e269388dee63901715e55d712 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Aug 2018 12:09:45 +0300 Subject: [PATCH 0491/1692] drm/amd/display: indent an if statement The if statement isn't indented and it makes static checkers complain. Signed-off-by: Dan Carpenter Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 07a1dd41666d..f85fa7b55efb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -367,7 +367,7 @@ static bool is_dp_and_hdmi_sharable( if (stream1->clamping.c_depth != COLOR_DEPTH_888 || stream2->clamping.c_depth != COLOR_DEPTH_888) - return false; + return false; return true; -- GitLab From c89677afb30582f16d7378a324e3e3f1c07e69b2 Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Tue, 21 Aug 2018 18:59:08 +0530 Subject: [PATCH 0492/1692] drm/scheduler: avoid redundant shifting of the entity v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit do not remove entity from the rq if the current rq is from the least loaded scheduler. Signed-off-by: Nayan Deshmukh Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/sched_entity.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 812e3530ea25..4e5e95c0cab5 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -476,6 +476,9 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) return; rq = drm_sched_entity_get_free_sched(entity); + if (rq == entity->rq) + return; + spin_lock(&entity->rq_lock); drm_sched_rq_remove_entity(entity->rq, entity); entity->rq = rq; -- GitLab From ae74da3e145198dfb766c7beddd473fad2fb2a5b Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Wed, 22 Aug 2018 20:32:23 +0800 Subject: [PATCH 0493/1692] drm/amdgpu: Don't use kiq in gpu reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When in gpu reset, don't use kiq, it will generate more TDR. Signed-off-by: Emily Deng Reviewed-by: Christian König . 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b6b5ede0ca35..6763570c0321 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -332,15 +332,8 @@ signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. - * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + /* don't wait anymore for IRQ context */ + if (r < 1 && in_interrupt()) goto failed_kiq; might_sleep(); @@ -388,8 +381,8 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, u32 tmp = gmc_v9_0_get_invalidate_req(vmid); if (adev->gfx.kiq.ring.ready && - (amdgpu_sriov_runtime(adev) || - !amdgpu_sriov_vf(adev))) { + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && + !adev->in_gpu_reset) { r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng, hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid); if (!r) -- GitLab From 11c3ee48bd7c232c0a750b4dde8ee48f315dcdf3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 14 Aug 2018 14:53:52 -0500 Subject: [PATCH 0494/1692] drm/amdgpu/display: add support for LVDS (v5) This adds support for LVDS displays. v2: add support for spread spectrum, sink detect v3: clean up enable_lvds_output v4: fix up link_detect v5: remove assert on 888 format Bug: https://bugs.freedesktop.org/show_bug.cgi?id=105880 Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 + drivers/gpu/drm/amd/display/dc/core/dc_link.c | 45 +++++++++++++++++++ .../drm/amd/display/dc/dce/dce_clock_source.c | 10 +++++ .../drm/amd/display/dc/dce/dce_clock_source.h | 2 + .../drm/amd/display/dc/dce/dce_link_encoder.c | 34 ++++++++++++++ .../drm/amd/display/dc/dce/dce_link_encoder.h | 6 +++ .../amd/display/dc/dce/dce_stream_encoder.c | 24 ++++++++++ .../drm/amd/display/dc/inc/hw/link_encoder.h | 3 ++ .../amd/display/dc/inc/hw/stream_encoder.h | 4 ++ .../drm/amd/display/include/signal_types.h | 5 +++ 10 files changed, 135 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1a6b303c8379..c18bad387635 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3358,6 +3358,8 @@ static int to_drm_connector_type(enum signal_type st) return DRM_MODE_CONNECTOR_HDMIA; case SIGNAL_TYPE_EDP: return DRM_MODE_CONNECTOR_eDP; + case SIGNAL_TYPE_LVDS: + return DRM_MODE_CONNECTOR_LVDS; case SIGNAL_TYPE_RGB: return DRM_MODE_CONNECTOR_VGA; case SIGNAL_TYPE_DISPLAY_PORT: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 53ce2a9b7eed..309059871706 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -203,6 +203,11 @@ static bool detect_sink(struct dc_link *link, enum dc_connection_type *type) uint32_t is_hpd_high = 0; struct gpio *hpd_pin; + if (link->connector_signal == 
SIGNAL_TYPE_LVDS) { + *type = dc_connection_single; + return true; + } + /* todo: may need to lock gpio access */ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); if (hpd_pin == NULL) @@ -616,6 +621,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) link->local_sink) return true; + if (link->connector_signal == SIGNAL_TYPE_LVDS && + link->local_sink) + return true; + prev_sink = link->local_sink; if (prev_sink != NULL) { dc_sink_retain(prev_sink); @@ -649,6 +658,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) break; } + case SIGNAL_TYPE_LVDS: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_LVDS; + break; + } + case SIGNAL_TYPE_EDP: { detect_edp_sink_caps(link); sink_caps.transaction_type = @@ -1087,6 +1102,9 @@ static bool construct( dal_irq_get_rx_source(hpd_gpio); } break; + case CONNECTOR_ID_LVDS: + link->connector_signal = SIGNAL_TYPE_LVDS; + break; default: DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id); goto create_fail; @@ -1920,6 +1938,24 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) dal_ddc_service_read_scdc_data(link->ddc); } +static void enable_link_lvds(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->sink->link; + + if (stream->phy_pix_clk == 0) + stream->phy_pix_clk = stream->timing.pix_clk_khz; + + memset(&stream->sink->link->cur_link_settings, 0, + sizeof(struct dc_link_settings)); + + link->link_enc->funcs->enable_lvds_output( + link->link_enc, + pipe_ctx->clock_source->id, + stream->phy_pix_clk); + +} + /****************************enable_link***********************************/ static enum dc_status enable_link( struct dc_state *state, @@ -1943,6 +1979,10 @@ static enum dc_status enable_link( enable_link_hdmi(pipe_ctx); status = DC_OK; break; + case SIGNAL_TYPE_LVDS: + enable_link_lvds(pipe_ctx); + status = DC_OK; + break; case SIGNAL_TYPE_VIRTUAL: status = DC_OK; break; @@ -2492,6 +2532,11 @@ void core_link_enable_stream( (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ? 
true : false); + if (dc_is_lvds_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream_res.stream_enc->funcs->lvds_set_stream_attribute( + pipe_ctx->stream_res.stream_enc, + &stream->timing); + resource_build_info_frame(pipe_ctx); core_dc->hwss.update_info_frame(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index ca137757a69e..1f23224d495a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -75,6 +75,11 @@ static const struct spread_spectrum_data *get_ss_data_entry( entrys_num = clk_src->hdmi_ss_params_cnt; break; + case SIGNAL_TYPE_LVDS: + ss_parm = clk_src->lvds_ss_params; + entrys_num = clk_src->lvds_ss_params_cnt; + break; + case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_EDP: @@ -1184,6 +1189,11 @@ static void ss_info_from_atombios_create( AS_SIGNAL_TYPE_DVI, &clk_src->dvi_ss_params, &clk_src->dvi_ss_params_cnt); + get_ss_info_from_atombios( + clk_src, + AS_SIGNAL_TYPE_LVDS, + &clk_src->lvds_ss_params, + &clk_src->lvds_ss_params_cnt); } static bool calc_pll_max_vco_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index c45e2f76189e..cdeb96a268fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -125,6 +125,8 @@ struct dce110_clk_src { uint32_t hdmi_ss_params_cnt; struct spread_spectrum_data *dvi_ss_params; uint32_t dvi_ss_params_cnt; + struct spread_spectrum_data *lvds_ss_params; + uint32_t lvds_ss_params_cnt; uint32_t ext_clk_khz; uint32_t ref_freq_khz; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index eff7d22d78fb..4942590e8b9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -102,6 +102,7 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = { .enable_tmds_output = dce110_link_encoder_enable_tmds_output, .enable_dp_output = dce110_link_encoder_enable_dp_output, .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output, + .enable_lvds_output = dce110_link_encoder_enable_lvds_output, .disable_output = dce110_link_encoder_disable_output, .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings, .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern, @@ -814,6 +815,7 @@ bool dce110_link_encoder_validate_output_with_stream( enc110, &stream->timing); break; case SIGNAL_TYPE_EDP: + case SIGNAL_TYPE_LVDS: is_valid = (stream->timing. pixel_encoding == PIXEL_ENCODING_RGB) ? true : false; @@ -955,6 +957,38 @@ void dce110_link_encoder_enable_tmds_output( } } +/* TODO: still need depth or just pass in adjusted pixel clock? 
*/ +void dce110_link_encoder_enable_lvds_output( + struct link_encoder *enc, + enum clock_source_id clock_source, + uint32_t pixel_clock) +{ + struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* Enable the PHY */ + cntl.connector_obj_id = enc110->base.connector; + cntl.action = TRANSMITTER_CONTROL_ENABLE; + cntl.engine_id = enc->preferred_engine; + cntl.transmitter = enc110->base.transmitter; + cntl.pll_id = clock_source; + cntl.signal = SIGNAL_TYPE_LVDS; + cntl.lanes_number = 4; + + cntl.hpd_sel = enc110->base.hpd_source; + + cntl.pixel_clock = pixel_clock; + + result = link_transmitter_control(enc110, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} + /* enables DP PHY output */ void dce110_link_encoder_enable_dp_output( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 347069461a22..3c9368df4093 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -225,6 +225,12 @@ void dce110_link_encoder_enable_dp_mst_output( const struct dc_link_settings *link_settings, enum clock_source_id clock_source); +/* enables LVDS PHY output */ +void dce110_link_encoder_enable_lvds_output( + struct link_encoder *enc, + enum clock_source_id clock_source, + uint32_t pixel_clock); + /* disable PHY output */ void dce110_link_encoder_disable_output( struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 91642e684858..c47c81883d3c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -674,6 +674,28 @@ static void dce110_stream_encoder_dvi_set_stream_attribute( dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing); } +/* setup stream encoder in LVDS mode */ +static void dce110_stream_encoder_lvds_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing) +{ + struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); + struct bp_encoder_control cntl = {0}; + + cntl.action = ENCODER_CONTROL_SETUP; + cntl.engine_id = enc110->base.id; + cntl.signal = SIGNAL_TYPE_LVDS; + cntl.enable_dp_audio = false; + cntl.pixel_clock = crtc_timing->pix_clk_khz; + cntl.lanes_number = LANE_COUNT_FOUR; + + if (enc110->base.bp->funcs->encoder_control( + enc110->base.bp, &cntl) != BP_RESULT_OK) + return; + + ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB); +} + static void dce110_stream_encoder_set_mst_bandwidth( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp) @@ -1564,6 +1586,8 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = { dce110_stream_encoder_hdmi_set_stream_attribute, .dvi_set_stream_attribute = dce110_stream_encoder_dvi_set_stream_attribute, + .lvds_set_stream_attribute = + dce110_stream_encoder_lvds_set_stream_attribute, .set_mst_bandwidth = dce110_stream_encoder_set_mst_bandwidth, .update_hdmi_info_packets = diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index cf6df2e7beb2..58818920ed41 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ 
b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -131,6 +131,9 @@ struct link_encoder_funcs { void (*enable_dp_mst_output)(struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source); + void (*enable_lvds_output)(struct link_encoder *enc, + enum clock_source_id clock_source, + uint32_t pixel_clock); void (*disable_output)(struct link_encoder *link_enc, enum signal_type signal); void (*dp_set_lane_settings)(struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index cfa7ec9517ae..53a9b64df11a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -101,6 +101,10 @@ struct stream_encoder_funcs { struct dc_crtc_timing *crtc_timing, bool is_dual_link); + void (*lvds_set_stream_attribute)( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing); + void (*set_mst_bandwidth)( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp); diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 199c5db67cbc..03476b142d8e 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -68,6 +68,11 @@ static inline bool dc_is_embedded_signal(enum signal_type signal) return (signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_LVDS); } +static inline bool dc_is_lvds_signal(enum signal_type signal) +{ + return (signal == SIGNAL_TYPE_LVDS); +} + static inline bool dc_is_dvi_signal(enum signal_type signal) { switch (signal) { -- GitLab From 1849e73748be3c80bf752e4c4877fe90a8da4822 Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Wed, 22 Aug 2018 10:31:01 +0800 Subject: [PATCH 0495/1692] drm/amdgpu: amdgpu_kiq_reg_write_reg_wait() can be static Fixes: d790449835e6 ("drm/amdgpu: use kiq to do invalidate tlb") Reviewed-by: Emily Deng Signed-off-by: kbuild test robot Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6763570c0321..57db52388a8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -311,7 +311,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid) return req; } -signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, +static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) { -- GitLab From 12938fad234a3924cc9b82080db4f62fe1cf52bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 21 Aug 2018 10:45:29 +0200 Subject: [PATCH 0496/1692] drm/amdgpu: cleanup GPU recovery check a bit (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check if we should call the function instead of providing the forced flag. 
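With the check split out, callers now decide up front whether recovery should run at all; the caller-side pattern introduced by the hunks below is simply:

	/* taken from the callers converted in this patch */
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);

(the job timeout handler passes its job instead of NULL), while the debugfs "gpu recover" path keeps forcing recovery by calling amdgpu_device_gpu_recover() directly, without the check.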
v2: rebase on KFD changes (Alex) Signed-off-by: Christian König Acked-by: Andrey Grodzovsky Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 38 ++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 3 +- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 4 +-- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 3 +- 8 files changed, 38 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 19ef7711d944..340e40d03d54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1158,8 +1158,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) /* Common functions */ +bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); int amdgpu_device_gpu_recover(struct amdgpu_device *adev, - struct amdgpu_job* job, bool force); + struct amdgpu_job* job); void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f8bbbb3a9504..3dbe675b6fe1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -267,7 +267,8 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - amdgpu_device_gpu_recover(adev, NULL, false); + if (amdgpu_device_should_recover_gpu(adev)) + amdgpu_device_gpu_recover(adev, NULL); } int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c961e781430d..8f431740c424 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3243,32 +3243,44 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, return r; } +/** + * amdgpu_device_should_recover_gpu - check if we should try GPU recovery + * + * @adev: amdgpu device pointer + * + * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover + * a hung GPU. + */ +bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) +{ + if (!amdgpu_device_ip_check_soft_reset(adev)) { + DRM_INFO("Timeout, but no hardware hang detected.\n"); + return false; + } + + if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 && + !amdgpu_sriov_vf(adev))) { + DRM_INFO("GPU recovery disabled.\n"); + return false; + } + + return true; +} + /** * amdgpu_device_gpu_recover - reset the asic and recover scheduler * * @adev: amdgpu device pointer * @job: which job trigger hang - * @force: forces reset regardless of amdgpu_gpu_recovery * * Attempt to reset the GPU if it has hung (all asics). * Returns 0 for success or an error on failure. */ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, - struct amdgpu_job *job, bool force) + struct amdgpu_job *job) { int i, r, resched; - if (!force && !amdgpu_device_ip_check_soft_reset(adev)) { - DRM_INFO("No hardware hang detected. 
Did some blocks stall?\n"); - return 0; - } - - if (!force && (amdgpu_gpu_recovery == 0 || - (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) { - DRM_INFO("GPU recovery disabled.\n"); - return 0; - } - dev_info(adev->dev, "GPU reset begin!\n"); mutex_lock(&adev->lock_reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 7056925eb386..da36731460b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -701,7 +701,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; seq_printf(m, "gpu recover\n"); - amdgpu_device_gpu_recover(adev, NULL, true); + amdgpu_device_gpu_recover(adev, NULL); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 1abf5b5bac9e..b927e8798534 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -105,8 +105,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) struct amdgpu_device *adev = container_of(work, struct amdgpu_device, reset_work); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_gpu_recover(adev, NULL, false); + if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev)) + amdgpu_device_gpu_recover(adev, NULL); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 391e2f7c03aa..265ff90f4e01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -37,7 +37,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), ring->fence_drv.sync_seq); - amdgpu_device_gpu_recover(ring->adev, job, false); + if (amdgpu_device_should_recover_gpu(ring->adev)) + amdgpu_device_gpu_recover(ring->adev, job); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 078f70faedcb..8cbb4655896a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -266,8 +266,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery for world switch failure if no TDR */ - if (amdgpu_lockup_timeout == 0) - amdgpu_device_gpu_recover(adev, NULL, true); + if (amdgpu_device_should_recover_gpu(adev)) + amdgpu_device_gpu_recover(adev, NULL); } static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 9fc1c37344ce..842567b53df5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -521,7 +521,8 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_device_gpu_recover(adev, NULL, false); + if (amdgpu_device_should_recover_gpu(adev)) + amdgpu_device_gpu_recover(adev, NULL); } static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, -- GitLab From 262b9c392e7dbe264b075fa5ad6a0de5a403da7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 15 Aug 2018 19:10:40 +0200 Subject: [PATCH 0497/1692] drm/amdgpu: validate the VM root PD from the VM code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Preparation for following changes. 
This validates the root PD twice, but the overhead of that should be minimal. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 995ad5e83611..7d7d7e532246 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -291,11 +291,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { struct amdgpu_bo *bo = bo_base->bo; - if (bo->parent) { - r = validate(param, bo); - if (r) - break; + r = validate(param, bo); + if (r) + break; + if (bo->parent) { spin_lock(&glob->lru_lock); ttm_bo_move_to_lru_tail(&bo->tbo); if (bo->shadow) -- GitLab From 9a02ece43ee49efdfad19a3ca90c02d20f491031 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 17 Aug 2018 15:07:13 +0200 Subject: [PATCH 0498/1692] drm/amdgpu: cleanup VM handling in the CS a bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a helper function for getting the root PD addr and cleanup join the two VM related functions and cleanup the function name. No functional change. Signed-off-by: Christian König Reviewed-by: Huang Rui Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 160 ++++++++++++------------- 1 file changed, 74 insertions(+), 86 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 313ac971eaaf..5b70a30967ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -804,8 +804,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, amdgpu_bo_unref(&parser->uf_entry.robj); } -static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) +static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) { + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_device *adev = p->adev; struct amdgpu_vm *vm = &fpriv->vm; @@ -814,6 +815,71 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) struct amdgpu_bo *bo; int r; + /* Only for UVD/VCE VM emulation */ + if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) { + unsigned i, j; + + for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { + struct drm_amdgpu_cs_chunk_ib *chunk_ib; + struct amdgpu_bo_va_mapping *m; + struct amdgpu_bo *aobj = NULL; + struct amdgpu_cs_chunk *chunk; + uint64_t offset, va_start; + struct amdgpu_ib *ib; + uint8_t *kptr; + + chunk = &p->chunks[i]; + ib = &p->job->ibs[j]; + chunk_ib = chunk->kdata; + + if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) + continue; + + va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK; + r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); + if (r) { + DRM_ERROR("IB va_start is invalid\n"); + return r; + } + + if ((va_start + chunk_ib->ib_bytes) > + (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) { + DRM_ERROR("IB va_start+ib_bytes is invalid\n"); + return -EINVAL; + } + + /* the IB should be reserved at this point */ + r = amdgpu_bo_kmap(aobj, (void **)&kptr); + if (r) { + return r; + } + + offset = m->start * AMDGPU_GPU_PAGE_SIZE; + kptr += va_start - offset; + + if (ring->funcs->parse_cs) { + memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); 
+ amdgpu_bo_kunmap(aobj); + + r = amdgpu_ring_parse_cs(ring, p, j); + if (r) + return r; + } else { + ib->ptr = (uint32_t *)kptr; + r = amdgpu_ring_patch_cs_in_place(ring, p, j); + amdgpu_bo_kunmap(aobj); + if (r) + return r; + } + + j++; + } + } + + if (!p->job->vm) + return amdgpu_cs_sync_rings(p); + + r = amdgpu_vm_clear_freed(adev, vm, NULL); if (r) return r; @@ -876,6 +942,12 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (r) return r; + r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv); + if (r) + return r; + + p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo); + if (amdgpu_vm_debug) { /* Invalidate all BOs to test for userspace bugs */ amdgpu_bo_list_for_each_entry(e, p->bo_list) { @@ -887,90 +959,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } } - return r; -} - -static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, - struct amdgpu_cs_parser *p) -{ - struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); - struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_vm *vm = &fpriv->vm; - int r; - - /* Only for UVD/VCE VM emulation */ - if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) { - unsigned i, j; - - for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { - struct drm_amdgpu_cs_chunk_ib *chunk_ib; - struct amdgpu_bo_va_mapping *m; - struct amdgpu_bo *aobj = NULL; - struct amdgpu_cs_chunk *chunk; - uint64_t offset, va_start; - struct amdgpu_ib *ib; - uint8_t *kptr; - - chunk = &p->chunks[i]; - ib = &p->job->ibs[j]; - chunk_ib = chunk->kdata; - - if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) - continue; - - va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK; - r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); - if (r) { - DRM_ERROR("IB va_start is invalid\n"); - return r; - } - - if ((va_start + chunk_ib->ib_bytes) > - (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) { - DRM_ERROR("IB va_start+ib_bytes is invalid\n"); - return -EINVAL; - } - - /* the IB should be reserved at this point */ - r = amdgpu_bo_kmap(aobj, (void **)&kptr); - if (r) { - return r; - } - - offset = m->start * AMDGPU_GPU_PAGE_SIZE; - kptr += va_start - offset; - - if (ring->funcs->parse_cs) { - memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); - amdgpu_bo_kunmap(aobj); - - r = amdgpu_ring_parse_cs(ring, p, j); - if (r) - return r; - } else { - ib->ptr = (uint32_t *)kptr; - r = amdgpu_ring_patch_cs_in_place(ring, p, j); - amdgpu_bo_kunmap(aobj); - if (r) - return r; - } - - j++; - } - } - - if (p->job->vm) { - p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo); - - r = amdgpu_bo_vm_update_pte(p); - if (r) - return r; - - r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv); - if (r) - return r; - } - return amdgpu_cs_sync_rings(p); } @@ -1309,7 +1297,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) for (i = 0; i < parser.job->num_ibs; i++) trace_amdgpu_cs(&parser, i); - r = amdgpu_cs_ib_vm_chunk(adev, &parser); + r = amdgpu_cs_vm_handling(&parser); if (r) goto out; -- GitLab From cbd528514276dce3a8057cff18ef328c35b49d95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 21 Aug 2018 16:47:01 +0200 Subject: [PATCH 0499/1692] drm/amdgpu: move setting the GART addr into TTM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move setting the GART addr for window based copies into the TTM code who uses it. 
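Concretely, amdgpu_job_alloc_with_ib() no longer sets vm_pd_addr unconditionally; amdgpu_copy_buffer() now sets it only when a flush is actually needed, as in the hunk below:

	/* in amdgpu_copy_buffer(), per the hunk below */
	if (vm_needs_flush) {
		job->vm_pd_addr = adev->gart.table_addr;
		job->vm_needs_flush = true;
	}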
Signed-off-by: Christian König Reviewed-by: Huang Rui Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 ++++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 265ff90f4e01..facc0f08d804 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -83,8 +83,6 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); if (r) kfree(*job); - else - (*job)->vm_pd_addr = adev->gart.table_addr; return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c6611cff64c8..b4333f60ed8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2048,7 +2048,10 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, if (r) return r; - job->vm_needs_flush = vm_needs_flush; + if (vm_needs_flush) { + job->vm_pd_addr = adev->gart.table_addr; + job->vm_needs_flush = true; + } if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED, -- GitLab From 1123b989c50613ea555c51ced26257e54c6fa029 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 21 Aug 2018 17:07:47 +0200 Subject: [PATCH 0500/1692] drm/amdgpu: rename gart.robj into gart.bo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sed -i "s/gart.robj/gart.bo/" drivers/gpu/drm/amd/amdgpu/*.c sed -i "s/gart.robj/gart.bo/" drivers/gpu/drm/amd/amdgpu/*.h Just cleaning up radeon leftovers. Signed-off-by: Christian König Reviewed-by: Huang Rui Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 32 ++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4 +-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4 +-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4 +-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 +-- 6 files changed, 25 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index a54d5655a191..f5cb5e2856c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -112,7 +112,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) { int r; - if (adev->gart.robj == NULL) { + if (adev->gart.bo == NULL) { struct amdgpu_bo_param bp; memset(&bp, 0, sizeof(bp)); @@ -123,7 +123,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; bp.type = ttm_bo_type_kernel; bp.resv = NULL; - r = amdgpu_bo_create(adev, &bp, &adev->gart.robj); + r = amdgpu_bo_create(adev, &bp, &adev->gart.bo); if (r) { return r; } @@ -145,19 +145,19 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev) { int r; - r = amdgpu_bo_reserve(adev->gart.robj, false); + r = amdgpu_bo_reserve(adev->gart.bo, false); if (unlikely(r != 0)) return r; - r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM); + r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM); if (r) { - amdgpu_bo_unreserve(adev->gart.robj); + amdgpu_bo_unreserve(adev->gart.bo); return r; } - r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr); + r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr); if (r) - amdgpu_bo_unpin(adev->gart.robj); - 
amdgpu_bo_unreserve(adev->gart.robj); - adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj); + amdgpu_bo_unpin(adev->gart.bo); + amdgpu_bo_unreserve(adev->gart.bo); + adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); return r; } @@ -173,14 +173,14 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev) { int r; - if (adev->gart.robj == NULL) { + if (adev->gart.bo == NULL) { return; } - r = amdgpu_bo_reserve(adev->gart.robj, true); + r = amdgpu_bo_reserve(adev->gart.bo, true); if (likely(r == 0)) { - amdgpu_bo_kunmap(adev->gart.robj); - amdgpu_bo_unpin(adev->gart.robj); - amdgpu_bo_unreserve(adev->gart.robj); + amdgpu_bo_kunmap(adev->gart.bo); + amdgpu_bo_unpin(adev->gart.bo); + amdgpu_bo_unreserve(adev->gart.bo); adev->gart.ptr = NULL; } } @@ -196,10 +196,10 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev) */ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) { - if (adev->gart.robj == NULL) { + if (adev->gart.bo == NULL) { return; } - amdgpu_bo_unref(&adev->gart.robj); + amdgpu_bo_unref(&adev->gart.bo); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index 9f9e9dc87da1..d7b7c2d408d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -41,7 +41,7 @@ struct amdgpu_bo; struct amdgpu_gart { u64 table_addr; - struct amdgpu_bo *robj; + struct amdgpu_bo *bo; void *ptr; unsigned num_gpu_pages; unsigned num_cpu_pages; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 0a0a4dcbea2c..b4302aaa1c14 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -497,7 +497,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) int r, i; u32 field; - if (adev->gart.robj == NULL) { + if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -588,7 +588,7 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev) { int r; - if (adev->gart.robj) { + if (adev->gart.bo) { dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n"); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 93ea19456e91..b41b8515670d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -605,7 +605,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) int r, i; u32 tmp, field; - if (adev->gart.robj == NULL) { + if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -706,7 +706,7 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev) { int r; - if (adev->gart.robj) { + if (adev->gart.bo) { WARN(1, "R600 PCIE GART already initialized\n"); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 24dd86725b6e..d2fc97a2ab00 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -810,7 +810,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) int r, i; u32 tmp, field; - if (adev->gart.robj == NULL) { + if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } @@ -928,7 +928,7 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev) { int r; - if (adev->gart.robj) { + if (adev->gart.bo) { WARN(1, "R600 PCIE GART already initialized\n"); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 57db52388a8b..c9550b11e19a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -836,7 +836,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) { int r; - if (adev->gart.robj) { + if (adev->gart.bo) { WARN(1, "VEGA10 PCIE GART already initialized\n"); return 0; } @@ -1062,7 +1062,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) golden_settings_vega10_hdp, ARRAY_SIZE(golden_settings_vega10_hdp)); - if (adev->gart.robj == NULL) { + if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } -- GitLab From 4e830fb1b5f589352e711fc0df515c34e978e1a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 21 Aug 2018 17:18:22 +0200 Subject: [PATCH 0501/1692] drm/amdgpu: remove gart.table_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can easily figure out the address on the fly. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 7 +++---- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 9 +++++---- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 9 +++++---- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 9 +++++---- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 7 +++---- 9 files changed, 24 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index f5cb5e2856c1..11fea28f8ad3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -157,7 +157,6 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev) if (r) amdgpu_bo_unpin(adev->gart.bo); amdgpu_bo_unreserve(adev->gart.bo); - adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index d7b7c2d408d5..9ff62887e4e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -40,7 +40,6 @@ struct amdgpu_bo; #define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE) struct amdgpu_gart { - u64 table_addr; struct amdgpu_bo *bo; void *ptr; unsigned num_gpu_pages; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b4333f60ed8b..e7f73deed975 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1988,7 +1988,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, src_addr = num_dw * 4; src_addr += job->ibs[0].gpu_addr; - dst_addr = adev->gart.table_addr; + dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo); dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, num_bytes); @@ -2049,7 +2049,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, return r; if (vm_needs_flush) { - job->vm_pd_addr = adev->gart.table_addr; + job->vm_pd_addr = amdgpu_bo_gpu_offset(adev->gart.bo); job->vm_needs_flush = true; } if (resv) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index acfbd2d749cf..2baab7e69ef5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -37,11 +37,10 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev) { - uint64_t value; + uint64_t value = amdgpu_bo_gpu_offset(adev->gart.bo); - BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL)); - value = adev->gart.table_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; + BUG_ON(value & (~0x0000FFFFFFFFF000ULL)); + value -= adev->gmc.vram_start + adev->vm_manager.vram_base_offset; value &= 0x0000FFFFFFFFF000ULL; value |= 0x1; /*valid bit*/ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index b4302aaa1c14..543287e5d67b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -494,6 +494,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) { + uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); int r, i; u32 field; @@ -532,7 +533,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page_addr >> 12)); WREG32(mmVM_CONTEXT0_CNTL2, 0); @@ -556,10 +557,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) for (i = 1; i < 16; i++) { if (i < 8) WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, - adev->gart.table_addr >> 12); + table_addr >> 12); else WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, - adev->gart.table_addr >> 12); + table_addr >> 12); } /* enable context1-15 */ @@ -579,7 +580,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) gmc_v6_0_flush_gpu_tlb(adev, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), - (unsigned long long)adev->gart.table_addr); + (unsigned long long)table_addr); adev->gart.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index b41b8515670d..c88708abe016 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -602,6 +602,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) */ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) { + uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); int r, i; u32 tmp, field; @@ -643,7 +644,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page_addr >> 12)); WREG32(mmVM_CONTEXT0_CNTL2, 0); @@ -667,10 +668,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) for (i = 1; i < 16; i++) { if (i < 8) WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, - adev->gart.table_addr >> 12); + table_addr >> 12); else WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, - adev->gart.table_addr >> 12); + table_addr >> 12); } /* 
enable context1-15 */ @@ -697,7 +698,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) gmc_v7_0_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), - (unsigned long long)adev->gart.table_addr); + (unsigned long long)table_addr); adev->gart.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index d2fc97a2ab00..8213ea1a6cbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -807,6 +807,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) */ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) { + uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); int r, i; u32 tmp, field; @@ -864,7 +865,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) /* setup context0 */ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page_addr >> 12)); WREG32(mmVM_CONTEXT0_CNTL2, 0); @@ -888,10 +889,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) for (i = 1; i < 16; i++) { if (i < 8) WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, - adev->gart.table_addr >> 12); + table_addr >> 12); else WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, - adev->gart.table_addr >> 12); + table_addr >> 12); } /* enable context1-15 */ @@ -919,7 +920,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) gmc_v8_0_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), - (unsigned long long)adev->gart.table_addr); + (unsigned long long)table_addr); adev->gart.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c9550b11e19a..dc48e19d01f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1106,7 +1106,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), - (unsigned long long)adev->gart.table_addr); + (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); adev->gart.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index e70a0d4d6db4..800ec4687f13 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -47,11 +47,10 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev) { - uint64_t value; + uint64_t value = amdgpu_bo_gpu_offset(adev->gart.bo); - BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL)); - value = adev->gart.table_addr - adev->gmc.vram_start + - adev->vm_manager.vram_base_offset; + BUG_ON(value & (~0x0000FFFFFFFFF000ULL)); + value -= adev->gmc.vram_start + adev->vm_manager.vram_base_offset; value &= 0x0000FFFFFFFFF000ULL; value |= 0x1; /* valid bit */ -- GitLab From 11c3a249ff7a1c710011bd06a451956f2a40c30c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 22 Aug 2018 12:22:14 +0200 Subject: [PATCH 0502/1692] drm/amdgpu: add amdgpu_gmc_pd_addr 
helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a helper to get the root PD address and remove the workarounds from the GMC9 code for that. Signed-off-by: Christian König Acked-by: Felix Kuehling Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 47 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 7 +-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 -- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 7 +-- 9 files changed, 56 insertions(+), 23 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 860cb8731c7c..d2bafabe585d 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -51,7 +51,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o + amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ + amdgpu_gmc.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index f92597c292fe..2ef6e8557b65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -364,7 +364,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) struct amdgpu_bo *pd = vm->root.base.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); struct amdgpu_vm_parser param; - uint64_t addr, flags = AMDGPU_PTE_VALID; int ret; param.domain = AMDGPU_GEM_DOMAIN_VRAM; @@ -383,9 +382,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) return ret; } - addr = amdgpu_bo_gpu_offset(vm->root.base.bo); - amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags); - vm->pd_phys_addr = addr; + vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); if (vm->use_cpu_for_update) { ret = amdgpu_bo_kmap(pd, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5b70a30967ec..fd3902983195 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -946,7 +946,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo); + p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); if (amdgpu_vm_debug) { /* Invalidate all BOs to test for userspace bugs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c new file mode 100644 index 000000000000..36058feac64f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -0,0 +1,47 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ + +#include "amdgpu.h" + +/** + * amdgpu_gmc_pd_addr - return the address of the root directory + * + */ +uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + uint64_t pd_addr; + + pd_addr = amdgpu_bo_gpu_offset(bo); + /* TODO: move that into ASIC specific code */ + if (adev->asic_type >= CHIP_VEGA10) { + uint64_t flags = AMDGPU_PTE_VALID; + + amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags); + pd_addr |= flags; + } + return pd_addr; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 64391d811a82..1c6974a33467 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -133,4 +133,6 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc) return (gmc->real_vram_size == gmc->visible_vram_size); } +uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e7f73deed975..eb08a03b82a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2049,7 +2049,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, return r; if (vm_needs_flush) { - job->vm_pd_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); job->vm_needs_flush = true; } if (resv) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 2baab7e69ef5..3403ded39d13 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -37,12 +37,7 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev) { - uint64_t value = amdgpu_bo_gpu_offset(adev->gart.bo); - - BUG_ON(value & (~0x0000FFFFFFFFF000ULL)); - value -= adev->gmc.vram_start + adev->vm_manager.vram_base_offset; - value &= 0x0000FFFFFFFFF000ULL; - value |= 0x1; /*valid bit*/ + uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, lower_32_bits(value)); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index dc48e19d01f8..a82b3eb429e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -429,12 +429,8 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; uint32_t req = gmc_v9_0_get_invalidate_req(vmid); - uint64_t flags = AMDGPU_PTE_VALID; unsigned eng = ring->vm_inv_eng; - amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags); - pd_addr |= flags; - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 800ec4687f13..5f6a9c85488f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -47,12 +47,7 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev) { - uint64_t value = amdgpu_bo_gpu_offset(adev->gart.bo); - - BUG_ON(value & (~0x0000FFFFFFFFF000ULL)); - value -= adev->gmc.vram_start + adev->vm_manager.vram_base_offset; - value &= 0x0000FFFFFFFFF000ULL; - value |= 0x1; /* valid bit */ + uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, lower_32_bits(value)); -- GitLab From 8c7655a0fdd32ab39cfef604403dbe1013df213b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 6 Aug 2018 16:46:26 +0800 Subject: [PATCH 0503/1692] drm/ttm: add helper structures for bulk moves on lru list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add bulk move pos to store the pointer of first and last buffer object. The list in between will be bulk moved on lru list. Signed-off-by: Christian König Signed-off-by: Huang Rui Tested-by: Mike Lothian Tested-by: Dieter Nützel Acked-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- include/drm/ttm/ttm_bo_driver.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3234cc322e70..e4fee8e02559 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -490,6 +490,34 @@ struct ttm_bo_device { bool no_retry; }; +/** + * struct ttm_lru_bulk_move_pos + * + * @first: first BO in the bulk move range + * @last: last BO in the bulk move range + * + * Positions for a lru bulk move. + */ +struct ttm_lru_bulk_move_pos { + struct ttm_buffer_object *first; + struct ttm_buffer_object *last; +}; + +/** + * struct ttm_lru_bulk_move + * + * @tt: first/last lru entry for BOs in the TT domain + * @vram: first/last lru entry for BOs in the VRAM domain + * @swap: first/last lru entry for BOs on the swap list + * + * Helper structure for bulk moves on the LRU list. + */ +struct ttm_lru_bulk_move { + struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY]; + struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY]; + struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY]; +}; + /** * ttm_flag_masked * -- GitLab From 9a2779528eddacf0123bfd7308b71141b54cc619 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 6 Aug 2018 17:05:30 +0800 Subject: [PATCH 0504/1692] drm/ttm: revise ttm_bo_move_to_lru_tail to support bulk moves MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When move a BO to the end of LRU, it need remember the BO positions. Make sure all moved bo in between "first" and "last". And they will be bulk moving together. 
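A rough caller-side sketch of the extended interface (struct ttm_lru_bulk_move and the new bulk parameter are the ones added by this patch; the wrapper function, its name and the assumption that every BO is already reserved and sitting on an LRU are illustrative only):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Move a set of already-reserved BOs to the LRU tail while recording the
 * first/last entry per domain and priority in @bulk, so that the whole
 * range can later be spliced to the tail in a single operation (see the
 * follow-up patch adding ttm_bo_bulk_move_lru_tail()).
 */
static void example_record_bulk_positions(struct ttm_bo_global *glob,
                                          struct ttm_buffer_object **bos,
                                          unsigned int count,
                                          struct ttm_lru_bulk_move *bulk)
{
        unsigned int i;

        memset(bulk, 0, sizeof(*bulk)); /* forget stale positions */

        spin_lock(&glob->lru_lock);
        for (i = 0; i < count; i++)
                ttm_bo_move_to_lru_tail(bos[i], bulk);
        spin_unlock(&glob->lru_lock);
}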
Signed-off-by: Christian König Signed-off-by: Huang Rui Tested-by: Mike Lothian Tested-by: Dieter Nützel Acked-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++---- drivers/gpu/drm/ttm/ttm_bo.c | 26 +++++++++++++++++++++++++- include/drm/ttm/ttm_bo_api.h | 6 +++++- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7d7d7e532246..d12bffa5f70c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -297,9 +297,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (bo->parent) { spin_lock(&glob->lru_lock); - ttm_bo_move_to_lru_tail(&bo->tbo); + ttm_bo_move_to_lru_tail(&bo->tbo, NULL); if (bo->shadow) - ttm_bo_move_to_lru_tail(&bo->shadow->tbo); + ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL); spin_unlock(&glob->lru_lock); } @@ -319,9 +319,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!bo->parent) continue; - ttm_bo_move_to_lru_tail(&bo->tbo); + ttm_bo_move_to_lru_tail(&bo->tbo, NULL); if (bo->shadow) - ttm_bo_move_to_lru_tail(&bo->shadow->tbo); + ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL); } spin_unlock(&glob->lru_lock); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7c484729f9b2..7117b6b1e223 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -214,12 +214,36 @@ void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) } EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); -void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) +static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, + struct ttm_buffer_object *bo) +{ + if (!pos->first) + pos->first = bo; + pos->last = bo; +} + +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_lru_bulk_move *bulk) { reservation_object_assert_held(bo->resv); ttm_bo_del_from_lru(bo); ttm_bo_add_to_lru(bo); + + if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { + switch (bo->mem.mem_type) { + case TTM_PL_TT: + ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo); + break; + + case TTM_PL_VRAM: + ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo); + break; + } + if (bo->ttm && !(bo->ttm->page_flags & + (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) + ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo); + } } EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index a01ba2032f0e..0d4eb81423ee 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -51,6 +51,8 @@ struct ttm_placement; struct ttm_place; +struct ttm_lru_bulk_move; + /** * struct ttm_bus_placement * @@ -405,12 +407,14 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); * ttm_bo_move_to_lru_tail * * @bo: The buffer object. + * @bulk: optional bulk move structure to remember BO positions * * Move this BO to the tail of all lru lists used to lookup and reserve an * object. This function must be called with struct ttm_bo_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. 
*/ -void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_lru_bulk_move *bulk); /** * ttm_bo_lock_delayed_workqueue -- GitLab From 7748e2dcdaad901776c0d78e76e066403e95513c Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 6 Aug 2018 17:28:35 +0800 Subject: [PATCH 0505/1692] drm/ttm: add bulk move function on LRU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function allow us to bulk move a group of BOs to the tail of their LRU. The positions of group of BOs are stored on the (first, last) bulk_move_pos structure. Signed-off-by: Christian König Signed-off-by: Huang Rui Tested-by: Mike Lothian Tested-by: Dieter Nützel Acked-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 52 ++++++++++++++++++++++++++++++++++++ include/drm/ttm/ttm_bo_api.h | 10 +++++++ 2 files changed, 62 insertions(+) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7117b6b1e223..39d9d559b279 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -247,6 +247,58 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); +static void ttm_bo_bulk_move_helper(struct ttm_lru_bulk_move_pos *pos, + struct list_head *lru, bool is_swap) +{ + struct list_head entries, before; + struct list_head *list1, *list2; + + list1 = is_swap ? &pos->last->swap : &pos->last->lru; + list2 = is_swap ? pos->first->swap.prev : pos->first->lru.prev; + + list_cut_position(&entries, lru, list1); + list_cut_position(&before, &entries, list2); + list_splice(&before, lru); + list_splice_tail(&entries, lru); +} + +void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) +{ + unsigned i; + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + struct ttm_mem_type_manager *man; + + if (!bulk->tt[i].first) + continue; + + man = &bulk->tt[i].first->bdev->man[TTM_PL_TT]; + ttm_bo_bulk_move_helper(&bulk->tt[i], &man->lru[i], false); + } + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + struct ttm_mem_type_manager *man; + + if (!bulk->vram[i].first) + continue; + + man = &bulk->vram[i].first->bdev->man[TTM_PL_VRAM]; + ttm_bo_bulk_move_helper(&bulk->vram[i], &man->lru[i], false); + } + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i]; + struct list_head *lru; + + if (!pos->first) + continue; + + lru = &pos->first->bdev->glob->swap_lru[i]; + ttm_bo_bulk_move_helper(&bulk->swap[i], lru, true); + } +} +EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); + static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, bool evict, struct ttm_operation_ctx *ctx) diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0d4eb81423ee..8c19470785e2 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -416,6 +416,16 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, struct ttm_lru_bulk_move *bulk); +/** + * ttm_bo_bulk_move_lru_tail + * + * @bulk: bulk move structure + * + * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that + * BO order never changes. Should be called with ttm_bo_global::lru_lock held. 
+ */
+void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
+
 /**
  * ttm_bo_lock_delayed_workqueue
  *
--
GitLab


From f921661bd4a112f80d57bbfb3e792da63787f4b0 Mon Sep 17 00:00:00 2001
From: Huang Rui
Date: Mon, 6 Aug 2018 10:57:08 +0800
Subject: [PATCH 0506/1692] drm/amdgpu: use bulk moves for efficient VM LRU handling (v6)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

I am continuing the bulk move work based on Christian's proposal.

Background: the amdgpu driver moves all PD/PT and per-VM BOs onto the idle
list and then moves each of them to the end of the LRU list one by one.
That pushes a large number of BOs to the LRU tail individually and hurts
performance noticeably.

Christian then provided a workaround that stops moving PD/PT BOs on the
LRU:

Commit 0bbf32026cf5ba41e9922b30e26e1bed1ecd38ae
("drm/amdgpu: band aid validating VM PTs")

However, the proper solution is to bulk move all PD/PT and per-VM BOs on
the LRU instead of moving them one by one.

Whenever amdgpu_vm_validate_pt_bos() is called and there are BOs which need
to be validated, all of them are moved together to the end of the LRU
without dropping the LRU lock, and the beginning and end of this block in
the LRU list are noted. When amdgpu_vm_validate_pt_bos() is later called
with nothing to do, the BOs are no longer moved one by one; instead the LRU
list is cut into pieces so that everything is bulk moved to the end in a
single operation.

Test data:
+--------------+-----------------+-----------+---------------------------------------+
|              |The Talos        |Clpeak(OCL)|BusSpeedReadback(OCL)                  |
|              |Principle(Vulkan)|           |                                       |
+------------------------------------------------------------------------------------+
|              |                 |           |0.319 ms(1k) 0.314 ms(2K) 0.308 ms(4K) |
| Original     | 147.7 FPS       | 76.86 us  |0.307 ms(8K) 0.310 ms(16K)             |
+------------------------------------------------------------------------------------+
| Original + WA|                 |           |0.254 ms(1K) 0.241 ms(2K)              |
|(don't move   | 162.1 FPS       | 42.15 us  |0.230 ms(4K) 0.223 ms(8K) 0.204 ms(16K)|
|PT BOs on LRU)|                 |           |                                       |
+------------------------------------------------------------------------------------+
| Bulk move    | 163.1 FPS       | 40.52 us  |0.244 ms(1K) 0.252 ms(2K) 0.213 ms(4K) |
|              |                 |           |0.214 ms(8K) 0.225 ms(16K)             |
+--------------+-----------------+-----------+---------------------------------------+

Tested with the three benchmarks above (Vulkan and OpenCL), the results show
a visible improvement over the original code, and are even slightly better
than the original with the workaround applied.

v2: move all BOs, including those on the idle, relocated and moved lists,
    to the end of the LRU and keep them together.
v3: remove an unused parameter and use list_for_each_entry instead of the
    _safe variant.
v4: call amdgpu_vm_move_to_lru_tail() after command submission, when all
    BOs are back on the idle list.
v5: remove amdgpu_vm_move_to_lru_tail_by_list(), use bulk_moveable instead
    of validated, and move ttm_bo_bulk_move_lru_tail() into
    amdgpu_vm_move_to_lru_tail() as well.
v6: clean up and fix the return value.
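Condensed, the per-submission flow introduced below looks as follows (the function and fields are the ones added in the hunks that follow; shadow BO handling, the bulk_moveable bookkeeping in amdgpu_vm_validate_pt_bos() and error handling are trimmed):

/* called from amdgpu_cs_ioctl() right after amdgpu_cs_submit() */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm)
{
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;

        if (vm->bulk_moveable) {
                /* fast path: splice the remembered block in one operation */
                spin_lock(&glob->lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&glob->lru_lock);
                return;
        }

        /* slow path: re-record the block while moving the idle BOs */
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo_base, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;

                if (!bo->parent)
                        continue;

                ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
                /* shadow BO handling trimmed here */
        }
        spin_unlock(&glob->lru_lock);

        vm->bulk_moveable = true;
}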
Signed-off-by: Christian König Signed-off-by: Huang Rui Tested-by: Mike Lothian Tested-by: Dieter Nützel Acked-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66 +++++++++++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 11 ++++- 3 files changed, 57 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index fd3902983195..b62bbe71662d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1259,6 +1259,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_cs *cs = data; struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; + struct amdgpu_fpriv *fpriv; int i, r; if (!adev->accel_working) @@ -1303,6 +1304,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_submit(&parser, cs); + fpriv = filp->driver_priv; + amdgpu_vm_move_to_lru_tail(adev, &fpriv->vm); out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d12bffa5f70c..7b0fdf5c79f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -267,6 +267,47 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, list_add(&entry->tv.head, validated); } +/** + * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU + * + * @adev: amdgpu device pointer + * @vm: vm providing the BOs + * + * Move all BOs to the end of LRU and remember their positions to put them + * together. + */ +void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, + struct amdgpu_vm *vm) +{ + struct ttm_bo_global *glob = adev->mman.bdev.glob; + struct amdgpu_vm_bo_base *bo_base; + + if (vm->bulk_moveable) { + spin_lock(&glob->lru_lock); + ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); + spin_unlock(&glob->lru_lock); + return; + } + + memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); + + spin_lock(&glob->lru_lock); + list_for_each_entry(bo_base, &vm->idle, vm_status) { + struct amdgpu_bo *bo = bo_base->bo; + + if (!bo->parent) + continue; + + ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); + if (bo->shadow) + ttm_bo_move_to_lru_tail(&bo->shadow->tbo, + &vm->lru_bulk_move); + } + spin_unlock(&glob->lru_lock); + + vm->bulk_moveable = true; +} + /** * amdgpu_vm_validate_pt_bos - validate the page table BOs * @@ -284,10 +325,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*validate)(void *p, struct amdgpu_bo *bo), void *param) { - struct ttm_bo_global *glob = adev->mman.bdev.glob; struct amdgpu_vm_bo_base *bo_base, *tmp; int r = 0; + vm->bulk_moveable &= list_empty(&vm->evicted); + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { struct amdgpu_bo *bo = bo_base->bo; @@ -295,14 +337,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) break; - if (bo->parent) { - spin_lock(&glob->lru_lock); - ttm_bo_move_to_lru_tail(&bo->tbo, NULL); - if (bo->shadow) - ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL); - spin_unlock(&glob->lru_lock); - } - if (bo->tbo.type != ttm_bo_type_kernel) { spin_lock(&vm->moved_lock); list_move(&bo_base->vm_status, &vm->moved); @@ -312,19 +346,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, } } - 
spin_lock(&glob->lru_lock); - list_for_each_entry(bo_base, &vm->idle, vm_status) { - struct amdgpu_bo *bo = bo_base->bo; - - if (!bo->parent) - continue; - - ttm_bo_move_to_lru_tail(&bo->tbo, NULL); - if (bo->shadow) - ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL); - } - spin_unlock(&glob->lru_lock); - return r; } @@ -2590,6 +2611,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; vm->pte_support_ats = false; + vm->bulk_moveable = true; if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 1162c2bf3138..14bafe771c9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -29,6 +29,7 @@ #include #include #include +#include #include "amdgpu_sync.h" #include "amdgpu_ring.h" @@ -247,6 +248,11 @@ struct amdgpu_vm { /* Some basic info about the task */ struct amdgpu_task_info task_info; + + /* Store positions of group of BOs */ + struct ttm_lru_bulk_move lru_bulk_move; + /* mark whether can do the bulk move */ + bool bulk_moveable; }; struct amdgpu_vm_manager { @@ -354,8 +360,11 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, - struct amdgpu_task_info *task_info); + struct amdgpu_task_info *task_info); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); +void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, + struct amdgpu_vm *vm); + #endif -- GitLab From 07e6d3f03b5ff7b93af37932ee0f4b775812274f Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 31 Jul 2018 21:24:40 +0800 Subject: [PATCH 0507/1692] drm/amdgpu: move PD/PT bos on LRU again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The new bulk moving functionality is ready, the overhead of moving PD/PT bos to LRU is fixed. So move them on LRU again. Signed-off-by: Huang Rui Tested-by: Mike Lothian Tested-by: Dieter Nützel Acked-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7b0fdf5c79f9..7e644bc6793e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1106,7 +1106,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm_bo_base, vm_status); bo_base->moved = false; - list_del_init(&bo_base->vm_status); + list_move(&bo_base->vm_status, &vm->idle); bo = bo_base->bo->parent; if (!bo) -- GitLab From 973e5405f2f67ddbb2bf07b3ffc71908a37fea8e Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 13 Aug 2018 16:01:10 +0200 Subject: [PATCH 0508/1692] xen/blkback: don't keep persistent grants too long MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Persistent grants are allocated until a threshold per ring is being reached. Those grants won't be freed until the ring is being destroyed meaning there will be resources kept busy which might no longer be used. Instead of freeing only persistent grants until the threshold is reached add a timestamp and remove all persistent grants not having been in use for a minute. 
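Pulled out of the hunks below for readability, the aging logic reduces to a jiffies comparison; last_used is stamped in put_persistent_gnt() whenever a grant becomes idle, and xen_blkif_pgrant_timeout backs the new persistent_grant_unused_seconds module parameter:

/* 0 keeps the old behaviour of never timing out idle persistent grants */
static unsigned int xen_blkif_pgrant_timeout = 60;

static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
        return xen_blkif_pgrant_timeout &&
               (jiffies - persistent_gnt->last_used >=
                HZ * xen_blkif_pgrant_timeout);
}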
Signed-off-by: Juergen Gross Reviewed-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- .../ABI/testing/sysfs-driver-xen-blkback | 10 +++ drivers/block/xen-blkback/blkback.c | 88 ++++++++++--------- drivers/block/xen-blkback/common.h | 8 +- 3 files changed, 60 insertions(+), 46 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback index 8bb43b66eb55..4e7babb3ba1f 100644 --- a/Documentation/ABI/testing/sysfs-driver-xen-blkback +++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback @@ -15,3 +15,13 @@ Description: blkback. If the frontend tries to use more than max_persistent_grants, the LRU kicks in and starts removing 5% of max_persistent_grants every 100ms. + +What: /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds +Date: August 2018 +KernelVersion: 4.19 +Contact: Roger Pau Monné +Description: + How long a persistent grant is allowed to remain + allocated without being in use. The time is in + seconds, 0 means indefinitely long. + The default is 60 seconds. diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index b55b245e8052..9eae7b243f68 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -83,6 +83,18 @@ module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644); MODULE_PARM_DESC(max_persistent_grants, "Maximum number of grants to map persistently"); +/* + * How long a persistent grant is allowed to remain allocated without being in + * use. The time is in seconds, 0 means indefinitely long. + */ + +static unsigned int xen_blkif_pgrant_timeout = 60; +module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout, + uint, 0644); +MODULE_PARM_DESC(persistent_grant_unused_seconds, + "Time in seconds an unused persistent grant is allowed to " + "remain allocated. Default is 60, 0 means unlimited."); + /* * Maximum number of rings/queues blkback supports, allow as many queues as there * are CPUs if user has not specified a value. 
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644); /* Number of free pages to remove on each call to gnttab_free_pages */ #define NUM_BATCH_FREE_PAGES 10 +static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt) +{ + return xen_blkif_pgrant_timeout && + (jiffies - persistent_gnt->last_used >= + HZ * xen_blkif_pgrant_timeout); +} + static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) { unsigned long flags; @@ -278,7 +297,7 @@ static void put_persistent_gnt(struct xen_blkif_ring *ring, { if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) pr_alert_ratelimited("freeing a grant already unused\n"); - set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); + persistent_gnt->last_used = jiffies; clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); atomic_dec(&ring->persistent_gnt_in_use); } @@ -371,26 +390,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) struct persistent_gnt *persistent_gnt; struct rb_node *n; unsigned int num_clean, total; - bool scan_used = false, clean_used = false; + bool scan_used = false; struct rb_root *root; - if (ring->persistent_gnt_c < xen_blkif_max_pgrants || - (ring->persistent_gnt_c == xen_blkif_max_pgrants && - !ring->blkif->vbd.overflow_max_grants)) { - goto out; - } - if (work_busy(&ring->persistent_purge_work)) { pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); goto out; } - num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; - num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; - num_clean = min(ring->persistent_gnt_c, num_clean); - if ((num_clean == 0) || - (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use)))) - goto out; + if (ring->persistent_gnt_c < xen_blkif_max_pgrants || + (ring->persistent_gnt_c == xen_blkif_max_pgrants && + !ring->blkif->vbd.overflow_max_grants)) { + num_clean = 0; + } else { + num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; + num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + + num_clean; + num_clean = min(ring->persistent_gnt_c, num_clean); + pr_debug("Going to purge at least %u persistent grants\n", + num_clean); + } /* * At this point, we can assure that there will be no calls @@ -401,9 +420,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) * number of grants. 
*/ - total = num_clean; - - pr_debug("Going to purge %u persistent grants\n", num_clean); + total = 0; BUG_ON(!list_empty(&ring->persistent_purge_list)); root = &ring->persistent_gnts; @@ -412,46 +429,37 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); - if (clean_used) { - clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); - continue; - } - if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) continue; - if (!scan_used && - (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags))) + if (!scan_used && !persistent_gnt_timeout(persistent_gnt)) + continue; + if (scan_used && total >= num_clean) continue; rb_erase(&persistent_gnt->node, root); list_add(&persistent_gnt->remove_node, &ring->persistent_purge_list); - if (--num_clean == 0) - goto finished; + total++; } /* - * If we get here it means we also need to start cleaning + * Check whether we also need to start cleaning * grants that were used since last purge in order to cope * with the requested num */ - if (!scan_used && !clean_used) { - pr_debug("Still missing %u purged frames\n", num_clean); + if (!scan_used && total < num_clean) { + pr_debug("Still missing %u purged frames\n", num_clean - total); scan_used = true; goto purge_list; } -finished: - if (!clean_used) { - pr_debug("Finished scanning for grants to clean, removing used flag\n"); - clean_used = true; - goto purge_list; - } - ring->persistent_gnt_c -= (total - num_clean); - ring->blkif->vbd.overflow_max_grants = 0; + if (total) { + ring->persistent_gnt_c -= total; + ring->blkif->vbd.overflow_max_grants = 0; - /* We can defer this work */ - schedule_work(&ring->persistent_purge_work); - pr_debug("Purged %u/%u\n", (total - num_clean), total); + /* We can defer this work */ + schedule_work(&ring->persistent_purge_work); + pr_debug("Purged %u/%u\n", num_clean, total); + } out: return; diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index ecb35fe8ca8d..7bff72db3b7e 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -234,14 +234,9 @@ struct xen_vbd { struct backend_info; /* Number of available flags */ -#define PERSISTENT_GNT_FLAGS_SIZE 2 +#define PERSISTENT_GNT_FLAGS_SIZE 1 /* This persistent grant is currently in use */ #define PERSISTENT_GNT_ACTIVE 0 -/* - * This persistent grant has been used, this flag is set when we remove the - * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently. - */ -#define PERSISTENT_GNT_WAS_ACTIVE 1 /* Number of requests that we can fit in a ring */ #define XEN_BLKIF_REQS_PER_PAGE 32 @@ -250,6 +245,7 @@ struct persistent_gnt { struct page *page; grant_ref_t gnt; grant_handle_t handle; + unsigned long last_used; DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); struct rb_node node; struct list_head remove_node; -- GitLab From a46b53672b2c2e3770b38a4abf90d16364d2584b Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 13 Aug 2018 16:01:11 +0200 Subject: [PATCH 0509/1692] xen/blkfront: cleanup stale persistent grants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a periodic cleanup function to remove old persistent grants which are no longer in use on the backend side. This avoids starvation in case there are lots of persistent grants for a device which no longer is involved in I/O business. 
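The frontend-side worker added below has the following shape (blkfront_work, info_list and purge_persistent_grants() are all introduced by this patch; the worker re-arms itself every 10 seconds for as long as any device has negotiated persistent grants):

static void blkfront_delay_work(struct work_struct *work)
{
        struct blkfront_info *info;
        bool need_schedule_work = false;

        mutex_lock(&blkfront_mutex);

        list_for_each_entry(info, &info_list, info_list) {
                if (info->feature_persistent) {
                        need_schedule_work = true;
                        mutex_lock(&info->mutex);
                        purge_persistent_grants(info);
                        mutex_unlock(&info->mutex);
                }
        }

        if (need_schedule_work)
                schedule_delayed_work(&blkfront_work, HZ * 10);

        mutex_unlock(&blkfront_mutex);
}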
Signed-off-by: Juergen Gross Reviewed-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkfront.c | 94 ++++++++++++++++++++++++++++++++++-- 1 file changed, 90 insertions(+), 4 deletions(-) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8986adab9bf5..a2a395f85a41 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq) static DEFINE_MUTEX(blkfront_mutex); static const struct block_device_operations xlvbd_block_fops; +static struct delayed_work blkfront_work; +static LIST_HEAD(info_list); /* * Maximum number of segments in indirect requests, the actual value used by @@ -216,6 +219,7 @@ struct blkfront_info /* Save uncomplete reqs and bios for migration. */ struct list_head requests; struct bio_list bio_list; + struct list_head info_list; }; static unsigned int nr_minors; @@ -1759,6 +1763,12 @@ static int write_per_ring_nodes(struct xenbus_transaction xbt, return err; } +static void free_info(struct blkfront_info *info) +{ + list_del(&info->info_list); + kfree(info); +} + /* Common code used when first setting up, and when resuming. */ static int talk_to_blkback(struct xenbus_device *dev, struct blkfront_info *info) @@ -1880,7 +1890,10 @@ static int talk_to_blkback(struct xenbus_device *dev, destroy_blkring: blkif_free(info, 0); - kfree(info); + mutex_lock(&blkfront_mutex); + free_info(info); + mutex_unlock(&blkfront_mutex); + dev_set_drvdata(&dev->dev, NULL); return err; @@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev, info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); dev_set_drvdata(&dev->dev, info); + mutex_lock(&blkfront_mutex); + list_add(&info->info_list, &info_list); + mutex_unlock(&blkfront_mutex); + return 0; } @@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) indirect_segments = 0; info->max_indirect_segments = indirect_segments; + + if (info->feature_persistent) { + mutex_lock(&blkfront_mutex); + schedule_delayed_work(&blkfront_work, HZ * 10); + mutex_unlock(&blkfront_mutex); + } } /* @@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) mutex_unlock(&info->mutex); if (!bdev) { - kfree(info); + mutex_lock(&blkfront_mutex); + free_info(info); + mutex_unlock(&blkfront_mutex); return 0; } @@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) if (info && !bdev->bd_openers) { xlvbd_release_gendisk(info); disk->private_data = NULL; - kfree(info); + mutex_lock(&blkfront_mutex); + free_info(info); + mutex_unlock(&blkfront_mutex); } mutex_unlock(&bdev->bd_mutex); @@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); xlvbd_release_gendisk(info); disk->private_data = NULL; - kfree(info); + free_info(info); } out: @@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = { .is_ready = blkfront_is_ready, }; +static void purge_persistent_grants(struct blkfront_info *info) +{ + unsigned int i; + unsigned long flags; + + for (i = 0; i < info->nr_rings; i++) { + struct blkfront_ring_info *rinfo = &info->rinfo[i]; + struct grant *gnt_list_entry, *tmp; + + spin_lock_irqsave(&rinfo->ring_lock, flags); + + if (rinfo->persistent_gnts_c == 0) { + 
spin_unlock_irqrestore(&rinfo->ring_lock, flags); + continue; + } + + list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, + node) { + if (gnt_list_entry->gref == GRANT_INVALID_REF || + gnttab_query_foreign_access(gnt_list_entry->gref)) + continue; + + list_del(&gnt_list_entry->node); + gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); + rinfo->persistent_gnts_c--; + __free_page(gnt_list_entry->page); + kfree(gnt_list_entry); + } + + spin_unlock_irqrestore(&rinfo->ring_lock, flags); + } +} + +static void blkfront_delay_work(struct work_struct *work) +{ + struct blkfront_info *info; + bool need_schedule_work = false; + + mutex_lock(&blkfront_mutex); + + list_for_each_entry(info, &info_list, info_list) { + if (info->feature_persistent) { + need_schedule_work = true; + mutex_lock(&info->mutex); + purge_persistent_grants(info); + mutex_unlock(&info->mutex); + } + } + + if (need_schedule_work) + schedule_delayed_work(&blkfront_work, HZ * 10); + + mutex_unlock(&blkfront_mutex); +} + static int __init xlblk_init(void) { int ret; @@ -2650,6 +2732,8 @@ static int __init xlblk_init(void) return -ENODEV; } + INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work); + ret = xenbus_register_frontend(&blkfront_driver); if (ret) { unregister_blkdev(XENVBD_MAJOR, DEV_NAME); @@ -2663,6 +2747,8 @@ module_init(xlblk_init); static void __exit xlblk_exit(void) { + cancel_delayed_work_sync(&blkfront_work); + xenbus_unregister_driver(&blkfront_driver); unregister_blkdev(XENVBD_MAJOR, DEV_NAME); kfree(minors); -- GitLab From 4bcddbae019df2614ea36976ba3d36313f93c6d3 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 13 Aug 2018 16:01:12 +0200 Subject: [PATCH 0510/1692] xen/blkfront: reorder tests in xlblk_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In case we don't want pv block devices we should not test parameters for sanity and eventually print out error messages. So test precluding conditions before checking parameters. 
Signed-off-by: Juergen Gross Reviewed-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkfront.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a2a395f85a41..a71d817e900d 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2708,6 +2708,15 @@ static int __init xlblk_init(void) if (!xen_domain()) return -ENODEV; + if (!xen_has_pv_disk_devices()) + return -ENODEV; + + if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { + pr_warn("xen_blk: can't get major %d with name %s\n", + XENVBD_MAJOR, DEV_NAME); + return -ENODEV; + } + if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; @@ -2723,15 +2732,6 @@ static int __init xlblk_init(void) xen_blkif_max_queues = nr_cpus; } - if (!xen_has_pv_disk_devices()) - return -ENODEV; - - if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { - printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", - XENVBD_MAJOR, DEV_NAME); - return -ENODEV; - } - INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work); ret = xenbus_register_frontend(&blkfront_driver); -- GitLab From d77ff24e7fa2258877fa0b87efa06b9a58a37aab Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 13 Aug 2018 16:01:13 +0200 Subject: [PATCH 0511/1692] xen/blkback: move persistent grants flags to bool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The struct persistent_gnt flags member is meant to be a bitfield of different flags. There is only PERSISTENT_GNT_ACTIVE flag left, so convert it to a bool named "active". Signed-off-by: Juergen Gross Reviewed-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/blkback.c | 13 ++++++------- drivers/block/xen-blkback/common.h | 7 +------ 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 9eae7b243f68..fd1e19f1a49f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -255,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring, } } - bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); - set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); + persistent_gnt->active = true; /* Add new node and rebalance tree. 
*/ rb_link_node(&(persistent_gnt->node), parent, new); rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); @@ -280,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, else if (gref > data->gnt) node = node->rb_right; else { - if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { + if (data->active) { pr_alert_ratelimited("requesting a grant already in use\n"); return NULL; } - set_bit(PERSISTENT_GNT_ACTIVE, data->flags); + data->active = true; atomic_inc(&ring->persistent_gnt_in_use); return data; } @@ -295,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring, static void put_persistent_gnt(struct xen_blkif_ring *ring, struct persistent_gnt *persistent_gnt) { - if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) + if (!persistent_gnt->active) pr_alert_ratelimited("freeing a grant already unused\n"); persistent_gnt->last_used = jiffies; - clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); + persistent_gnt->active = false; atomic_dec(&ring->persistent_gnt_in_use); } @@ -429,7 +428,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring) BUG_ON(persistent_gnt->handle == BLKBACK_INVALID_HANDLE); - if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) + if (persistent_gnt->active) continue; if (!scan_used && !persistent_gnt_timeout(persistent_gnt)) continue; diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 7bff72db3b7e..2339b8d39c5e 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -233,11 +233,6 @@ struct xen_vbd { struct backend_info; -/* Number of available flags */ -#define PERSISTENT_GNT_FLAGS_SIZE 1 -/* This persistent grant is currently in use */ -#define PERSISTENT_GNT_ACTIVE 0 - /* Number of requests that we can fit in a ring */ #define XEN_BLKIF_REQS_PER_PAGE 32 @@ -246,7 +241,7 @@ struct persistent_gnt { grant_ref_t gnt; grant_handle_t handle; unsigned long last_used; - DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); + bool active; struct rb_node node; struct list_head remove_node; }; -- GitLab From 6f2f39ad1a54978394851c05e327419ebeb7227e Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 13 Aug 2018 16:01:14 +0200 Subject: [PATCH 0512/1692] xen/blkback: remove unused pers_gnts_lock from struct xen_blkif_ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit pers_gnts_lock isn't being used anywhere. Remove it. Signed-off-by: Juergen Gross Reviewed-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkback/common.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 2339b8d39c5e..1d3002d773f7 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -269,7 +269,6 @@ struct xen_blkif_ring { wait_queue_head_t pending_free_wq; /* Tree to store persistent grants. */ - spinlock_t pers_gnts_lock; struct rb_root persistent_gnts; unsigned int persistent_gnt_c; atomic_t persistent_gnt_in_use; -- GitLab From 3fba68fa35a2cbda157c6f49f26eefccb2e10043 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 21 Aug 2018 13:44:07 +0200 Subject: [PATCH 0513/1692] scsi: core: Update SCSI_MQ_DEFAULT help text to match default The default was changed, but the help text was not updated. Fix grammar (s/the option/this option/) while at it. 
[mkp: drop "new" as suggested by John Garry] Fixes: d5038a13eca72fb2 ("scsi: core: switch to scsi-mq by default") Signed-off-by: Geert Uytterhoeven Reviewed-by: Ming Lei Signed-off-by: Martin K. Petersen --- drivers/scsi/Kconfig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8fc851a9e116..7c097006c54d 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT default y depends on SCSI ---help--- - This option enables the new blk-mq based I/O path for SCSI - devices by default. With the option the scsi_mod.use_blk_mq - module/boot option defaults to Y, without it to N, but it can - still be overridden either way. + This option enables the blk-mq based I/O path for SCSI devices by + default. With this option the scsi_mod.use_blk_mq module/boot + option defaults to Y, without it to N, but it can still be + overridden either way. - If unsure say N. + If unsure say Y. config SCSI_PROC_FS bool "legacy /proc/scsi/ support" -- GitLab From a7ccd92c8d2ac4eb168b621e086be2dc9b8344f6 Mon Sep 17 00:00:00 2001 From: John Pittman Date: Thu, 23 Aug 2018 15:49:18 -0400 Subject: [PATCH 0514/1692] scsi: documentation: add scsi_mod.use_blk_mq to scsi-parameters Kernel line argument scsi_mod.use_blk_mq is missing from file Documentation/scsi/scsi-parameters.txt. Add this option, providing mention of config setting and format. [mkp: clarified where to look] Signed-off-by: John Pittman Signed-off-by: Martin K. Petersen --- Documentation/scsi/scsi-parameters.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt index 25a4b4cf04a6..92999d4e0cb8 100644 --- a/Documentation/scsi/scsi-parameters.txt +++ b/Documentation/scsi/scsi-parameters.txt @@ -97,6 +97,11 @@ parameters may be changed at runtime by the command allowing boot to proceed. none ignores them, expecting user space to do the scan. + scsi_mod.use_blk_mq= + [SCSI] use blk-mq I/O path by default + See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig. + Format: + sim710= [SCSI,HW] See header of drivers/scsi/sim710.c. -- GitLab From 89809b028b6f54187b7d81a0c69b35d394c52e62 Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Sat, 11 Aug 2018 21:03:58 +0530 Subject: [PATCH 0515/1692] scsi: csiostor: add a check for NULL pointer after kmalloc() Reported-by: Colin Ian King Signed-off-by: Varun Prakash Signed-off-by: Martin K. 
Petersen --- drivers/scsi/csiostor/csio_hw.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 23d07e9f87d0..6ff7c5580fcb 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -2364,8 +2364,8 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, } /* - * Returns -EINVAL if attempts to flash the firmware failed - * else returns 0, + * Returns -EINVAL if attempts to flash the firmware failed, + * -ENOMEM if memory allocation failed else returns 0, * if flashing was not attempted because the card had the * latest firmware ECANCELED is returned */ @@ -2393,6 +2393,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) return -EINVAL; } + /* allocate memory to read the header of the firmware on the + * card + */ + card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); + if (!card_fw) + return -ENOMEM; + if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) fw_bin_file = FW_FNAME_T5; else @@ -2406,11 +2413,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset) fw_size = fw->size; } - /* allocate memory to read the header of the firmware on the - * card - */ - card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL); - /* upgrade FW logic */ ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, hw->fw_state, reset); -- GitLab From 68bdc630721c40e908d22cffe07b5ca225a69f6e Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Sat, 11 Aug 2018 21:14:08 +0530 Subject: [PATCH 0516/1692] scsi: csiostor: fix incorrect port capabilities - use be32_to_cpu() instead of ntohs() for 32 bit port capabilities. - add a new function fwcaps32_to_caps16() to convert 32 bit port capabilities to 16 bit port capabilities. Signed-off-by: Varun Prakash Signed-off-by: Martin K. Petersen --- drivers/scsi/csiostor/csio_hw.c | 55 ++++++++++++++++++++++++++------- drivers/scsi/csiostor/csio_hw.h | 1 + drivers/scsi/csiostor/csio_mb.c | 6 ++-- 3 files changed, 48 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 6ff7c5580fcb..e51923886475 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -1601,6 +1601,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) return caps32; } +/** + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits + * @caps32: a 32-bit Port Capabilities value + * + * Returns the equivalent 16-bit Port Capabilities value. Note that + * not all 32-bit Port Capabilities can be represented in the 16-bit + * Port Capabilities and some fields/values may not make it. 
+ */ +fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) +{ + fw_port_cap16_t caps16 = 0; + + #define CAP32_TO_CAP16(__cap) \ + do { \ + if (caps32 & FW_PORT_CAP32_##__cap) \ + caps16 |= FW_PORT_CAP_##__cap; \ + } while (0) + + CAP32_TO_CAP16(SPEED_100M); + CAP32_TO_CAP16(SPEED_1G); + CAP32_TO_CAP16(SPEED_10G); + CAP32_TO_CAP16(SPEED_25G); + CAP32_TO_CAP16(SPEED_40G); + CAP32_TO_CAP16(SPEED_100G); + CAP32_TO_CAP16(FC_RX); + CAP32_TO_CAP16(FC_TX); + CAP32_TO_CAP16(802_3_PAUSE); + CAP32_TO_CAP16(802_3_ASM_DIR); + CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(FORCE_PAUSE); + CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(MDISTRAIGHT); + CAP32_TO_CAP16(FEC_RS); + CAP32_TO_CAP16(FEC_BASER_RS); + + #undef CAP32_TO_CAP16 + + return caps16; +} + /** * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value @@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw) val = 1; csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, - hw->pfn, 0, 1, ¶m, &val, false, + hw->pfn, 0, 1, ¶m, &val, true, NULL); if (csio_mb_issue(hw, mbp)) { @@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw) return -EINVAL; } - csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, - &val); - if (retval != FW_SUCCESS) { - csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n", - portid, retval); - mempool_free(mbp, hw->mb_mempool); - return -EINVAL; - } - - fw_caps = val; + csio_mb_process_read_params_rsp(hw, mbp, &retval, + 0, NULL); + fw_caps = retval ? FW_CAPS16 : FW_CAPS32; } /* Read PORT information */ diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 9e73ef771eb7..e351af6e7c81 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h @@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int, fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); +fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32); fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); int csio_hw_start(struct csio_hw *); diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index c026417269c3..6f13673d6aa0 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c @@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); if (fw_caps == FW_CAPS16) - cmdp->u.l1cfg.rcap = cpu_to_be32(fc); + cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc)); else cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); } @@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp, *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); } else { - *pcaps = ntohs(rsp->u.info32.pcaps32); - *acaps = ntohs(rsp->u.info32.acaps32); + *pcaps = be32_to_cpu(rsp->u.info32.pcaps32); + *acaps = be32_to_cpu(rsp->u.info32.acaps32); } } } -- GitLab From 9abd9990e9779dc9c548c3599aaca7e3505ab19d Mon Sep 17 00:00:00 2001 From: James Smart Date: Tue, 14 Aug 2018 12:55:05 -0700 Subject: [PATCH 0517/1692] scsi: lpfc: Default fdmi_on to on Change default behavior for fdmi registration to on. [mkp: patch was mangled] Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen --- drivers/scsi/lpfc/lpfc_attr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5a25553415f8..057a60abe664 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5122,16 +5122,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality"); /* # lpfc_fdmi_on: Controls FDMI support. -# 0 No FDMI support (default) -# 1 Traditional FDMI support +# 0 No FDMI support +# 1 Traditional FDMI support (default) # Traditional FDMI support means the driver will assume FDMI-2 support; # however, if that fails, it will fallback to FDMI-1. # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of # lpfc_fdmi_on. -# Value range [0,1]. Default value is 0. +# Value range [0,1]. Default value is 1. */ -LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); +LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support"); /* # Specifies the maximum number of ELS cmds we can have outstanding (for -- GitLab From 53e13ee087a80e8d4fc95436318436e5c2c1f8c2 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 16 Aug 2018 16:04:05 -0700 Subject: [PATCH 0518/1692] scsi: lpfc: Correct MDS diag and nvmet configuration A recent change added some MDS processing in the lpfc_drain_txq routine that relies on the fcp_wq being allocated. For nvmet operation the fcp_wq is not allocated because it can only be an nvme-target. When the original MDS support was added LS_MDS_LOOPBACK was defined wrong, (0x16) it should have been 0x10 (decimal value used for hex setting). This incorrect value allowed MDS_LOOPBACK to be set simultaneously with LS_NPIV_FAB_SUPPORTED, causing the driver to crash when it accesses the non-existent fcp_wq. Correct the bad value setting for LS_MDS_LOOPBACK. Fixes: ae9e28f36a6c ("lpfc: Add MDS Diagnostic support.") Cc: # v4.12+ Signed-off-by: Dick Kennedy Signed-off-by: James Smart Tested-by: Ewan D. Milne Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index e0d0da5f43d6..43732e8d1347 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -672,7 +672,7 @@ struct lpfc_hba { #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ -#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ +#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */ uint32_t hba_flag; /* hba generic flags */ #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ -- GitLab From eb53a3ea3e009578a388f106620b22b1707cf2f6 Mon Sep 17 00:00:00 2001 From: Martin Wilck Date: Wed, 22 Aug 2018 13:25:44 +0200 Subject: [PATCH 0519/1692] scsi: hpsa: limit transfer length to 1MB, not 512kB e2c7b43 was supposed to limit transfer length to 1MB, but got the unit of max_sectors wrong. Fixes: e2c7b433f729 ("scsi: hpsa: limit transfer length to 1MB") Signed-off-by: Martin Wilck Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 58bb70b886d7..c120929d4ffe 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = { #endif .sdev_attrs = hpsa_sdev_attrs, .shost_attrs = hpsa_shost_attrs, - .max_sectors = 1024, + .max_sectors = 2048, .no_write_same = 1, }; -- GitLab From cedefa8544c6be216b4710575065e3a11065f8d0 Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Sat, 11 Aug 2018 21:10:29 +0530 Subject: [PATCH 0520/1692] scsi: target: iscsi: cxgbit: use pr_debug() instead of pr_info() DDP programming happens in data path and it can fail because of lack of resources so use pr_debug() instead of pr_info() for this case. Signed-off-by: Varun Prakash Reviewed-by: Mike Christie Signed-off-by: Martin K. Petersen --- drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c index 768cce0ccb80..76a262674c8d 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c @@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo, ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); sgl->offset = sg_offset; if (!ret) { - pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", - __func__, 0, xferlen, sgcnt); + pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", + __func__, 0, xferlen, sgcnt); goto rel_ppods; } @@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd, ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); if (ret < 0) { - pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", - csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); + pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", + csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); ttinfo->sgl = NULL; ttinfo->nents = 0; -- GitLab From 4e8065aa6c6f50765290be27ab8a64a4e44cb009 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 23 Aug 2018 23:23:06 +0200 Subject: [PATCH 0521/1692] scsi: libata: Add missing newline at end of file With gcc 4.1.2: drivers/ata/libata-core.c:7396:33: warning: no newline at end of file Fixes: 2fa4a32613c9182b ("scsi: libsas: dynamically allocate and free ata host") Signed-off-by: Geert Uytterhoeven Signed-off-by: Martin K. Petersen --- drivers/ata/libata-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 172e32840256..599e01bcdef2 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -7394,4 +7394,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown); EXPORT_SYMBOL_GPL(ata_cable_ignore); EXPORT_SYMBOL_GPL(ata_cable_sata); EXPORT_SYMBOL_GPL(ata_host_get); -EXPORT_SYMBOL_GPL(ata_host_put); \ No newline at end of file +EXPORT_SYMBOL_GPL(ata_host_put); -- GitLab From 23aa8e69f2c6ceb0bdca52f4450ad1f45675ca73 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 27 Aug 2018 15:24:42 +0800 Subject: [PATCH 0522/1692] Revert "scsi: core: fix scsi_host_queue_ready" This reverts commit 265d59aacbce7e50bdc1f5d25033c38dd70b3767. 
There is fundamental issue in commit 328728630d9f2bf1 (scsi: core: avoid host-wide host_busy counter for scsi_mq) because SCSI's host busy counter may not be same with counter of blk-mq's inflight tags, especially in case of none io scheduler. So revert this commit first. Cc: Omar Sandoval , Cc: "Martin K. Petersen" , Cc: James Bottomley , Cc: Christoph Hellwig , Cc: Don Brace Cc: Kashyap Desai Cc: Mike Snitzer Cc: Hannes Reinecke Cc: Laurence Oberman Cc: Bart Van Assche Cc: Guenter Roeck Cc: Jens Axboe Reported-by: Jens Axboe Signed-off-by: Ming Lei Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0adfb3bce0fd..1046679f5473 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1611,7 +1611,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, else busy = 0; if (atomic_read(&shost->host_blocked) > 0) { - if (busy) + if (busy || scsi_host_busy(shost)) goto starved; /* -- GitLab From f45b8934b90b1d0017d33f8529941ec5020e9e0e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Aug 2018 23:20:33 +0200 Subject: [PATCH 0523/1692] staging: wilc1000: revert "fix TODO to compile spi and sdio components in single module" The TODO item named "make spi and sdio components coexist in one build" was apparently addressed a long time ago, but never removed from the TODO file. However, the new patch that tries to address it actually makes it worse again by duplicating the common parts of the driver into two separate modules rather than sharing them. This also introduces a build regression when one of the two is built-in while the other is a loadable module: drivers/staging/wilc1000/wilc_debugfs.o:(.data+0x10): undefined reference to `__this_module' Reverting the patch makes it build again. I'm leaving the TODO file modification though, as there is nothing left to do for this item. A related problem however still seems to exist: one still cannot have multiple concurrent instances of wilc1000 devices present in the system, as there are lots of shared global variables such as host_interface.c:static struct wilc_vif *periodic_rssi_vif; wilc_sdio.c:static struct wilc_sdio g_sdio; wilc_wlan.c:static enum chip_ps_states chip_ps_state = CHIP_WAKEDUP; wilc_wlan.c:static u32 pending_acks; wilc_wfi_cfgoperations.c:int wilc_connecting; In order to have multiple instances working (sdio, spi, or mixed), all such variables need to be dynamically allocated per instance and stored in 'struct wilc' or one of the structures referenced by it. 
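A minimal sketch of the direction hinted at here, with hypothetical field placement (none of this is part of the revert below): the file-scope statics would become members of the per-device structure so that two instances no longer share state.

struct wilc {
	/* ... existing members ... */
	enum chip_ps_states chip_ps_state;	/* was: static in wilc_wlan.c */
	u32 pending_acks;			/* was: static in wilc_wlan.c */
};

static void example_path(struct wilc *wilc)
{
	/* Code paths take the device instance and touch only its own
	 * state instead of a module-wide global.
	 */
	wilc->chip_ps_state = CHIP_WAKEDUP;
}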
Fixes: 9abc44ba4e2f ("staging: wilc1000: fix TODO to compile spi and sdio components in single module") Signed-off-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- drivers/staging/wilc1000/Makefile | 3 +-- drivers/staging/wilc1000/linux_wlan.c | 6 ++++-- drivers/staging/wilc1000/wilc_debugfs.c | 7 +++++-- drivers/staging/wilc1000/wilc_wlan.c | 6 ++++++ drivers/staging/wilc1000/wilc_wlan_if.h | 2 -- 5 files changed, 16 insertions(+), 8 deletions(-) diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile index f7b07c0b5ce2..ee7e26b886a5 100644 --- a/drivers/staging/wilc1000/Makefile +++ b/drivers/staging/wilc1000/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_WILC1000) += wilc1000.o ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \ -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\" @@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \ wilc_wlan.o obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o -wilc1000-sdio-objs += $(wilc1000-objs) wilc1000-sdio-objs += wilc_sdio.o obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o -wilc1000-spi-objs += $(wilc1000-objs) wilc1000-spi-objs += wilc_spi.o diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index 01cf4bd2e192..3b8d237decbf 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c @@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc) } kfree(wilc); - wilc_debugfs_remove(); } +EXPORT_SYMBOL_GPL(wilc_netdev_cleanup); static const struct net_device_ops wilc_netdev_ops = { .ndo_init = mac_init_fn, @@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, if (!wl) return -ENOMEM; - wilc_debugfs_init(); *wilc = wl; wl->io_type = io_type; wl->hif_func = ops; @@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, return 0; } +EXPORT_SYMBOL_GPL(wilc_netdev_init); + +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c index edc72876458d..8001df66b8c2 100644 --- a/drivers/staging/wilc1000/wilc_debugfs.c +++ b/drivers/staging/wilc1000/wilc_debugfs.c @@ -19,6 +19,7 @@ static struct dentry *wilc_dir; #define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR) static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR); +EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL); static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) @@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = { }, }; -int wilc_debugfs_init(void) +static int __init wilc_debugfs_init(void) { int i; struct wilc_debugfs_info_t *info; @@ -103,10 +104,12 @@ int wilc_debugfs_init(void) } return 0; } +module_init(wilc_debugfs_init); -void wilc_debugfs_remove(void) +static void __exit wilc_debugfs_remove(void) { debugfs_remove_recursive(wilc_dir); } +module_exit(wilc_debugfs_remove); #endif diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 6787b6e9f124..8b184aa30d25 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c @@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc) wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0)); wilc->hif_func->hif_write_reg(wilc, 0xfa, 0); } +EXPORT_SYMBOL_GPL(chip_allow_sleep); void chip_wakeup(struct wilc *wilc) { @@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc) } chip_ps_state = CHIP_WAKEDUP; } 
+EXPORT_SYMBOL_GPL(chip_wakeup); void wilc_chip_sleep_manually(struct wilc *wilc) { @@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc) chip_ps_state = CHIP_SLEEPING_MANUAL; release_bus(wilc, RELEASE_ONLY); } +EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually); void host_wakeup_notify(struct wilc *wilc) { @@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc) wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1); release_bus(wilc, RELEASE_ONLY); } +EXPORT_SYMBOL_GPL(host_wakeup_notify); void host_sleep_notify(struct wilc *wilc) { @@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc) wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1); release_bus(wilc, RELEASE_ONLY); } +EXPORT_SYMBOL_GPL(host_sleep_notify); int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) { @@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc) release_bus(wilc, RELEASE_ALLOW_SLEEP); } +EXPORT_SYMBOL_GPL(wilc_handle_isr); int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_size) diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h index 00d13b153f80..b81a73b9bd67 100644 --- a/drivers/staging/wilc1000/wilc_wlan_if.h +++ b/drivers/staging/wilc1000/wilc_wlan_if.h @@ -831,6 +831,4 @@ struct wilc; int wilc_wlan_init(struct net_device *dev); u32 wilc_get_chipid(struct wilc *wilc, bool update); -int wilc_debugfs_init(void); -void wilc_debugfs_remove(void); #endif -- GitLab From d772a65d8a6c45c376a8200a38f7f82fb480af6a Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 27 Aug 2018 15:24:43 +0800 Subject: [PATCH 0524/1692] Revert "scsi: core: avoid host-wide host_busy counter for scsi_mq" This reverts commit 328728630d9f2bf14b82ca30b5e47489beefe361. There is fundamental issue in commit 328728630d9f2bf1 (scsi: core: avoid host-wide host_busy counter for scsi_mq) because SCSI's host busy counter may not be same with counter of blk-mq's inflight tags, especially in case of none io scheduler. We may switch to other approach for addressing this scsi_mq's performance issue, such as percpu counter or kind of ways, so revert this commit first for fixing this kind of issue in EH path, as reported by Jens. Cc: Omar Sandoval , Cc: "Martin K. Petersen" , Cc: James Bottomley , Cc: Christoph Hellwig , Cc: Don Brace Cc: Kashyap Desai Cc: Mike Snitzer Cc: Hannes Reinecke Cc: Laurence Oberman Cc: Bart Van Assche Cc: Jens Axboe Reported-by: Jens Axboe Signed-off-by: Ming Lei Signed-off-by: Martin K. Petersen --- drivers/scsi/hosts.c | 24 +----------------------- drivers/scsi/scsi_lib.c | 23 ++++++----------------- 2 files changed, 7 insertions(+), 40 deletions(-) diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f02dcc875a09..ea4b0bb0c1cd 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) } EXPORT_SYMBOL(scsi_host_get); -struct scsi_host_mq_in_flight { - int cnt; -}; - -static void scsi_host_check_in_flight(struct request *rq, void *data, - bool reserved) -{ - struct scsi_host_mq_in_flight *in_flight = data; - - if (blk_mq_request_started(rq)) - in_flight->cnt++; -} - /** * scsi_host_busy - Return the host busy counter * @shost: Pointer to Scsi_Host to inc. 
**/ int scsi_host_busy(struct Scsi_Host *shost) { - struct scsi_host_mq_in_flight in_flight = { - .cnt = 0, - }; - - if (!shost->use_blk_mq) - return atomic_read(&shost->host_busy); - - blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight, - &in_flight); - return in_flight.cnt; + return atomic_read(&shost->host_busy); } EXPORT_SYMBOL(scsi_host_busy); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 1046679f5473..eb97d2dd3651 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost) unsigned long flags; rcu_read_lock(); - if (!shost->use_blk_mq) - atomic_dec(&shost->host_busy); + atomic_dec(&shost->host_busy); if (unlikely(scsi_host_in_recovery(shost))) { spin_lock_irqsave(shost->host_lock, flags); if (shost->host_failed || shost->host_eh_scheduled) @@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget) static inline bool scsi_host_is_busy(struct Scsi_Host *shost) { - /* - * blk-mq can handle host queue busy efficiently via host-wide driver - * tag allocation - */ - - if (!shost->use_blk_mq && shost->can_queue > 0 && + if (shost->can_queue > 0 && atomic_read(&shost->host_busy) >= shost->can_queue) return true; if (atomic_read(&shost->host_blocked) > 0) @@ -1606,12 +1600,9 @@ static inline int scsi_host_queue_ready(struct request_queue *q, if (scsi_host_in_recovery(shost)) return 0; - if (!shost->use_blk_mq) - busy = atomic_inc_return(&shost->host_busy) - 1; - else - busy = 0; + busy = atomic_inc_return(&shost->host_busy) - 1; if (atomic_read(&shost->host_blocked) > 0) { - if (busy || scsi_host_busy(shost)) + if (busy) goto starved; /* @@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, "unblocking host at zero depth\n")); } - if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue) + if (shost->can_queue > 0 && busy >= shost->can_queue) goto starved; if (shost->host_self_blocked) goto starved; @@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) * with the locks as normal issue path does. */ atomic_inc(&sdev->device_busy); - - if (!shost->use_blk_mq) - atomic_inc(&shost->host_busy); + atomic_inc(&shost->host_busy); if (starget->can_queue > 0) atomic_inc(&starget->target_busy); -- GitLab From b9eb3b14f1dbf16bf27b6c1ffe6b8c00ec945c9b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 27 Aug 2018 12:23:01 +0300 Subject: [PATCH 0525/1692] scsi: aacraid: fix a signedness bug The problem is that ->reset_state is a u8 but it can be set to -1 or -2 in aac_tmf_callback() and the error handling in aac_eh_target_reset() relies on it to be signed. [mkp: fixed typo] Fixes: 0d643ff3c353 ("scsi: aacraid: use aac_tmf_callback for reset fib") Signed-off-by: Dan Carpenter Signed-off-by: Martin K. 
Petersen --- drivers/scsi/aacraid/aacraid.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 29bf1e60f542..39eb415987fc 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1346,7 +1346,7 @@ struct fib { struct aac_hba_map_info { __le32 rmw_nexus; /* nexus for native HBA devices */ u8 devtype; /* device type */ - u8 reset_state; /* 0 - no reset, 1..x - */ + s8 reset_state; /* 0 - no reset, 1..x - */ /* after xth TM LUN reset */ u16 qd_limit; u32 scan_counter; -- GitLab From 061a5427530633de93ace4ef001b99961984af62 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 26 Aug 2018 10:09:06 -0600 Subject: [PATCH 0526/1692] blk-wbt: abstract out end IO completion handler Prep patch for calling the handler from a different context, no functional changes in this patch. Tested-by: Agarwal, Anchal Signed-off-by: Jens Axboe --- block/blk-wbt.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 84507d3e9a98..4575b4650370 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb) } } -static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) +static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw, + enum wbt_flags wb_acct) { - struct rq_wb *rwb = RQWB(rqos); - struct rq_wait *rqw; int inflight, limit; - if (!(wb_acct & WBT_TRACKED)) - return; - - rqw = get_rq_wait(rwb, wb_acct); inflight = atomic_dec_return(&rqw->inflight); /* @@ -170,6 +165,18 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) } } +static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) +{ + struct rq_wb *rwb = RQWB(rqos); + struct rq_wait *rqw; + + if (!(wb_acct & WBT_TRACKED)) + return; + + rqw = get_rq_wait(rwb, wb_acct); + wbt_rqw_done(rwb, rqw, wb_acct); +} + /* * Called on completion of a request. Note that it's also called when * a request is merged, when the request gets freed. -- GitLab From 38cfb5a45ee013bfab5d1ae4c4738815e744b440 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 26 Aug 2018 10:10:05 -0600 Subject: [PATCH 0527/1692] blk-wbt: improve waking of tasks We have two potential issues: 1) After commit 2887e41b910b, we only wake one process at the time when we finish an IO. We really want to wake up as many tasks as can queue IO. Before this commit, we woke up everyone, which could cause a thundering herd issue. 2) A task can potentially consume two wakeups, causing us to (in practice) miss a wakeup. Fix both by providing our own wakeup function, which stops __wake_up_common() from waking up more tasks if we fail to get a queueing token. With the strict ordering we have on the wait list, this wakes the right tasks and the right amount of tasks. Based on a patch from Jianchao Wang . 
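The heart of the change is the custom wake callback: __wake_up_common() stops walking the wait list as soon as a callback returns a negative value, so a waiter that cannot take a queueing token ends the sweep instead of silently consuming the wakeup. The callback from the diff below, shown here as plain code for readability:

static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			     int wake_flags, void *key)
{
	struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
						  wq);

	/* No budget left: return -1 to interrupt the wake up loop in
	 * __wake_up_common(), leaving the remaining waiters queued.
	 */
	if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
		return -1;

	/* This waiter now owns a token; wake it and remove it from the
	 * wait list so it cannot consume a second wakeup.
	 */
	data->got_token = true;
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}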
Tested-by: Agarwal, Anchal Signed-off-by: Jens Axboe --- block/blk-wbt.c | 63 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 4575b4650370..bfb0d21d19ce 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -161,7 +161,7 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw, int diff = limit - inflight; if (!inflight || diff >= rwb->wb_background / 2) - wake_up(&rqw->wait); + wake_up_all(&rqw->wait); } } @@ -488,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw) return limit; } +struct wbt_wait_data { + struct wait_queue_entry wq; + struct task_struct *task; + struct rq_wb *rwb; + struct rq_wait *rqw; + unsigned long rw; + bool got_token; +}; + +static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode, + int wake_flags, void *key) +{ + struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data, + wq); + + /* + * If we fail to get a budget, return -1 to interrupt the wake up + * loop in __wake_up_common. + */ + if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw))) + return -1; + + data->got_token = true; + list_del_init(&curr->entry); + wake_up_process(data->task); + return 1; +} + /* * Block if we will exceed our limit, or if we are currently waiting for * the timer to kick off queuing again. @@ -498,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, __acquires(lock) { struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); - DECLARE_WAITQUEUE(wait, current); + struct wbt_wait_data data = { + .wq = { + .func = wbt_wake_function, + .entry = LIST_HEAD_INIT(data.wq.entry), + }, + .task = current, + .rwb = rwb, + .rqw = rqw, + .rw = rw, + }; bool has_sleeper; has_sleeper = wq_has_sleeper(&rqw->wait); if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) return; - add_wait_queue_exclusive(&rqw->wait, &wait); + prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE); do { - set_current_state(TASK_UNINTERRUPTIBLE); + if (data.got_token) + break; - if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) + if (!has_sleeper && + rq_wait_inc_below(rqw, get_limit(rwb, rw))) { + finish_wait(&rqw->wait, &data.wq); + + /* + * We raced with wbt_wake_function() getting a token, + * which means we now have two. Put our local token + * and wake anyone else potentially waiting for one. + */ + if (data.got_token) + wbt_rqw_done(rwb, rqw, wb_acct); break; + } if (lock) { spin_unlock_irq(lock); @@ -518,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct, spin_lock_irq(lock); } else io_schedule(); + has_sleeper = false; } while (1); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&rqw->wait, &wait); + finish_wait(&rqw->wait, &data.wq); } static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) -- GitLab From 849c70dacb169da751b171c7d230206a72cf7391 Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Thu, 9 Aug 2018 20:20:56 -0700 Subject: [PATCH 0528/1692] MAINTAINERS: Switch a maintainer for drivers/staging/gasket Todd Poynor takes over for John Joseph. 
Signed-off-by: John Joseph Signed-off-by: Todd Poynor Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index a5b256b25905..a726e22976bb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6059,7 +6059,7 @@ F: Documentation/gcc-plugins.txt GASKET DRIVER FRAMEWORK M: Rob Springer -M: John Joseph +M: Todd Poynor M: Ben Chan S: Maintained F: drivers/staging/gasket/ -- GitLab From b2d7a075a1ccef2fb321d595802190c8e9b39004 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 21 Aug 2018 17:37:55 +0200 Subject: [PATCH 0529/1692] x86/pae: use 64 bit atomic xchg function in native_ptep_get_and_clear Using only 32-bit writes for the pte will result in an intermediate L1TF vulnerable PTE. When running as a Xen PV guest this will at once switch the guest to shadow mode resulting in a loss of performance. Use arch_atomic64_xchg() instead which will perform the requested operation atomically with all 64 bits. Some performance considerations according to: https://software.intel.com/sites/default/files/managed/ad/dc/Intel-Xeon-Scalable-Processor-throughput-latency.pdf The main number should be the latency, as there is no tight loop around native_ptep_get_and_clear(). "lock cmpxchg8b" has a latency of 20 cycles, while "lock xchg" (with a memory operand) isn't mentioned in that document. "lock xadd" (with xadd having 3 cycles less latency than xchg) has a latency of 11, so we can assume a latency of 14 for "lock xchg". Signed-off-by: Juergen Gross Reviewed-by: Thomas Gleixner Reviewed-by: Jan Beulich Tested-by: Jason Andryuk Signed-off-by: Boris Ostrovsky --- arch/x86/include/asm/pgtable-3level.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index a564084c6141..f8b1ad2c3828 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_PGTABLE_3LEVEL_H #define _ASM_X86_PGTABLE_3LEVEL_H +#include + /* * Intel Physical Address Extension (PAE) Mode - three-level page * tables on PPro+ CPUs. @@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep) { pte_t res; - /* xchg acts as a barrier before the setting of the high bits */ - res.pte_low = xchg(&ptep->pte_low, 0); - res.pte_high = ptep->pte_high; - ptep->pte_high = 0; + res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0); return res; } -- GitLab From 336d139f8718b1336c9d22f0e462611ae1229850 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Aug 2018 16:01:41 +0900 Subject: [PATCH 0530/1692] mtd: rawnand: denali: do not pass zero maxchips to nand_scan() Commit 49aa76b16676 ("mtd: rawnand: do not execute nand_scan_ident() if maxchips is zero") gave a new meaning for calling nand_scan_ident() with maxchips=0. It is a special usage for some drivers such as docg4, but actually the Denali driver may pass maxchips=0 to nand_scan() when the driver is enabled but no NAND chip is found on the board for some reasons. If nand_scan_with_ids() is called with maxchips=0, nand_scan_ident() is skipped, then nand_set_defaults() is skipped as well. Thus, the driver must set chip->controller beforehand. Otherwise, nand_attach() causes NULL pointer dereference. In fact, the Denali controller knows the number of connected chips before calling nand_scan_ident(); if DEVICE_RESET fails, there is no chip in that chip select. 
Then, denali_reset_banks() sets the maxchips to the number of detected chips. If no chip is found, maxchips is zero. In this case, there is no point for calling nand_scan() because we know it will fail for sure. Let's make the probe function fail immediately. Fixes: 49aa76b16676 ("mtd: rawnand: do not execute nand_scan_ident() if maxchips is zero") Signed-off-by: Masahiro Yamada Acked-by: Miquel Raynal Signed-off-by: Boris Brezillon --- drivers/mtd/nand/raw/denali.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index ca18612c4201..67b2065e7a19 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali) denali_enable_irq(denali); denali_reset_banks(denali); + if (!denali->max_banks) { + /* Error out earlier if no chip is found for some reasons. */ + ret = -ENODEV; + goto disable_irq; + } denali->active_bank = DENALI_INVALID_BANK; -- GitLab From fd255f6e3704d183f6f5011efd01fcda70372cab Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Fri, 24 Aug 2018 16:08:43 -0700 Subject: [PATCH 0531/1692] drm/i915/psr: Remove wait_for_idle() for PSR2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CI runs show PSR2 does not go to IDLE with selective update enabled on all PSR exit triggers. Specifically, logs indicate the hardware enters "SLEEP Selective Update" and not "IDLE Reset state', like the kernel expects, when vblank interrupts are enabled. This check was added for PSR1 but incorrectly extended to PSR2, remove the check as it breaks tests and prints out misleading error messages. v2: Split out non-code changes (Rodrigo) Cc: Tarun Vyas Cc: José Roberto de Souza Cc: Rodrigo Vivi Fixes: c43dbcbbcc8c ("drm/i915/psr: Lockless version of psr_wait_for_idle") Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180824230844.12428-1-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_psr.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index da583a45e942..2cb931f3019b 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -771,8 +771,6 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg; - u32 mask; if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) return 0; @@ -787,13 +785,10 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, * not needed and will induce latencies in the atomic * update path. */ - if (dev_priv->psr.psr2_enabled) { - reg = EDP_PSR2_STATUS; - mask = EDP_PSR2_STATUS_STATE_MASK; - } else { - reg = EDP_PSR_STATUS; - mask = EDP_PSR_STATUS_STATE_MASK; - } + + /* FIXME: Update this for PSR2 if we need to wait for idle */ + if (READ_ONCE(dev_priv->psr.psr2_enabled)) + return 0; /* * Max time for PSR to idle = Inverse of the refresh rate + @@ -801,7 +796,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, * handshake. 50 msec is defesive enough to cover everything. 
*/ - return __intel_wait_for_register(dev_priv, reg, mask, + return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS, + EDP_PSR_STATUS_STATE_MASK, EDP_PSR_STATUS_STATE_IDLE, 2, 50, out_value); } -- GitLab From 65df9c7947d70a8d78b2af5a1a835c713110d21e Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Fri, 24 Aug 2018 16:08:44 -0700 Subject: [PATCH 0532/1692] drm/i915/psr: Rewrite comments in intel_psr_wait_for_idle() Added bspec reference, aligned text and documented the function. Cc: Rodrigo Vivi Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180824230844.12428-2-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_psr.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 2cb931f3019b..aee64aee18fe 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -766,6 +766,16 @@ void intel_psr_disable(struct intel_dp *intel_dp, cancel_work_sync(&dev_priv->psr.work); } +/** + * intel_psr_wait_for_idle - wait for PSR1 to idle + * @new_crtc_state: new CRTC state + * @out_value: PSR status in case of failure + * + * This function is expected to be called from pipe_update_start() where it is + * not expected to race with PSR enable or disable. + * + * Returns: 0 on success or -ETIMEOUT if PSR status does not idle. + */ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, u32 *out_value) { @@ -775,25 +785,15 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) return 0; - /* - * The sole user right now is intel_pipe_update_start(), - * which won't race with psr_enable/disable, which is - * where psr2_enabled is written to. So, we don't need - * to acquire the psr.lock. More importantly, we want the - * latency inside intel_pipe_update_start() to be as low - * as possible, so no need to acquire psr.lock when it is - * not needed and will induce latencies in the atomic - * update path. - */ - /* FIXME: Update this for PSR2 if we need to wait for idle */ if (READ_ONCE(dev_priv->psr.psr2_enabled)) return 0; /* - * Max time for PSR to idle = Inverse of the refresh rate + - * 6 ms of exit training time + 1.5 ms of aux channel - * handshake. 50 msec is defesive enough to cover everything. + * From bspec: Panel Self Refresh (BDW+) + * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of + * exit training time + 1.5 ms of aux channel handshake. 50 ms is + * defensive enough to cover everything. */ return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS, -- GitLab From b0a84beb2e35536839ea289182684528f379b860 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 27 Aug 2018 13:32:12 -0600 Subject: [PATCH 0533/1692] blk-wbt: remove dead code We already note and mark discard and swap IO from bio_to_wbt_flags(). 
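For context, the two branches removed in the diff below duplicate classification that, per this message, already happens when the wbt flags are derived from the bio; schematically the same marking is of the form (sketch only, see bio_to_wbt_flags() in block/blk-wbt.c for the real helper):

	if (current_is_kswapd())
		flags |= WBT_KSWAPD;
	if (bio_op(bio) == REQ_OP_DISCARD)
		flags |= WBT_DISCARD;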
Signed-off-by: Jens Axboe --- block/blk-wbt.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/block/blk-wbt.c b/block/blk-wbt.c index bfb0d21d19ce..8e20a0677dcf 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -636,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) return; } - if (current_is_kswapd()) - flags |= WBT_KSWAPD; - if (bio_op(bio) == REQ_OP_DISCARD) - flags |= WBT_DISCARD; - __wbt_wait(rwb, flags, bio->bi_opf, lock); if (!blk_stat_is_active(rwb->cb)) -- GitLab From 88bc243a3f22b9938c0b53c577dee28025cdb920 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Fri, 24 Aug 2018 14:49:41 +0200 Subject: [PATCH 0534/1692] selftests: android: move config up a level 'make kselftest-merge' assumes that the config files for the tests are located under the 'main' test dir, like tools/testing/selftests/android/ and not in a subdir to android. Signed-off-by: Anders Roxell Signed-off-by: Shuah Khan (Samsung OSG) --- tools/testing/selftests/android/{ion => }/config | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tools/testing/selftests/android/{ion => }/config (100%) diff --git a/tools/testing/selftests/android/ion/config b/tools/testing/selftests/android/config similarity index 100% rename from tools/testing/selftests/android/ion/config rename to tools/testing/selftests/android/config -- GitLab From fca5d959972c18839f0306f19df2f121623447dd Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 21 Aug 2018 17:14:32 -0400 Subject: [PATCH 0535/1692] drm/amdgpu: Adjust the VM size based on system memory size v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set the VM size based on system memory size between the ASIC-specific limits given by min_vm_size and max_bits. GFXv9 GPUs will keep their default VM size of 256TB (48 bit). Only older GPUs will adjust VM size depending on system memory size. This makes more VM space available for ROCm applications on GFXv8 GPUs that want to map all available VRAM and system memory in their SVM address space. 
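A worked example of the sizing rule added below, with illustrative numbers (not taken from any particular board). The new heuristic is

	vm_size = roundup_pow_of_two(min(max(phys_ram_gb * 3, min_vm_size), max_size));

so on a machine with 32 GB of system memory, phys_ram_gb = 32 and 3 * 32 = 96 GB; assuming the ASIC limits do not clamp that value, rounding up to the next power of two gives a 128 GB VM space on such a pre-GFXv9 part (GFXv9 keeps its 256 TB / 48-bit default as noted above).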
v2: * Clarify comment * Round up memory size before >> 30 * Round up automatic vm_size to power of two Signed-off-by: Felix Kuehling Acked-by: Junwei Zhang Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 ++++++++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e40ca8676418..d174d50e3bd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2483,28 +2483,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size * * @adev: amdgpu_device pointer - * @vm_size: the default vm size if it's set auto + * @min_vm_size: the minimum vm size in GB if it's set auto * @fragment_size_default: Default PTE fragment size * @max_level: max VMPT level * @max_bits: max address space size in bits * */ -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, uint32_t fragment_size_default, unsigned max_level, unsigned max_bits) { + unsigned int max_size = 1 << (max_bits - 30); + unsigned int vm_size; uint64_t tmp; /* adjust vm size first */ if (amdgpu_vm_size != -1) { - unsigned max_size = 1 << (max_bits - 30); - vm_size = amdgpu_vm_size; if (vm_size > max_size) { dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", amdgpu_vm_size, max_size); vm_size = max_size; } + } else { + struct sysinfo si; + unsigned int phys_ram_gb; + + /* Optimal VM size depends on the amount of physical + * RAM available. Underlying requirements and + * assumptions: + * + * - Need to map system memory and VRAM from all GPUs + * - VRAM from other GPUs not known here + * - Assume VRAM <= system memory + * - On GFX8 and older, VM space can be segmented for + * different MTYPEs + * - Need to allow room for fragmentation, guard pages etc. + * + * This adds up to a rough guess of system memory x3. + * Round up to power of two to maximize the available + * VM size with the given page table size. 
+ */ + si_meminfo(&si); + phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + + (1 << 30) - 1) >> 30; + vm_size = roundup_pow_of_two( + min(max(phys_ram_gb * 3, min_vm_size), max_size)); } adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..9fa9df0c5e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, uint32_t fragment_size_default, unsigned max_level, unsigned max_bits); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -- GitLab From 8ef23364b654d44244400d79988e677e504b21ba Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 24 Aug 2018 17:26:23 +0800 Subject: [PATCH 0536/1692] drm/amdgpu: Enable/disable gfx PG feature in rlc safe mode This is required by gfx hw and can fix the rlc hang when do s3 stree test on Cz/St. Reviewed-by: Alex Deucher Signed-off-by: Hang Zhou Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..5a9534a82d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->enter_safe_mode(adev); switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: @@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, default: break; } - + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } -- GitLab From 6d39df146ff12fb5c71634ad135144d5423590ec Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 23 Aug 2018 15:30:45 +0800 Subject: [PATCH 0537/1692] drm/amdgpu: Fix vce initialize failed on Kaveri/Mullins MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Forgot to add vce pg support via smu for Kaveri/Mullins. 
Fixes: 561a5c83eadd ("drm/amd/pp: Unify powergate_uvd/vce/mmhub to set_powergating_by_smu") v2: refine patch descriptions suggested by Michel Reviewed-by: Alex Deucher Tested-by: Michel Dänzer Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 41 ++++++++++++++++++----------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..a713c8b6e09c 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -66,7 +66,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, static int kv_init_fps_limits(struct amdgpu_device *adev); static void kv_dpm_powergate_uvd(void *handle, bool gate); -static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); @@ -1374,6 +1373,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) static void kv_dpm_disable(struct amdgpu_device *adev) { + struct kv_power_info *pi = kv_get_pi(adev); + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, @@ -1387,7 +1388,8 @@ static void kv_dpm_disable(struct amdgpu_device *adev) /* powerup blocks */ kv_dpm_powergate_acp(adev, false); kv_dpm_powergate_samu(adev, false); - kv_dpm_powergate_vce(adev, false); + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); kv_dpm_powergate_uvd(adev, false); kv_enable_smc_cac(adev, false); @@ -1551,7 +1553,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, int ret; if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { - kv_dpm_powergate_vce(adev, false); if (pi->caps_stable_p_state) pi->vce_boot_level = table->count - 1; else @@ -1573,7 +1574,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, kv_enable_vce_dpm(adev, true); } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { kv_enable_vce_dpm(adev, false); - kv_dpm_powergate_vce(adev, true); } return 0; @@ -1702,24 +1702,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) } } -static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) +static void kv_dpm_powergate_vce(void *handle, bool gate) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->vce_power_gated == gate) - return; + int ret; pi->vce_power_gated = gate; - if (!pi->caps_vce_pg) - return; - - if (gate) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); - else - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + if (gate) { + /* stop the VCE block */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + kv_enable_vce_dpm(adev, false); + if (pi->caps_vce_pg) /* power off the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); + } else { + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + kv_enable_vce_dpm(adev, true); + /* re-init the VCE block */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + } } + static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) { struct kv_power_info *pi = kv_get_pi(adev); @@ -3313,6 
+3321,9 @@ static int kv_set_powergating_by_smu(void *handle, case AMD_IP_BLOCK_TYPE_UVD: kv_dpm_powergate_uvd(handle, gate); break; + case AMD_IP_BLOCK_TYPE_VCE: + kv_dpm_powergate_vce(handle, gate); + break; default: break; } -- GitLab From 2ab4d0e74256fc49b7b270f63c1d1e47c2455abc Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 24 Aug 2018 16:17:54 +0800 Subject: [PATCH 0538/1692] drm/amdgpu: Update power state at the end of smu hw_init. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For SI/Kv, the power state is managed by function amdgpu_pm_compute_clocks. when dpm enabled, we should call amdgpu_pm_compute_clocks to update current power state instand of set boot state. this change can fix the oops when kfd driver was enabled on Kv. Reviewed-by: Alex Deucher Tested-by: Michel Dänzer Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4 +--- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index a713c8b6e09c..b497c37cef7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -1353,8 +1353,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) return ret; } - kv_update_current_ps(adev, adev->pm.dpm.boot_ps); - if (adev->irq.installed && amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); @@ -3069,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); - ni_update_current_ps(adev, boot_ps); return 0; } @@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } -- GitLab From 72ef23de207bad349ddc648296f330e176ac175b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 23 Aug 2018 15:41:57 +0800 Subject: [PATCH 0539/1692] drm/amdgpu: Power on uvd block when hw_fini MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit when hw_fini/suspend, smu only need to power on uvd block if uvd pg is supported, don't need to call uvd to do hw_init. v2: fix typo in patch descriptions and comments. 
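Once the hunk below is applied, kv_dpm_disable() carries the same conditional SMU power-on twice, once for VCE (added by the earlier patch) and once for UVD. If one wanted to factor that out, a trivial helper could look like the sketch below; this is purely illustrative and not part of the patch, and the u32 message-id parameter type is an assumption about amdgpu_kv_notify_message_to_smu()'s signature.

    /* Illustrative only, not part of this patch. */
    static void kv_force_power_on_block(struct amdgpu_device *adev,
                                        bool pg_supported, u32 msg)
    {
        /* On hw_fini only the SMU power-on message is needed,
         * and only when powergating of the block is supported. */
        if (pg_supported)
            amdgpu_kv_notify_message_to_smu(adev, msg);
    }

    /* possible usage in kv_dpm_disable():
     *   kv_force_power_on_block(adev, pi->caps_vce_pg, PPSMC_MSG_VCEPowerON);
     *   kv_force_power_on_block(adev, pi->caps_uvd_pg, PPSMC_MSG_UVDPowerON);
     */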
Reviewed-by: Alex Deucher Tested-by: Michel Dänzer Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index b497c37cef7e..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -65,7 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, int min_temp, int max_temp); static int kv_init_fps_limits(struct amdgpu_device *adev); -static void kv_dpm_powergate_uvd(void *handle, bool gate); static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); @@ -1388,7 +1387,8 @@ static void kv_dpm_disable(struct amdgpu_device *adev) kv_dpm_powergate_samu(adev, false); if (pi->caps_vce_pg) /* power on the VCE block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); - kv_dpm_powergate_uvd(adev, false); + if (pi->caps_uvd_pg) /* power on the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); kv_enable_smc_cac(adev, false); kv_enable_didt(adev, false); -- GitLab From 4a2de54dc1d7668fa364d8483420ba64b120963b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 24 Aug 2018 14:48:02 +0200 Subject: [PATCH 0540/1692] drm/amdgpu: fix holding mn_lock while allocating memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can't hold the mn_lock while allocating memory. Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 31 ++++++++++++++++---------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 09703c87d676..b6e9df11115d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1203,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, int r; + job = p->job; + p->job = NULL; + + r = drm_sched_job_init(&job->base, entity, p->filp); + if (r) + goto error_unlock; + + /* No memory allocation is allowed while holding the mn lock */ amdgpu_mn_lock(p->mn); amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { struct amdgpu_bo *bo = e->robj; if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { - amdgpu_mn_unlock(p->mn); - return -ERESTARTSYS; + r = -ERESTARTSYS; + goto error_abort; } } - job = p->job; - p->job = NULL; - - r = drm_sched_job_init(&job->base, entity, p->filp); - if (r) { - amdgpu_job_free(job); - amdgpu_mn_unlock(p->mn); - return r; - } - job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); @@ -1260,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_mn_unlock(p->mn); return 0; + +error_abort: + dma_fence_put(&job->base.s_fence->finished); + job->base.s_fence = NULL; + +error_unlock: + amdgpu_job_free(job); + amdgpu_mn_unlock(p->mn); + return r; } int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) -- GitLab From e7603dadd3cc2fa65924d9e8ce0c6f2964866da0 Mon Sep 17 00:00:00 2001 From: SivapiriyanKumarasamy Date: Wed, 15 Aug 2018 16:55:18 -0400 Subject: [PATCH 0541/1692] drm/amd/display: Fix memory leak caused by missed dc_sink_release [Why] There is currently an intermittent hang from a memory leak in DTN stress testing. 
It is caused by unfreed memory during driver disable. [How] Do a dc_sink_release in the case that skips it incorrectly. Signed-off-by: SivapiriyanKumarasamy Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..37eaf72ace54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * fail-safe mode */ if (dc_is_hdmi_signal(link->connector_signal) || - dc_is_dvi_signal(link->connector_signal)) + dc_is_dvi_signal(link->connector_signal)) { + if (prev_sink != NULL) + dc_sink_release(prev_sink); + return false; + } default: break; } -- GitLab From 2f4e7db0f7456d8312de88d321b889dbd10c18fd Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 23 Aug 2018 11:46:13 +0800 Subject: [PATCH 0542/1692] drm/amdgpu: Remove duplicated power source update when ac/dc switch, driver will be notified by acpi event. then the power source will be updated. so don't need to get power source when set power state. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8f98629fbe59..7b4e657a95c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) amdgpu_fence_wait_empty(ring); } - mutex_lock(&adev->pm.mutex); - /* update battery/ac status */ - if (power_supply_is_system_supplied() > 0) - adev->pm.ac_power = true; - else - adev->pm.ac_power = false; - mutex_unlock(&adev->pm.mutex); - if (adev->powerplay.pp_funcs->dispatch_tasks) { if (!amdgpu_device_has_dc_support(adev)) { mutex_lock(&adev->pm.mutex); -- GitLab From c31d02d1290e1e82a08015199e408228e152991f Mon Sep 17 00:00:00 2001 From: Thiago Jung Bauermann Date: Tue, 24 Jul 2018 23:57:25 -0300 Subject: [PATCH 0543/1692] selftests: kselftest: Remove outdated comment Commit 3c07aaef6598 ("selftests: kselftest: change KSFT_SKIP=4 instead of KSFT_PASS") reverted commit 11867a77eb85 ("selftests: kselftest framework: change skip exit code to 0") but missed removing the comment which that commit added, so do that now. Signed-off-by: Thiago Jung Bauermann Signed-off-by: Shuah Khan (Samsung OSG) --- tools/testing/selftests/kselftest.h | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h index 15e6b75fc3a5..a3edb2c8e43d 100644 --- a/tools/testing/selftests/kselftest.h +++ b/tools/testing/selftests/kselftest.h @@ -19,7 +19,6 @@ #define KSFT_FAIL 1 #define KSFT_XFAIL 2 #define KSFT_XPASS 3 -/* Treat skip as pass */ #define KSFT_SKIP 4 /* counters */ -- GitLab From 7876fa4f55fda4a57348832f4a668279ed2b2fc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 21 Aug 2018 11:11:36 +0200 Subject: [PATCH 0544/1692] drm/amdgpu: add ring soft recovery v4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of hammering hard on the GPU try a soft recovery first. 
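The helper introduced below polls the timed-out fence for a short, fixed window while repeatedly invoking a per-ring soft_recovery callback, and only reports success if the fence actually signals within that window. A minimal sketch of that polling idiom, with the engine-specific kick factored out as a hypothetical callback (needs <linux/ktime.h> and <linux/dma-fence.h>):

    /* Sketch of the deadline-poll pattern used by amdgpu_ring_soft_recovery(). */
    static bool soft_recover_poll(struct dma_fence *fence,
                                  void (*kick)(void *), void *data)
    {
        ktime_t deadline = ktime_add_us(ktime_get(), 10000); /* 10 ms window */

        while (!dma_fence_is_signaled(fence) &&
               ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
            kick(data); /* e.g. ask the SQ to kill the hung waves */

        /* Success only if the stuck fence really signaled in time. */
        return dma_fence_is_signaled(fence);
    }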
v2: reorder code a bit v3: increase timeout to 10ms, increment GPU reset counter v4: squash in compile fix (Christian) Signed-off-by: Christian König Reviewed-by: Huang Rui --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 25 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 ++++ 3 files changed, 35 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index facc0f08d804..34e54d41f5ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -33,6 +33,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); + if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { + DRM_ERROR("ring %s timeout, but soft recovered\n", + s_job->sched->name); + return; + } + DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n", job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), ring->fence_drv.sync_seq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 5dfd26be1eec..b70e85ec147d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -383,6 +383,31 @@ void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); } +/** + * amdgpu_ring_soft_recovery - try to soft recover a ring lockup + * + * @ring: ring to try the recovery on + * @vmid: VMID we try to get going again + * @fence: timedout fence + * + * Tries to get a ring proceeding again when it is stuck. + */ +bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, + struct dma_fence *fence) +{ + ktime_t deadline = ktime_add_us(ktime_get(), 10000); + + if (!ring->funcs->soft_recovery) + return false; + + atomic_inc(&ring->adev->gpu_reset_counter); + while (!dma_fence_is_signaled(fence) && + ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) + ring->funcs->soft_recovery(ring, vmid); + + return dma_fence_is_signaled(fence); +} + /* * Debugfs info */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 409fdd9b9710..9cc239968e40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -168,6 +168,8 @@ struct amdgpu_ring_funcs { /* priority functions */ void (*set_priority) (struct amdgpu_ring *ring, enum drm_sched_priority priority); + /* Try to soft recover the ring to make the fence signal */ + void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid); }; struct amdgpu_ring { @@ -260,6 +262,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring); void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, uint32_t reg0, uint32_t val0, uint32_t reg1, uint32_t val1); +bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, + struct dma_fence *fence); static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) { -- GitLab From efb6706405963047fb312efbe1af2d7490b58261 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 22 Aug 2018 11:55:23 +0200 Subject: [PATCH 0545/1692] drm/amdgpu: implement soft_recovery for GFX7 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to kill waves on the SQ. 
Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 95452c5a9df6..a15d9c0f233b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4212,6 +4212,18 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t value = 0; + + value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + WREG32(mmSQ_CMD, value); +} + static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) { WREG32(mmSQ_IND_INDEX, @@ -5088,6 +5100,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .pad_ib = amdgpu_ring_generic_pad_ib, .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, .emit_wreg = gfx_v7_0_ring_emit_wreg, + .soft_recovery = gfx_v7_0_ring_soft_recovery, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { -- GitLab From f5d850331ea9bdf18e68ae298cff35c7b7233993 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 21 Aug 2018 12:45:31 +0200 Subject: [PATCH 0546/1692] drm/amdgpu: implement soft_recovery for GFX8 v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to kill waves on the SQ. v2: only for the GFX ring for now. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 282dba6cce86..9de940a65c80 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6714,6 +6714,18 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, amdgpu_ring_write(ring, val); } +static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t value = 0; + + value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + WREG32(mmSQ_CMD, value); +} + static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { @@ -7171,6 +7183,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec, .patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec, .emit_wreg = gfx_v8_0_ring_emit_wreg, + .soft_recovery = gfx_v8_0_ring_soft_recovery, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { -- GitLab From 80dbea4720bb43b473219fad0cf3b426f2cd04cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 22 Aug 2018 12:04:11 +0200 Subject: [PATCH 0547/1692] drm/amdgpu: implement soft_recovery for GFX9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to kill waves on the SQ. 
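The GFX7 and GFX8 hooks above, and the GFX9 one below, program an identical SQ_CMD value. The commits only say they try to kill waves on the SQ, so the per-field annotation here is an interpretation rather than something stated in the patches:

    value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);     /* command 0x03: presumably "kill wave" */
    value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);    /* apply to every matching wave */
    value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); /* match on VMID ... */
    value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);   /* ... i.e. only waves of the hung job's VMID */
    WREG32(mmSQ_CMD, value);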
Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 44707f94b2c5..ab5cacea967b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4421,6 +4421,18 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask); } +static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t value = 0; + + value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); + WREG32(mmSQ_CMD, value); +} + static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { @@ -4743,6 +4755,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, + .soft_recovery = gfx_v9_0_ring_soft_recovery, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { -- GitLab From 43370c4ce5c6a1fae84b58f67f7834902ee74b7c Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 21 Aug 2018 17:14:32 -0400 Subject: [PATCH 0548/1692] drm/amdgpu: Adjust the VM size based on system memory size v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set the VM size based on system memory size between the ASIC-specific limits given by min_vm_size and max_bits. GFXv9 GPUs will keep their default VM size of 256TB (48 bit). Only older GPUs will adjust VM size depending on system memory size. This makes more VM space available for ROCm applications on GFXv8 GPUs that want to map all available VRAM and system memory in their SVM address space. 
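As a worked example of the sizing rule implemented in the hunk below (numbers chosen only for illustration): with 16 GB of system memory, phys_ram_gb comes out as 16, three times that is 48 GB, and rounding up to a power of two yields a 64 GB VM, assuming the ASIC's min_vm_size is not larger than 48 GB and max_size is at least 64 GB.

    /* Illustrative arithmetic, mirroring the code added below. */
    phys_ram_gb = 16;                                   /* e.g. a 16 GB system */
    vm_size = roundup_pow_of_two(min(max(phys_ram_gb * 3, min_vm_size), max_size));
    /* = roundup_pow_of_two(48) = 64 GB, given min_vm_size <= 48 and max_size >= 64 */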
v2: * Clarify comment * Round up memory size before >> 30 * Round up automatic vm_size to power of two Signed-off-by: Felix Kuehling Acked-by: Junwei Zhang Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 ++++++++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7e644bc6793e..b905d7901248 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2504,28 +2504,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size * * @adev: amdgpu_device pointer - * @vm_size: the default vm size if it's set auto + * @min_vm_size: the minimum vm size in GB if it's set auto * @fragment_size_default: Default PTE fragment size * @max_level: max VMPT level * @max_bits: max address space size in bits * */ -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, uint32_t fragment_size_default, unsigned max_level, unsigned max_bits) { + unsigned int max_size = 1 << (max_bits - 30); + unsigned int vm_size; uint64_t tmp; /* adjust vm size first */ if (amdgpu_vm_size != -1) { - unsigned max_size = 1 << (max_bits - 30); - vm_size = amdgpu_vm_size; if (vm_size > max_size) { dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", amdgpu_vm_size, max_size); vm_size = max_size; } + } else { + struct sysinfo si; + unsigned int phys_ram_gb; + + /* Optimal VM size depends on the amount of physical + * RAM available. Underlying requirements and + * assumptions: + * + * - Need to map system memory and VRAM from all GPUs + * - VRAM from other GPUs not known here + * - Assume VRAM <= system memory + * - On GFX8 and older, VM space can be segmented for + * different MTYPEs + * - Need to allow room for fragmentation, guard pages etc. + * + * This adds up to a rough guess of system memory x3. + * Round up to power of two to maximize the available + * VM size with the given page table size. 
+ */ + si_meminfo(&si); + phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + + (1 << 30) - 1) >> 30; + vm_size = roundup_pow_of_two( + min(max(phys_ram_gb * 3, min_vm_size), max_size)); } adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 14bafe771c9b..7a461eb76d44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -351,7 +351,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, uint32_t fragment_size_default, unsigned max_level, unsigned max_bits); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -- GitLab From 1f06dee8f784e4f3af4add95076659ba95ffa9fb Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 24 Aug 2018 17:26:23 +0800 Subject: [PATCH 0549/1692] drm/amdgpu: Enable/disable gfx PG feature in rlc safe mode This is required by gfx hw and can fix the rlc hang when do s3 stree test on Cz/St. Reviewed-by: Alex Deucher Signed-off-by: Hang Zhou Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 9de940a65c80..56662d80602c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5660,6 +5660,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->enter_safe_mode(adev); switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: @@ -5709,7 +5714,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, default: break; } - + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } -- GitLab From d3200a536ccbc047cee408c44a92235c4e84c91b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 23 Aug 2018 11:46:13 +0800 Subject: [PATCH 0550/1692] drm/amdgpu: Remove duplicated power source update when ac/dc switch, driver will be notified by acpi event. then the power source will be updated. so don't need to get power source when set power state. 
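For context on "driver will be notified by acpi event": the AC/DC state that the removed block recomputed on every amdgpu_pm_compute_clocks() call is already refreshed from the ACPI notification path. The sketch below shows what that update amounts to; the handler name is hypothetical and the exact driver plumbing is not shown in this patch.

    /* Hypothetical notifier body; the real ACPI handler in the driver differs. */
    static void example_ac_dc_changed(struct amdgpu_device *adev)
    {
        mutex_lock(&adev->pm.mutex);
        adev->pm.ac_power = power_supply_is_system_supplied() > 0;
        mutex_unlock(&adev->pm.mutex);

        /* re-evaluate clocks for the new power source */
        amdgpu_pm_compute_clocks(adev);
    }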
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index daa55fb06171..3e51e9c89f04 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1933,14 +1933,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) amdgpu_fence_wait_empty(ring); } - mutex_lock(&adev->pm.mutex); - /* update battery/ac status */ - if (power_supply_is_system_supplied() > 0) - adev->pm.ac_power = true; - else - adev->pm.ac_power = false; - mutex_unlock(&adev->pm.mutex); - if (adev->powerplay.pp_funcs->dispatch_tasks) { if (!amdgpu_device_has_dc_support(adev)) { mutex_lock(&adev->pm.mutex); -- GitLab From 3510bafe561bd86ce16ef67bf06a0ae6b9202043 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 23 Aug 2018 15:30:45 +0800 Subject: [PATCH 0551/1692] drm/amdgpu: Fix vce initialize failed on Kaveri/Mullins MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Forgot to add vce pg support via smu for Kaveri/Mullins. Fixes: 561a5c83eadd ("drm/amd/pp: Unify powergate_uvd/vce/mmhub to set_powergating_by_smu") v2: refine patch descriptions suggested by Michel Reviewed-by: Alex Deucher Tested-by: Michel Dänzer Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 41 ++++++++++++++++++----------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..a713c8b6e09c 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -66,7 +66,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, static int kv_init_fps_limits(struct amdgpu_device *adev); static void kv_dpm_powergate_uvd(void *handle, bool gate); -static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); @@ -1374,6 +1373,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) static void kv_dpm_disable(struct amdgpu_device *adev) { + struct kv_power_info *pi = kv_get_pi(adev); + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, @@ -1387,7 +1388,8 @@ static void kv_dpm_disable(struct amdgpu_device *adev) /* powerup blocks */ kv_dpm_powergate_acp(adev, false); kv_dpm_powergate_samu(adev, false); - kv_dpm_powergate_vce(adev, false); + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); kv_dpm_powergate_uvd(adev, false); kv_enable_smc_cac(adev, false); @@ -1551,7 +1553,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, int ret; if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { - kv_dpm_powergate_vce(adev, false); if (pi->caps_stable_p_state) pi->vce_boot_level = table->count - 1; else @@ -1573,7 +1574,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, kv_enable_vce_dpm(adev, true); } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { kv_enable_vce_dpm(adev, false); - kv_dpm_powergate_vce(adev, true); } return 0; @@ -1702,24 +1702,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) } } -static void 
kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) +static void kv_dpm_powergate_vce(void *handle, bool gate) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->vce_power_gated == gate) - return; + int ret; pi->vce_power_gated = gate; - if (!pi->caps_vce_pg) - return; - - if (gate) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); - else - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + if (gate) { + /* stop the VCE block */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + kv_enable_vce_dpm(adev, false); + if (pi->caps_vce_pg) /* power off the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); + } else { + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + kv_enable_vce_dpm(adev, true); + /* re-init the VCE block */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + } } + static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) { struct kv_power_info *pi = kv_get_pi(adev); @@ -3313,6 +3321,9 @@ static int kv_set_powergating_by_smu(void *handle, case AMD_IP_BLOCK_TYPE_UVD: kv_dpm_powergate_uvd(handle, gate); break; + case AMD_IP_BLOCK_TYPE_VCE: + kv_dpm_powergate_vce(handle, gate); + break; default: break; } -- GitLab From 3442516d14816b862bf7d1150c84ef1d0bfdd915 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 24 Aug 2018 16:17:54 +0800 Subject: [PATCH 0552/1692] drm/amdgpu: Update power state at the end of smu hw_init. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For SI/Kv, the power state is managed by function amdgpu_pm_compute_clocks. when dpm enabled, we should call amdgpu_pm_compute_clocks to update current power state instand of set boot state. this change can fix the oops when kfd driver was enabled on Kv. 
Reviewed-by: Alex Deucher Tested-by: Michel Dänzer Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4 +--- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index a713c8b6e09c..b497c37cef7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -1353,8 +1353,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) return ret; } - kv_update_current_ps(adev, adev->pm.dpm.boot_ps); - if (adev->irq.installed && amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); @@ -3069,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); - ni_update_current_ps(adev, boot_ps); return 0; } @@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } -- GitLab From e851abd830885c37c18183e36541d2fc11e3c674 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 23 Aug 2018 15:41:57 +0800 Subject: [PATCH 0553/1692] drm/amdgpu: Power on uvd block when hw_fini MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit when hw_fini/suspend, smu only need to power on uvd block if uvd pg is supported, don't need to call uvd to do hw_init. v2: fix typo in patch descriptions and comments. 
Reviewed-by: Alex Deucher Tested-by: Michel Dänzer Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index b497c37cef7e..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -65,7 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, int min_temp, int max_temp); static int kv_init_fps_limits(struct amdgpu_device *adev); -static void kv_dpm_powergate_uvd(void *handle, bool gate); static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); @@ -1388,7 +1387,8 @@ static void kv_dpm_disable(struct amdgpu_device *adev) kv_dpm_powergate_samu(adev, false); if (pi->caps_vce_pg) /* power on the VCE block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); - kv_dpm_powergate_uvd(adev, false); + if (pi->caps_uvd_pg) /* power on the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); kv_enable_smc_cac(adev, false); kv_enable_didt(adev, false); -- GitLab From f1df06d0f9bc8d2e9f0736a23b78d6db033460dc Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 23 Aug 2018 15:45:15 +0800 Subject: [PATCH 0554/1692] drm/amdgpu: Remove dead code in amdgpu_pm.c As we have unify powergate_uvd/vce/mmhub to set_powergating_by_smu, and set_powergating_by_smu was supported by both dpm and powerplay. so remove the else case. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 35 -------------------------- 1 file changed, 35 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 3e51e9c89f04..b7b16cb5ff0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1720,18 +1720,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) mutex_lock(&adev->pm.mutex); amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); mutex_unlock(&adev->pm.mutex); - } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - mutex_unlock(&adev->pm.mutex); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = false; - mutex_unlock(&adev->pm.mutex); - } - amdgpu_pm_compute_clocks(adev); } } @@ -1742,29 +1730,6 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) mutex_lock(&adev->pm.mutex); amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); mutex_unlock(&adev->pm.mutex); - } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; - mutex_unlock(&adev->pm.mutex); - amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - amdgpu_pm_compute_clocks(adev); - } else { - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = false; - mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); 
- } - } } -- GitLab From 6c10b5cc4eaa31ecedfb98a7f3e1fa5be032e189 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 17 Aug 2018 13:13:12 +0800 Subject: [PATCH 0555/1692] drm/amdgpu: Remove duplicate code in gfx_v8_0.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are no any logical changes here. 1. if kcq can be enabled via kiq, we don't need to do kiq ring test. 2. amdgpu_ring_test_ring function can be used to sync the ring complete, remove the duplicate code. v2: alloc 6 (not 7) dws for unmap_queues Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 80 +++++---------------------- 1 file changed, 13 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 56662d80602c..7b892d35e14c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4604,7 +4604,6 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring) static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) { struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - uint32_t scratch, tmp = 0; uint64_t queue_mask = 0; int r, i; @@ -4623,17 +4622,10 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) queue_mask |= (1ull << i); } - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("Failed to get scratch reg (%d).\n", r); - return r; - } - WREG32(scratch, 0xCAFEDEAD); - - r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 11); + kiq_ring->ready = true; + r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); return r; } /* set resources */ @@ -4665,25 +4657,12 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); } - /* write to scratch for completion */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); - amdgpu_ring_write(kiq_ring, 0xDEADBEEF); - amdgpu_ring_commit(kiq_ring); - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i >= adev->usec_timeout) { - DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n", - scratch, tmp); - r = -EINVAL; + r = amdgpu_ring_test_ring(kiq_ring); + if (r) { + DRM_ERROR("KCQ enable failed\n"); + kiq_ring->ready = false; } - amdgpu_gfx_scratch_free(adev, scratch); - return r; } @@ -5014,15 +4993,6 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) if (r) goto done; - /* Test KIQ */ - ring = &adev->gfx.kiq.ring; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - goto done; - } - /* Test KCQs */ for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; @@ -5092,23 +5062,11 @@ static int gfx_v8_0_hw_init(void *handle) static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring) { - struct amdgpu_device *adev = kiq_ring->adev; - uint32_t scratch, tmp = 0; - int r, i; - - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("Failed to get scratch reg (%d).\n", r); - return r; - } - WREG32(scratch, 0xCAFEDEAD); + int r; - r = amdgpu_ring_alloc(kiq_ring, 10); - if (r) { + r = 
amdgpu_ring_alloc(kiq_ring, 6); + if (r) DRM_ERROR("Failed to lock KIQ (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; - } /* unmap queues */ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); @@ -5121,23 +5079,11 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring amdgpu_ring_write(kiq_ring, 0); amdgpu_ring_write(kiq_ring, 0); amdgpu_ring_write(kiq_ring, 0); - /* write to scratch for completion */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); - amdgpu_ring_write(kiq_ring, 0xDEADBEEF); - amdgpu_ring_commit(kiq_ring); - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i >= adev->usec_timeout) { - DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); - r = -EINVAL; - } - amdgpu_gfx_scratch_free(adev, scratch); + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + DRM_ERROR("KCQ disable failed\n"); + return r; } -- GitLab From a62a49e5b968a58266ee04d63ddaa81a01510b39 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 17 Aug 2018 14:57:18 +0800 Subject: [PATCH 0556/1692] drm/amdgpu: Refine gfx_v8_0_kcq_disable function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Send all kcq unmap_queue packets and then wait for complete. Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 29 ++++++++++++++------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 7b892d35e14c..d5470d449f6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5060,26 +5060,29 @@ static int gfx_v8_0_hw_init(void *handle) return r; } -static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring) +static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev) { - int r; + int r, i; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - r = amdgpu_ring_alloc(kiq_ring, 6); + r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings); if (r) DRM_ERROR("Failed to lock KIQ (%d).\n", r); - /* unmap queues */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); - amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); - amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - + amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + } r = amdgpu_ring_test_ring(kiq_ring); if (r) DRM_ERROR("KCQ disable failed\n"); @@ -5090,7 +5093,6 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring static int gfx_v8_0_hw_fini(void *handle) { struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -5100,8 +5102,7 @@ static int gfx_v8_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0); /* disable KCQ to avoid CPC touch memory not valid anymore */ - for (i = 0; i < adev->gfx.num_compute_rings; i++) - gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]); + gfx_v8_0_kcq_disable(adev); if (amdgpu_sriov_vf(adev)) { pr_debug("For SRIOV client, shouldn't do anything.\n"); -- GitLab From 841cf911fb9e4abd7b8bac2776943c60da9069f4 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 17 Aug 2018 16:42:35 +0800 Subject: [PATCH 0557/1692] drm/amdgpu: Remove duplicate code in gfx_v9_0.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are no any logical changes here. 1. if kcq can be enabled via kiq, we don't need to do kiq ring test. 2. amdgpu_ring_test_ring function can be used to sync the ring complete, remove the duplicate code. Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 72 +++++---------------------- 1 file changed, 12 insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ab5cacea967b..37c95c479002 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2666,7 +2666,6 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) { struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - uint32_t scratch, tmp = 0; uint64_t queue_mask = 0; int r, i; @@ -2685,17 +2684,10 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) queue_mask |= (1ull << i); } - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("Failed to get scratch reg (%d).\n", r); - return r; - } - WREG32(scratch, 0xCAFEDEAD); - - r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11); + kiq_ring->ready = true; + r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -2732,24 +2724,12 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); } - /* write to scratch for completion */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); - amdgpu_ring_write(kiq_ring, 0xDEADBEEF); - amdgpu_ring_commit(kiq_ring); - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i >= adev->usec_timeout) { - DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n", - scratch, tmp); - r = -EINVAL; + r = amdgpu_ring_test_ring(kiq_ring); + if (r) { + DRM_ERROR("KCQ enable failed\n"); + kiq_ring->ready = false; } - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -3188,12 +3168,6 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) return r; } - ring = &adev->gfx.kiq.ring; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) - ring->ready = false; - for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; @@ -3244,21 +3218,11 @@ static 
int gfx_v9_0_hw_init(void *handle) static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring) { - struct amdgpu_device *adev = kiq_ring->adev; - uint32_t scratch, tmp = 0; - int r, i; - - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("Failed to get scratch reg (%d).\n", r); - return r; - } - WREG32(scratch, 0xCAFEDEAD); + int r; - r = amdgpu_ring_alloc(kiq_ring, 10); + r = amdgpu_ring_alloc(kiq_ring, 6); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); return r; } @@ -3273,23 +3237,11 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring amdgpu_ring_write(kiq_ring, 0); amdgpu_ring_write(kiq_ring, 0); amdgpu_ring_write(kiq_ring, 0); - /* write to scratch for completion */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); - amdgpu_ring_write(kiq_ring, 0xDEADBEEF); - amdgpu_ring_commit(kiq_ring); - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i >= adev->usec_timeout) { - DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); - r = -EINVAL; - } - amdgpu_gfx_scratch_free(adev, scratch); + r = amdgpu_ring_test_ring(kiq_ring); + if (r) + DRM_ERROR("KCQ disable failed\n"); + return r; } -- GitLab From ffabea84c55b1c6446b2245b87cdf6827b22e366 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 17 Aug 2018 16:45:16 +0800 Subject: [PATCH 0558/1692] drm/amdgpu: Refine gfx_v9_0_kcq_disable function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Send all kcq unmap_queue packets and then wait for complete. Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 33 +++++++++++++-------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 37c95c479002..21e66f86de92 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3216,28 +3216,29 @@ static int gfx_v9_0_hw_init(void *handle) return r; } -static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring) +static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev) { - int r; + int r, i; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - r = amdgpu_ring_alloc(kiq_ring, 6); - if (r) { + r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings); + if (r) DRM_ERROR("Failed to lock KIQ (%d).\n", r); - return r; - } - /* unmap queues */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); - amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); + amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); - amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - + amdgpu_ring_write(kiq_ring, 
PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + amdgpu_ring_write(kiq_ring, 0); + } r = amdgpu_ring_test_ring(kiq_ring); if (r) DRM_ERROR("KCQ disable failed\n"); @@ -3248,14 +3249,12 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring static int gfx_v9_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); /* disable KCQ to avoid CPC touch memory not valid anymore */ - for (i = 0; i < adev->gfx.num_compute_rings; i++) - gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]); + gfx_v9_0_kcq_disable(adev); if (amdgpu_sriov_vf(adev)) { gfx_v9_0_cp_gfx_enable(adev, false); -- GitLab From 36859cd5354b9cb418c28930936a8a6fce18a1d7 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 22 Aug 2018 17:58:31 +0800 Subject: [PATCH 0559/1692] drm/amdgpu: Change kiq initialize/reset sequence on gfx8 1. initialize kiq before initialize gfx ring. 2. set kiq ring ready immediately when kiq initialize successfully. 3. split function gfx_v8_0_kiq_resume into two functions. gfx_v8_0_kiq_resume is for kiq initialize. gfx_v8_0_kcq_resume is for kcq initialize. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 49 ++++++++++++++++----------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d5470d449f6d..3882689b2d8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4622,7 +4622,6 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) queue_mask |= (1ull << i); } - kiq_ring->ready = true; r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); @@ -4949,26 +4948,33 @@ static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev) static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = NULL; - int r = 0, i; - - gfx_v8_0_cp_compute_enable(adev, true); + struct amdgpu_ring *ring; + int r; ring = &adev->gfx.kiq.ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) - goto done; + return r; r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr); - if (!r) { - r = gfx_v8_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } + if (unlikely(r != 0)) + return r; + + gfx_v8_0_kiq_init_queue(ring); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); - if (r) - goto done; + ring->ready = true; + return 0; +} + +static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = NULL; + int r = 0, i; + + gfx_v8_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; @@ -5024,14 +5030,17 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) return r; } - r = gfx_v8_0_cp_gfx_resume(adev); + r = gfx_v8_0_kiq_resume(adev); if (r) return r; - r = gfx_v8_0_kiq_resume(adev); + r = gfx_v8_0_cp_gfx_resume(adev); if (r) return r; + r = gfx_v8_0_kcq_resume(adev); + if (r) + return r; gfx_v8_0_enable_gui_idle_interrupt(adev, true); return 0; @@ -5333,10 +5342,6 @@ static int gfx_v8_0_post_soft_reset(void *handle) grbm_soft_reset = adev->gfx.grbm_soft_reset; 
srbm_soft_reset = adev->gfx.srbm_soft_reset; - if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || - REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) - gfx_v8_0_cp_gfx_resume(adev); - if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) || REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) || @@ -5353,7 +5358,13 @@ static int gfx_v8_0_post_soft_reset(void *handle) mutex_unlock(&adev->srbm_mutex); } gfx_v8_0_kiq_resume(adev); + gfx_v8_0_kcq_resume(adev); } + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) + gfx_v8_0_cp_gfx_resume(adev); + gfx_v8_0_rlc_start(adev); return 0; -- GitLab From a9a8a788e5e946a9835a1365256fc4ce9e96ba2c Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 22 Aug 2018 18:54:45 +0800 Subject: [PATCH 0560/1692] drm/amdgpu: Change kiq ring initialize sequence on gfx9 1. initialize kiq before initialize gfx ring. 2. set kiq ring ready immediately when kiq initialize successfully. 3. split function gfx_v9_0_kiq_resume into two functions. gfx_v9_0_kiq_resume is for kiq initialize. gfx_v9_0_kcq_resume is for kcq initialize. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 38 +++++++++++++++++---------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 21e66f86de92..3594704a6f9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2684,7 +2684,6 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) queue_mask |= (1ull << i); } - kiq_ring->ready = true; r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8); if (r) { DRM_ERROR("Failed to lock KIQ (%d).\n", r); @@ -3091,26 +3090,33 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = NULL; - int r = 0, i; - - gfx_v9_0_cp_compute_enable(adev, true); + struct amdgpu_ring *ring; + int r; ring = &adev->gfx.kiq.ring; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) - goto done; + return r; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); - if (!r) { - r = gfx_v9_0_kiq_init_queue(ring); - amdgpu_bo_kunmap(ring->mqd_obj); - ring->mqd_ptr = NULL; - } + if (unlikely(r != 0)) + return r; + + gfx_v9_0_kiq_init_queue(ring); + amdgpu_bo_kunmap(ring->mqd_obj); + ring->mqd_ptr = NULL; amdgpu_bo_unreserve(ring->mqd_obj); - if (r) - goto done; + ring->ready = true; + return 0; +} + +static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = NULL; + int r = 0, i; + + gfx_v9_0_cp_compute_enable(adev, true); for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; @@ -3153,11 +3159,15 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) return r; } + r = gfx_v9_0_kiq_resume(adev); + if (r) + return r; + r = gfx_v9_0_cp_gfx_resume(adev); if (r) return r; - r = gfx_v9_0_kiq_resume(adev); + r = gfx_v9_0_kcq_resume(adev); if (r) return r; -- GitLab From 85eff20020a656b2d13b33dc4681523508fee037 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 24 Aug 2018 14:23:33 +0200 Subject: [PATCH 0561/1692] drm/amdgpu: amdgpu_ctx_add_fence can't fail MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No more waiting for a fence done here. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +--------- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 8 +++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 6 +++--- 3 files changed, 7 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b62bbe71662d..adc6a43e2333 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1217,15 +1217,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); - r = amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); - if (r) { - dma_fence_put(p->fence); - dma_fence_put(&job->base.s_fence->finished); - amdgpu_job_free(job); - amdgpu_mn_unlock(p->mn); - return r; - } - + amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); amdgpu_cs_post_dependencies(p); if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 987b7f256463..f9b54236102d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -434,9 +434,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) return 0; } -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, - struct drm_sched_entity *entity, - struct dma_fence *fence, uint64_t* handle) +void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity, + struct dma_fence *fence, uint64_t* handle) { struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); uint64_t seq = centity->sequence; @@ -458,8 +458,6 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, dma_fence_put(other); if (handle) *handle = seq; - - return 0; } struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index d67c1d285a4f..b3b012c0a7da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -65,9 +65,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx); int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, u32 ring, struct drm_sched_entity **entity); -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, - struct drm_sched_entity *entity, - struct dma_fence *fence, uint64_t *seq); +void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity, + struct dma_fence *fence, uint64_t *seq); struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, uint64_t seq); -- GitLab From 4f9ea1d0d1ed914092d9e03d87d80fa7e63ecc8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 24 Aug 2018 14:48:02 +0200 Subject: [PATCH 0562/1692] drm/amdgpu: fix holding mn_lock while allocating memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can't hold the mn_lock while allocating memory. 
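The fix below follows the usual shape for this kind of lock-ordering bug: perform every step that may allocate before taking the lock, then unwind through error labels so the lock only covers the non-allocating critical section. A self-contained userspace illustration of that shape (generic names, not the driver code itself):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct job { int payload; };
    static pthread_mutex_t mn_lock = PTHREAD_MUTEX_INITIALIZER;

    static int submit(int still_valid)
    {
        struct job *job;
        int r;

        job = malloc(sizeof(*job));   /* may allocate: do it before locking */
        if (!job)
            return -ENOMEM;

        pthread_mutex_lock(&mn_lock); /* no allocations from here on */
        if (!still_valid) {
            r = -EINTR;
            goto error_abort;
        }
        /* ... publish the job while the lock is held ... */
        pthread_mutex_unlock(&mn_lock);
        return 0;

    error_abort:
        free(job);
        pthread_mutex_unlock(&mn_lock);
        return r;
    }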
Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 31 ++++++++++++++++---------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index adc6a43e2333..dd734970e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1194,26 +1194,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, int r; + job = p->job; + p->job = NULL; + + r = drm_sched_job_init(&job->base, entity, p->filp); + if (r) + goto error_unlock; + + /* No memory allocation is allowed while holding the mn lock */ amdgpu_mn_lock(p->mn); amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { struct amdgpu_bo *bo = e->robj; if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { - amdgpu_mn_unlock(p->mn); - return -ERESTARTSYS; + r = -ERESTARTSYS; + goto error_abort; } } - job = p->job; - p->job = NULL; - - r = drm_sched_job_init(&job->base, entity, p->filp); - if (r) { - amdgpu_job_free(job); - amdgpu_mn_unlock(p->mn); - return r; - } - job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); @@ -1243,6 +1241,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_mn_unlock(p->mn); return 0; + +error_abort: + dma_fence_put(&job->base.s_fence->finished); + job->base.s_fence = NULL; + +error_unlock: + amdgpu_job_free(job); + amdgpu_mn_unlock(p->mn); + return r; } int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) -- GitLab From 248f2b8ef25c9505fc763d42bf5e2c9fcf94fd16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 22 Aug 2018 15:47:37 +0200 Subject: [PATCH 0563/1692] drm/amdgpu: remove extra root PD alignment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just another leftover from radeon. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Junwei Zhang Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 --- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b905d7901248..5ef755458d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2612,8 +2612,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, { struct amdgpu_bo_param bp; struct amdgpu_bo *root; - const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, - AMDGPU_VM_PTE_COUNT(adev) * 8); unsigned long size; uint64_t flags; int r, i; @@ -2662,7 +2660,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level); memset(&bp, 0, sizeof(bp)); bp.size = size; - bp.byte_align = align; + bp.byte_align = AMDGPU_GPU_PAGE_SIZE; bp.domain = AMDGPU_GEM_DOMAIN_VRAM; bp.flags = flags; bp.type = ttm_bo_type_kernel; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 7a461eb76d44..94fe47890adf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -49,9 +49,6 @@ struct amdgpu_bo_list_entry; /* number of entries in page table */ #define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size) -/* PTBs (Page Table Blocks) need to be aligned to 32K */ -#define AMDGPU_VM_PTB_ALIGN_SIZE 32768 - #define AMDGPU_PTE_VALID (1ULL << 0) #define AMDGPU_PTE_SYSTEM (1ULL << 1) #define AMDGPU_PTE_SNOOPED (1ULL << 2) -- GitLab From e21eb2613d071abfaa40e353b106f01f4ce83d77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 27 Aug 2018 15:17:59 -0500 Subject: [PATCH 0564/1692] drm/amdgpu: add helper for VM PD/PT allocation parameters v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a helper function to figure them out only once. 
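The shape of that refactor, with a single helper filling the allocation parameter block so callers stop duplicating the same field setup, is roughly the following standalone sketch; the field values and names are invented for illustration and only the structure of the change is meant to carry over.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct amdgpu_bo_param. */
struct bo_param {
        unsigned long size;
        unsigned int  byte_align;
        unsigned int  domain;
        unsigned long flags;
};

/* One place computes the parameters; callers only vary the level. */
static void fill_bo_param(int level, int use_cpu_for_update,
                          struct bo_param *bp)
{
        memset(bp, 0, sizeof(*bp));
        bp->size = 4096UL << level;      /* placeholder sizing rule */
        bp->byte_align = 4096;
        bp->domain = 1;                  /* e.g. "VRAM" */
        bp->flags = use_cpu_for_update ? 0x1 : 0x2;
}

int main(void)
{
        struct bo_param bp;

        fill_bo_param(0, 1, &bp);        /* root PD */
        printf("root: size=%lu flags=%#lx\n", bp.size, bp.flags);
        fill_bo_param(2, 0, &bp);        /* deeper page table */
        printf("pt:   size=%lu flags=%#lx\n", bp.size, bp.flags);
        return 0;
}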
v2: fix typo with memset v3: rebase on kfd changes (Alex) Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 63 ++++++++++++-------------- 1 file changed, 29 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5ef755458d3e..f78be285d296 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -467,6 +467,32 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, return r; } +/** + * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation + * + * @adev: amdgpu_device pointer + * @vm: requesting vm + * @bp: resulting BO allocation parameters + */ +static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, + int level, struct amdgpu_bo_param *bp) +{ + memset(bp, 0, sizeof(*bp)); + + bp->size = amdgpu_vm_bo_size(adev, level); + bp->byte_align = AMDGPU_GPU_PAGE_SIZE; + bp->domain = AMDGPU_GEM_DOMAIN_VRAM; + bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + if (vm->use_cpu_for_update) + bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + else + bp->flags |= AMDGPU_GEM_CREATE_SHADOW | + AMDGPU_GEM_CREATE_NO_CPU_ACCESS; + bp->type = ttm_bo_type_kernel; + if (vm->root.base.bo) + bp->resv = vm->root.base.bo->tbo.resv; +} + /** * amdgpu_vm_alloc_levels - allocate the PD/PT levels * @@ -490,8 +516,8 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, unsigned level, bool ats) { unsigned shift = amdgpu_vm_level_shift(adev, level); + struct amdgpu_bo_param bp; unsigned pt_idx, from, to; - u64 flags; int r; if (!parent->entries) { @@ -515,30 +541,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, saddr = saddr & ((1 << shift) - 1); eaddr = eaddr & ((1 << shift) - 1); - flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - if (vm->root.base.bo->shadow) - flags |= AMDGPU_GEM_CREATE_SHADOW; - if (vm->use_cpu_for_update) - flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - else - flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS; + amdgpu_vm_bo_param(adev, vm, level, &bp); /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { - struct reservation_object *resv = vm->root.base.bo->tbo.resv; struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; struct amdgpu_bo *pt; if (!entry->base.bo) { - struct amdgpu_bo_param bp; - - memset(&bp, 0, sizeof(bp)); - bp.size = amdgpu_vm_bo_size(adev, level); - bp.byte_align = AMDGPU_GPU_PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = flags; - bp.type = ttm_bo_type_kernel; - bp.resv = resv; r = amdgpu_bo_create(adev, &bp, &pt); if (r) return r; @@ -2612,8 +2622,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, { struct amdgpu_bo_param bp; struct amdgpu_bo *root; - unsigned long size; - uint64_t flags; int r, i; vm->va = RB_ROOT_CACHED; @@ -2651,20 +2659,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, "CPU update of VM recommended only for large BAR system\n"); vm->last_update = NULL; - flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - if (vm->use_cpu_for_update) - flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE) - flags |= AMDGPU_GEM_CREATE_SHADOW; - - size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level); - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.byte_align = AMDGPU_GPU_PAGE_SIZE; - bp.domain = 
AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = flags; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; + amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp); r = amdgpu_bo_create(adev, &bp, &root); if (r) goto error_free_sched_entity; -- GitLab From bbc9fb10e581c5463961506df7504356b3bd0a61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 22 Aug 2018 12:27:05 +0200 Subject: [PATCH 0565/1692] drm/amdgpu: add GMC9 support for PDs/PTs in system memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the necessary handling. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index a82b3eb429e8..453bd7ea50e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -560,7 +560,7 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev, static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { - if (!(*flags & AMDGPU_PDE_PTE)) + if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) *addr = adev->vm_manager.vram_base_offset + *addr - adev->gmc.vram_start; BUG_ON(*addr & 0xFFFF00000000003FULL); -- GitLab From 24a8d289d532003a167b8f52f97c50430db76ca3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 22 Aug 2018 14:11:19 +0200 Subject: [PATCH 0566/1692] drm/amdgpu: add amdgpu_gmc_get_pde_for_bo helper v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Helper to get the PDE for a PD/PT. v2: improve documentation Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 37 +++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 23 +++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +-- 5 files changed, 59 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 36058feac64f..a249931ef512 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -26,6 +26,38 @@ #include "amdgpu.h" +/** + * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO + * + * @bo: the BO to get the PDE for + * @level: the level in the PD hirarchy + * @addr: resulting addr + * @flags: resulting flags + * + * Get the address and flags to be used for a PDE (Page Directory Entry). 
+ */ +void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, + uint64_t *addr, uint64_t *flags) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_dma_tt *ttm; + + switch (bo->tbo.mem.mem_type) { + case TTM_PL_TT: + ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); + *addr = ttm->dma_address[0]; + break; + case TTM_PL_VRAM: + *addr = amdgpu_bo_gpu_offset(bo); + break; + default: + *addr = 0; + break; + } + *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem); + amdgpu_gmc_get_vm_pde(adev, level, addr, flags); +} + /** * amdgpu_gmc_pd_addr - return the address of the root directory * @@ -35,13 +67,14 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); uint64_t pd_addr; - pd_addr = amdgpu_bo_gpu_offset(bo); /* TODO: move that into ASIC specific code */ if (adev->asic_type >= CHIP_VEGA10) { uint64_t flags = AMDGPU_PTE_VALID; - amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags); + amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags); pd_addr |= flags; + } else { + pd_addr = amdgpu_bo_gpu_offset(bo); } return pd_addr; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 1c6974a33467..85030c04c443 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -133,6 +133,8 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc) return (gmc->real_vram_size == gmc->visible_vram_size); } +void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, + uint64_t *addr, uint64_t *flags); uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index eb08a03b82a0..2f304f9dd543 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1428,13 +1428,14 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) } /** - * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object + * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object * * @ttm: The ttm_tt object to compute the flags for * @mem: The memory registry backing this ttm_tt object + * + * Figure out the flags to use for a VM PDE (Page Directory Entry). */ -uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, - struct ttm_mem_reg *mem) +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem) { uint64_t flags = 0; @@ -1448,6 +1449,22 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, flags |= AMDGPU_PTE_SNOOPED; } + return flags; +} + +/** + * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object + * + * @ttm: The ttm_tt object to compute the flags for + * @mem: The memory registry backing this ttm_tt object + + * Figure out the flags to use for a VM PTE (Page Table Entry). 
+ */ +uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, + struct ttm_mem_reg *mem) +{ + uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); + flags |= adev->gart.gart_pte_flags; flags |= AMDGPU_PTE_READABLE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 8b3cc6687769..fe8f276e9811 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -116,6 +116,7 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated); bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); +uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f78be285d296..f17fb3c63f43 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1014,9 +1014,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params, pbo = pbo->parent; level += params->adev->vm_manager.root_level; - pt = amdgpu_bo_gpu_offset(entry->base.bo); - flags = AMDGPU_PTE_VALID; - amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags); + amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); pde = (entry - parent->entries) * 8; if (bo->shadow) params->func(params, bo->shadow, pde, pt, 1, 0, flags); -- GitLab From 284dec4317c8e76f45d3ce922f673c80331812f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 22 Aug 2018 16:44:56 +0200 Subject: [PATCH 0567/1692] drm/amdgpu: enable GTT PD/PT for raven v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Should work on Vega10 as well, but with an obvious performance hit. Older APUs can be enabled as well, but will probably be more work. 
v2: fix error checking v3: use more general check Signed-off-by: Christian König Acked-by: Andrey Grodzovsky Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f17fb3c63f43..25b390dc8636 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -342,6 +342,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, list_move(&bo_base->vm_status, &vm->moved); spin_unlock(&vm->moved_lock); } else { + r = amdgpu_ttm_alloc_gart(&bo->tbo); + if (r) + break; list_move(&bo_base->vm_status, &vm->relocated); } } @@ -417,6 +420,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; + r = amdgpu_ttm_alloc_gart(&bo->tbo); + if (r) + return r; + r = amdgpu_job_alloc_with_ib(adev, 64, &job); if (r) goto error; @@ -482,7 +489,12 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp->size = amdgpu_vm_bo_size(adev, level); bp->byte_align = AMDGPU_GPU_PAGE_SIZE; bp->domain = AMDGPU_GEM_DOMAIN_VRAM; - bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 && + adev->flags & AMD_IS_APU) + bp->domain |= AMDGPU_GEM_DOMAIN_GTT; + bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); + bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + AMDGPU_GEM_CREATE_CPU_GTT_USWC; if (vm->use_cpu_for_update) bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; else -- GitLab From 9bbdb0f345f50e2a9afd7d7c475e3b788eec402b Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 9 Aug 2018 13:20:04 -0400 Subject: [PATCH 0568/1692] drm/amd/display: Eliminate i2c hw function pointers [Why] The function pointers of the dce_i2c_hw struct were never accessed from outside dce_i2c_hw.c and had only one version. As function pointers take up space and make debugging difficult, and they are not needed in this case, they should be removed. [How] Remove the dce_i2c_hw_funcs struct and make static all functions that were previously a part of it. Reorder the functions in dce_i2c_hw.c. 
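In miniature, the change swaps a single-implementation ops table for direct calls to static functions, roughly like this standalone sketch (the names are illustrative, not the DC ones):

#include <stdio.h>

struct engine {
        int speed;
};

/* Previously reached through engine->funcs->set_speed(); with only one
 * implementation the indirection buys nothing, so call it directly. */
static void engine_set_speed(struct engine *e, int speed)
{
        e->speed = speed;
}

static int engine_setup(struct engine *e)
{
        engine_set_speed(e, 100);        /* direct call, no pointer table */
        return 0;
}

int main(void)
{
        struct engine e = { 0 };

        engine_setup(&e);
        printf("speed=%d\n", e.speed);
        return 0;
}

Direct static calls also let the compiler inline them and keep backtraces pointing at real symbols, which is presumably where the debugging benefit mentioned above comes from.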
Signed-off-by: David Francis Reviewed-by: Sun peng Li Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 607 +++++++++--------- .../gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 29 - 2 files changed, 291 insertions(+), 345 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 3a63e3cbb91d..cd7da59794d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -36,223 +36,41 @@ #define FN(reg_name, field_name) \ dce_i2c_hw->shifts->field_name, dce_i2c_hw->masks->field_name - -static inline void reset_hw_engine(struct dce_i2c_hw *dce_i2c_hw) -{ - REG_UPDATE_2(DC_I2C_CONTROL, - DC_I2C_SW_STATUS_RESET, 1, - DC_I2C_SW_STATUS_RESET, 1); -} - -static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw) -{ - uint32_t i2c_sw_status = 0; - - REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); - if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE) - return false; - - reset_hw_engine(dce_i2c_hw); - - REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); - return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE; -} - -static void set_speed( - struct dce_i2c_hw *dce_i2c_hw, - uint32_t speed) -{ - - if (speed) { - if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) - REG_UPDATE_N(SPEED, 3, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1); - else - REG_UPDATE_N(SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); - } -} - -bool dce_i2c_hw_engine_acquire_engine( - struct dce_i2c_hw *dce_i2c_hw, - struct ddc *ddc) -{ - - enum gpio_result result; - uint32_t current_speed; - - result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, - GPIO_DDC_CONFIG_TYPE_MODE_I2C); - - if (result != GPIO_RESULT_OK) - return false; - - dce_i2c_hw->ddc = ddc; - - - current_speed = dce_i2c_hw->funcs->get_speed(dce_i2c_hw); - - if (current_speed) - dce_i2c_hw->original_speed = current_speed; - - return true; -} -bool dce_i2c_engine_acquire_hw( - struct dce_i2c_hw *dce_i2c_hw, - struct ddc *ddc_handle) -{ - - uint32_t counter = 0; - bool result; - - do { - result = dce_i2c_hw_engine_acquire_engine( - dce_i2c_hw, ddc_handle); - - if (result) - break; - - /* i2c_engine is busy by VBios, lets wait and retry */ - - udelay(10); - - ++counter; - } while (counter < 2); - - if (result) { - if (!dce_i2c_hw->funcs->setup_engine(dce_i2c_hw)) { - dce_i2c_hw->funcs->release_engine(dce_i2c_hw); - result = false; - } - } - - return result; -} -struct dce_i2c_hw *acquire_i2c_hw_engine( - struct resource_pool *pool, - struct ddc *ddc) +static void disable_i2c_hw_engine( + struct dce_i2c_hw *dce_i2c_hw) { - - struct dce_i2c_hw *engine = NULL; - - if (!ddc) - return NULL; - - if (ddc->hw_info.hw_supported) { - enum gpio_ddc_line line = dal_ddc_get_line(ddc); - - if (line < pool->pipe_count) - engine = pool->hw_i2cs[line]; - } - - if (!engine) - return NULL; - - - if (!pool->i2c_hw_buffer_in_use && - dce_i2c_engine_acquire_hw(engine, ddc)) { - pool->i2c_hw_buffer_in_use = true; - return engine; - } - - - return NULL; + REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0); } -static bool setup_engine( +static void execute_transaction( struct dce_i2c_hw *dce_i2c_hw) { - uint32_t i2c_setup_limit = 
I2C_SETUP_TIME_LIMIT_DCE; + REG_UPDATE_N(SETUP, 5, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0); - if (dce_i2c_hw->setup_limit != 0) - i2c_setup_limit = dce_i2c_hw->setup_limit; - /* Program pin select */ - REG_UPDATE_6(DC_I2C_CONTROL, - DC_I2C_GO, 0, + + REG_UPDATE_5(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, 0, + DC_I2C_SW_STATUS_RESET, 0, DC_I2C_SEND_RESET, 0, - DC_I2C_SW_STATUS_RESET, 1, - DC_I2C_TRANSACTION_COUNT, 0, - DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id); - - /* Program time limit */ - if (dce_i2c_hw->send_reset_length == 0) { - /*pre-dcn*/ - REG_UPDATE_N(SETUP, 2, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); - } - /* Program HW priority - * set to High - interrupt software I2C at any time - * Enable restart of SW I2C that was interrupted by HW - * disable queuing of software while I2C is in use by HW - */ - REG_UPDATE_2(DC_I2C_ARBITRATION, - DC_I2C_NO_QUEUED_SW_GO, 0, - DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL); - - return true; -} - - - - -static void process_channel_reply( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_reply_transaction_data *reply) -{ - uint32_t length = reply->length; - uint8_t *buffer = reply->data; - - REG_SET_3(DC_I2C_DATA, 0, - DC_I2C_INDEX, dce_i2c_hw->buffer_used_write, - DC_I2C_DATA_RW, 1, - DC_I2C_INDEX_WRITE, 1); - - while (length) { - /* after reading the status, - * if the I2C operation executed successfully - * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller - * should read data bytes from I2C circular data buffer - */ - - uint32_t i2c_data; + DC_I2C_GO, 0, + DC_I2C_TRANSACTION_COUNT, dce_i2c_hw->transaction_count - 1); - REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data); - *buffer++ = i2c_data; + /* start I2C transfer */ + REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1); - --length; - } + /* all transactions were executed and HW buffer became empty + * (even though it actually happens when status becomes DONE) + */ + dce_i2c_hw->transaction_count = 0; + dce_i2c_hw->buffer_used_bytes = 0; } -enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result( - struct dce_i2c_hw *dce_i2c_hw, - uint32_t timeout, - enum i2c_channel_operation_result expected_result) -{ - enum i2c_channel_operation_result result; - uint32_t i = 0; - - if (!timeout) - return I2C_CHANNEL_OPERATION_SUCCEEDED; - - do { - result = dce_i2c_hw->funcs->get_channel_status( - dce_i2c_hw, NULL); - - if (result != expected_result) - break; - - udelay(1); - - ++i; - } while (i < timeout); - return result; -} -static enum i2c_channel_operation_result get_channel_status_hw( +static enum i2c_channel_operation_result get_channel_status( struct dce_i2c_hw *dce_i2c_hw, uint8_t *returned_bytes) { @@ -277,24 +95,13 @@ static enum i2c_channel_operation_result get_channel_status_hw( return I2C_CHANNEL_OPERATION_SUCCEEDED; } -static void submit_channel_request_hw( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_request_transaction_data *request) +static uint32_t get_hw_buffer_available_size( + const struct dce_i2c_hw *dce_i2c_hw) { - request->status = I2C_CHANNEL_OPERATION_SUCCEEDED; - - if (!dce_i2c_hw->funcs->process_transaction(dce_i2c_hw, request)) - return; - - if (dce_i2c_hw->funcs->is_hw_busy(dce_i2c_hw)) { - request->status = 
I2C_CHANNEL_OPERATION_ENGINE_BUSY; - return; - } - - dce_i2c_hw->funcs->execute_transaction(dce_i2c_hw); - - + return dce_i2c_hw->buffer_size - + dce_i2c_hw->buffer_used_bytes; } + uint32_t get_reference_clock( struct dc_bios *bios) { @@ -306,33 +113,48 @@ uint32_t get_reference_clock( return info.pll_info.crystal_frequency; } -static void execute_transaction_hw( - struct dce_i2c_hw *dce_i2c_hw) +static uint32_t get_speed( + const struct dce_i2c_hw *dce_i2c_hw) { - REG_UPDATE_N(SETUP, 5, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0); + uint32_t pre_scale = 0; + REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); - REG_UPDATE_5(DC_I2C_CONTROL, - DC_I2C_SOFT_RESET, 0, - DC_I2C_SW_STATUS_RESET, 0, - DC_I2C_SEND_RESET, 0, - DC_I2C_GO, 0, - DC_I2C_TRANSACTION_COUNT, dce_i2c_hw->transaction_count - 1); + /* [anaumov] it seems following is unnecessary */ + /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ + return pre_scale ? + dce_i2c_hw->reference_frequency / pre_scale : + dce_i2c_hw->default_speed; +} - /* start I2C transfer */ - REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1); +static void process_channel_reply( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_reply_transaction_data *reply) +{ + uint32_t length = reply->length; + uint8_t *buffer = reply->data; - /* all transactions were executed and HW buffer became empty - * (even though it actually happens when status becomes DONE) - */ - dce_i2c_hw->transaction_count = 0; - dce_i2c_hw->buffer_used_bytes = 0; + REG_SET_3(DC_I2C_DATA, 0, + DC_I2C_INDEX, dce_i2c_hw->buffer_used_write, + DC_I2C_DATA_RW, 1, + DC_I2C_INDEX_WRITE, 1); + + while (length) { + /* after reading the status, + * if the I2C operation executed successfully + * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller + * should read data bytes from I2C circular data buffer + */ + + uint32_t i2c_data; + + REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data); + *buffer++ = i2c_data; + + --length; + } } + static bool process_transaction( struct dce_i2c_hw *dce_i2c_hw, struct i2c_request_transaction_data *request) @@ -422,51 +244,89 @@ static bool process_transaction( return last_transaction; } -static uint32_t get_transaction_timeout_hw( - const struct dce_i2c_hw *dce_i2c_hw, - uint32_t length) -{ - - uint32_t speed = dce_i2c_hw->funcs->get_speed(dce_i2c_hw); +static inline void reset_hw_engine(struct dce_i2c_hw *dce_i2c_hw) +{ + REG_UPDATE_2(DC_I2C_CONTROL, + DC_I2C_SW_STATUS_RESET, 1, + DC_I2C_SW_STATUS_RESET, 1); +} +static void set_speed( + struct dce_i2c_hw *dce_i2c_hw, + uint32_t speed) +{ - uint32_t period_timeout; - uint32_t num_of_clock_stretches; + if (speed) { + if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) + REG_UPDATE_N(SPEED, 3, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 
2:1); + else + REG_UPDATE_N(SPEED, 2, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); + } +} - if (!speed) - return 0; +static bool setup_engine( + struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; - period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed; + if (dce_i2c_hw->setup_limit != 0) + i2c_setup_limit = dce_i2c_hw->setup_limit; + /* Program pin select */ + REG_UPDATE_6(DC_I2C_CONTROL, + DC_I2C_GO, 0, + DC_I2C_SOFT_RESET, 0, + DC_I2C_SEND_RESET, 0, + DC_I2C_SW_STATUS_RESET, 1, + DC_I2C_TRANSACTION_COUNT, 0, + DC_I2C_DDC_SELECT, dce_i2c_hw->engine_id); - num_of_clock_stretches = 1 + (length << 3) + 1; - num_of_clock_stretches += - (dce_i2c_hw->buffer_used_bytes << 3) + - (dce_i2c_hw->transaction_count << 1); + /* Program time limit */ + if (dce_i2c_hw->send_reset_length == 0) { + /*pre-dcn*/ + REG_UPDATE_N(SETUP, 2, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, + FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); + } + /* Program HW priority + * set to High - interrupt software I2C at any time + * Enable restart of SW I2C that was interrupted by HW + * disable queuing of software while I2C is in use by HW + */ + REG_UPDATE_2(DC_I2C_ARBITRATION, + DC_I2C_NO_QUEUED_SW_GO, 0, + DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL); - return period_timeout * num_of_clock_stretches; + return true; } -static void release_engine_dce_hw( - struct resource_pool *pool, - struct dce_i2c_hw *dce_i2c_hw) +static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw) { - pool->i2c_hw_buffer_in_use = false; + uint32_t i2c_sw_status = 0; - dce_i2c_hw->funcs->release_engine(dce_i2c_hw); - dal_ddc_close(dce_i2c_hw->ddc); + REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); + if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE) + return false; - dce_i2c_hw->ddc = NULL; + reset_hw_engine(dce_i2c_hw); + + REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); + return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE; } -static void release_engine_hw( +static void release_engine( struct dce_i2c_hw *dce_i2c_hw) { bool safe_to_reset; /* Restore original HW engine speed */ - dce_i2c_hw->funcs->set_speed(dce_i2c_hw, dce_i2c_hw->original_speed); + set_speed(dce_i2c_hw, dce_i2c_hw->original_speed); /* Release I2C */ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1); @@ -488,35 +348,180 @@ static void release_engine_hw( REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1); /* HW I2c engine - clock gating feature */ if (!dce_i2c_hw->engine_keep_power_up_count) - dce_i2c_hw->funcs->disable_i2c_hw_engine(dce_i2c_hw); + disable_i2c_hw_engine(dce_i2c_hw); } - -static void disable_i2c_hw_engine( +static void release_engine_dce_hw( + struct resource_pool *pool, struct dce_i2c_hw *dce_i2c_hw) { - REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0); + pool->i2c_hw_buffer_in_use = false; + + release_engine(dce_i2c_hw); + dal_ddc_close(dce_i2c_hw->ddc); + + dce_i2c_hw->ddc = NULL; } -static uint32_t get_speed_hw( - const struct dce_i2c_hw *dce_i2c_hw) + +bool dce_i2c_hw_engine_acquire_engine( + struct dce_i2c_hw *dce_i2c_hw, + struct ddc *ddc) { - uint32_t pre_scale = 0; - REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); + enum gpio_result result; + uint32_t current_speed; - /* [anaumov] it seems following is unnecessary */ - /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ - return pre_scale ? 
- dce_i2c_hw->reference_frequency / pre_scale : - dce_i2c_hw->default_speed; + result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, + GPIO_DDC_CONFIG_TYPE_MODE_I2C); + + if (result != GPIO_RESULT_OK) + return false; + + dce_i2c_hw->ddc = ddc; + + + current_speed = get_speed(dce_i2c_hw); + + if (current_speed) + dce_i2c_hw->original_speed = current_speed; + + return true; } -static uint32_t get_hw_buffer_available_size( - const struct dce_i2c_hw *dce_i2c_hw) + +bool dce_i2c_engine_acquire_hw( + struct dce_i2c_hw *dce_i2c_hw, + struct ddc *ddc_handle) { - return dce_i2c_hw->buffer_size - - dce_i2c_hw->buffer_used_bytes; + + uint32_t counter = 0; + bool result; + + do { + result = dce_i2c_hw_engine_acquire_engine( + dce_i2c_hw, ddc_handle); + + if (result) + break; + + /* i2c_engine is busy by VBios, lets wait and retry */ + + udelay(10); + + ++counter; + } while (counter < 2); + + if (result) { + if (!setup_engine(dce_i2c_hw)) { + release_engine(dce_i2c_hw); + result = false; + } + } + + return result; +} + +struct dce_i2c_hw *acquire_i2c_hw_engine( + struct resource_pool *pool, + struct ddc *ddc) +{ + + struct dce_i2c_hw *engine = NULL; + + if (!ddc) + return NULL; + + if (ddc->hw_info.hw_supported) { + enum gpio_ddc_line line = dal_ddc_get_line(ddc); + + if (line < pool->pipe_count) + engine = pool->hw_i2cs[line]; + } + + if (!engine) + return NULL; + + + if (!pool->i2c_hw_buffer_in_use && + dce_i2c_engine_acquire_hw(engine, ddc)) { + pool->i2c_hw_buffer_in_use = true; + return engine; + } + + + return NULL; +} + +enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result( + struct dce_i2c_hw *dce_i2c_hw, + uint32_t timeout, + enum i2c_channel_operation_result expected_result) +{ + enum i2c_channel_operation_result result; + uint32_t i = 0; + + if (!timeout) + return I2C_CHANNEL_OPERATION_SUCCEEDED; + + do { + + result = get_channel_status( + dce_i2c_hw, NULL); + + if (result != expected_result) + break; + + udelay(1); + + ++i; + } while (i < timeout); + return result; +} + +static void submit_channel_request_hw( + struct dce_i2c_hw *dce_i2c_hw, + struct i2c_request_transaction_data *request) +{ + request->status = I2C_CHANNEL_OPERATION_SUCCEEDED; + + if (!process_transaction(dce_i2c_hw, request)) + return; + + if (is_hw_busy(dce_i2c_hw)) { + request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY; + return; + } + + execute_transaction(dce_i2c_hw); + + +} + +static uint32_t get_transaction_timeout_hw( + const struct dce_i2c_hw *dce_i2c_hw, + uint32_t length) +{ + + uint32_t speed = get_speed(dce_i2c_hw); + + + + uint32_t period_timeout; + uint32_t num_of_clock_stretches; + + if (!speed) + return 0; + + period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed; + + num_of_clock_stretches = 1 + (length << 3) + 1; + num_of_clock_stretches += + (dce_i2c_hw->buffer_used_bytes << 3) + + (dce_i2c_hw->transaction_count << 1); + + return period_timeout * num_of_clock_stretches; } + bool dce_i2c_hw_engine_submit_request( struct dce_i2c_hw *dce_i2c_hw, struct dce_i2c_transaction_request *dce_i2c_request, @@ -615,9 +620,7 @@ bool dce_i2c_hw_engine_submit_request( reply.data = dce_i2c_request->payload.data; reply.length = dce_i2c_request->payload.length; - dce_i2c_hw->funcs->process_channel_reply(dce_i2c_hw, &reply); - - + process_channel_reply(dce_i2c_hw, &reply); } return result; @@ -632,7 +635,7 @@ bool dce_i2c_submit_command_hw( uint8_t index_of_payload = 0; bool result; - dce_i2c_hw->funcs->set_speed(dce_i2c_hw, cmd->speed); + set_speed(dce_i2c_hw, cmd->speed); result = true; @@ 
-670,32 +673,6 @@ bool dce_i2c_submit_command_hw( return result; } -static const struct dce_i2c_hw_funcs dce100_i2c_hw_funcs = { - .setup_engine = setup_engine, - .set_speed = set_speed, - .get_speed = get_speed_hw, - .release_engine = release_engine_hw, - .process_transaction = process_transaction, - .process_channel_reply = process_channel_reply, - .is_hw_busy = is_hw_busy, - .get_channel_status = get_channel_status_hw, - .execute_transaction = execute_transaction_hw, - .disable_i2c_hw_engine = disable_i2c_hw_engine -}; -static const struct dce_i2c_hw_funcs dce80_i2c_hw_funcs = { - .setup_engine = setup_engine, - .set_speed = set_speed, - .get_speed = get_speed_hw, - .release_engine = release_engine_hw, - .process_transaction = process_transaction, - .process_channel_reply = process_channel_reply, - .is_hw_busy = is_hw_busy, - .get_channel_status = get_channel_status_hw, - .execute_transaction = execute_transaction_hw, - .disable_i2c_hw_engine = disable_i2c_hw_engine -}; - - void dce_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, @@ -718,7 +695,6 @@ void dce_i2c_hw_construct( dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->send_reset_length = 0; dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; - dce_i2c_hw->funcs = &dce80_i2c_hw_funcs; dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE; } @@ -739,7 +715,6 @@ void dce100_i2c_hw_construct( regs, shifts, masks); - dce_i2c_hw->funcs = &dce100_i2c_hw_funcs; dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100; REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index 8baef3916246..742c1da84d45 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -256,40 +256,11 @@ struct dce_i2c_hw { uint32_t buffer_size; struct dc_context *ctx; - const struct dce_i2c_hw_funcs *funcs; const struct dce_i2c_registers *regs; const struct dce_i2c_shift *shifts; const struct dce_i2c_mask *masks; }; - -struct dce_i2c_hw_funcs { - bool (*setup_engine)( - struct dce_i2c_hw *dce_i2c_hw); - void (*set_speed)( - struct dce_i2c_hw *dce_i2c_hw, - uint32_t speed); - uint32_t (*get_speed)( - const struct dce_i2c_hw *dce_i2c_hw); - void (*release_engine)( - struct dce_i2c_hw *dce_i2c_hw); - bool (*process_transaction)( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_request_transaction_data *request); - void (*process_channel_reply)( - struct dce_i2c_hw *dce_i2c_hw, - struct i2c_reply_transaction_data *reply); - bool (*is_hw_busy)( - struct dce_i2c_hw *dce_i2c_hw); - enum i2c_channel_operation_result (*get_channel_status)( - struct dce_i2c_hw *dce_i2c_hw, - uint8_t *returned_bytes); - void (*execute_transaction)( - struct dce_i2c_hw *dce_i2c_hw); - void (*disable_i2c_hw_engine)( - struct dce_i2c_hw *dce_i2c_hw); -}; - void dce_i2c_hw_construct( struct dce_i2c_hw *dce_i2c_hw, struct dc_context *ctx, -- GitLab From 58382a445b7616ef60f2d8fc4717f90fc8472d45 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Wed, 18 Jul 2018 20:29:46 -0400 Subject: [PATCH 0569/1692] drm/amd/display: dc 3.1.63 Signed-off-by: Tony Cheng Reviewed-by: Steven Chiu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3564f4fe420a..2faff1b8821d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ 
b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.62" +#define DC_VER "3.1.63" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- GitLab From e5d0170e5644508a28e063995b89af4fdabd38b9 Mon Sep 17 00:00:00 2001 From: "Leo (Sunpeng) Li" Date: Mon, 13 Aug 2018 17:45:05 -0400 Subject: [PATCH 0570/1692] drm/amd/display: Use non-deprecated vblank handler [Why] drm_handle_vblank is deprecated. Use drm_crtc_handle_vblank instead. Signed-off-by: Leo (Sunpeng) Li Reviewed-by: David Francis Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c18bad387635..ad11dc9e8c1d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -311,16 +311,14 @@ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; - uint8_t crtc_index = 0; struct amdgpu_crtc *acrtc; acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); - if (acrtc) - crtc_index = acrtc->crtc_id; - - drm_handle_vblank(adev->ddev, crtc_index); - amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + if (acrtc) { + drm_crtc_handle_vblank(&acrtc->base); + amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + } } static int dm_set_clockgating_state(void *handle, -- GitLab From e498eb7136042aa9a352b1039c678537f4694158 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 14 Aug 2018 15:40:57 -0400 Subject: [PATCH 0571/1692] drm/amd/display: Add support for hw_state logging via debugfs [Why] We have logging methods for printing hardware state for newer ASICs but no way to trigger the log output. [How] Add support for triggering the output via writing to a debugfs file entry. Log output currently goes into dmesg for convenience, but accessing via a read should be possible later. 
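For reference, triggering the dump from userspace is just a write to the new debugfs entry. A minimal sketch, assuming debugfs is mounted in the usual place and the device is DRM minor 0 (adjust the path as needed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Assumed location: debugfs root for DRM minor 0. */
        const char *path = "/sys/kernel/debug/dri/0/amdgpu_dm_dtn_log";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Any non-empty write triggers the hw state dump into dmesg. */
        if (write(fd, "1", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}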
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Jordan Lazare Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 ++ .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 53 +++++++++++++++++++ .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.h | 1 + .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 22 ++++++-- 4 files changed, 77 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ad11dc9e8c1d..7b857d47fadd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -480,6 +480,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } +#if defined(CONFIG_DEBUG_FS) + if (dtn_debugfs_init(adev)) + DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n"); +#endif + DRM_DEBUG_DRIVER("KMS initialized.\n"); return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 0d9e410ca01e..e79ac1e2c460 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -720,3 +720,56 @@ int connector_debugfs_init(struct amdgpu_dm_connector *connector) return 0; } +static ssize_t dtn_log_read( + struct file *f, + char __user *buf, + size_t size, + loff_t *pos) +{ + /* TODO: Write log output to the user supplied buffer. */ + return 0; +} + +static ssize_t dtn_log_write( + struct file *f, + const char __user *buf, + size_t size, + loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + struct dc *dc = adev->dm.dc; + + /* Write triggers log output via dmesg. */ + if (size == 0) + return 0; + + if (dc->hwss.log_hw_state) + dc->hwss.log_hw_state(dc); + + return size; +} + +int dtn_debugfs_init(struct amdgpu_device *adev) +{ + static const struct file_operations dtn_log_fops = { + .owner = THIS_MODULE, + .read = dtn_log_read, + .write = dtn_log_write, + .llseek = default_llseek + }; + + struct drm_minor *minor = adev->ddev->primary; + struct dentry *root = minor->debugfs_root; + + struct dentry *ent = debugfs_create_file( + "amdgpu_dm_dtn_log", + 0644, + root, + adev, + &dtn_log_fops); + + if (IS_ERR(ent)) + return PTR_ERR(ent); + + return 0; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h index d9ed1b2aa811..bdef1587b0a0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h @@ -30,5 +30,6 @@ #include "amdgpu_dm.h" int connector_debugfs_init(struct amdgpu_dm_connector *connector); +int dtn_debugfs_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 8403b6a9a77b..86b63ce1dbf6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -336,14 +336,28 @@ bool dm_helpers_dp_mst_send_payload_allocation( } void dm_dtn_log_begin(struct dc_context *ctx) -{} +{ + pr_info("[dtn begin]\n"); +} void dm_dtn_log_append_v(struct dc_context *ctx, - const char *pMsg, ...) -{} + const char *msg, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, msg); + vaf.fmt = msg; + vaf.va = &args; + + pr_info("%pV", &vaf); + va_end(args); +} void dm_dtn_log_end(struct dc_context *ctx) -{} +{ + pr_info("[dtn end]\n"); +} bool dm_helpers_dp_mst_start_top_mgr( struct dc_context *ctx, -- GitLab From 18e4aa33bdfba0e7ac4e5a62d0665becb78ce012 Mon Sep 17 00:00:00 2001 From: Ken Chalmers Date: Fri, 10 Aug 2018 15:51:59 -0400 Subject: [PATCH 0572/1692] drm/amd/display: eliminate long wait between register polls on Maximus [Why] Now that we "scale" time delays correctly on Maximus (as of diags svn r170115), the forced "35 ms" wait time now becomes 35 ms * 500 = 17.5 seconds, which is far too long. Even having to repeat polling a register once causes excessive delays on Maximus. [How] Just use the regular wait time passed to the generic_reg_wait() function. This is sufficient for Maximus now, and it also means that there's one less "Maximus-only" code path in DAL. Also disable the "REG_WAIT taking a while:" message on Maximus, since things do take a while longer there and 1-2ms delays are not uncommon (and nothing to worry about). Signed-off-by: Ken Chalmers Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_helper.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index e68077e65565..fcfd50b5dba0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -219,12 +219,6 @@ uint32_t generic_reg_wait(const struct dc_context *ctx, /* something is terribly wrong if time out is > 200ms. (5Hz) */ ASSERT(delay_between_poll_us * time_out_num_tries <= 200000); - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - /* 35 seconds */ - delay_between_poll_us = 35000; - time_out_num_tries = 1000; - } - for (i = 0; i <= time_out_num_tries; i++) { if (i) { if (delay_between_poll_us >= 1000) @@ -238,7 +232,8 @@ uint32_t generic_reg_wait(const struct dc_context *ctx, field_value = get_reg_field_value_ex(reg_val, mask, shift); if (field_value == condition_value) { - if (i * delay_between_poll_us > 1000) + if (i * delay_between_poll_us > 1000 && + !IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n", delay_between_poll_us * i / 1000, func_name, line); -- GitLab From 219097df0f9d47fd882791144c60f3155750a6a7 Mon Sep 17 00:00:00 2001 From: SivapiriyanKumarasamy Date: Wed, 15 Aug 2018 16:55:18 -0400 Subject: [PATCH 0573/1692] drm/amd/display: Fix memory leak caused by missed dc_sink_release [Why] There is currently an intermittent hang from a memory leak in DTN stress testing. It is caused by unfreed memory during driver disable. [How] Do a dc_sink_release in the case that skips it incorrectly. 
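The underlying rule is the usual refcounting one: every exit path that owns a reference must drop it, including early fail-safe returns. A compact standalone sketch (sink_create/sink_release are illustrative stand-ins, not the dc_sink API):

#include <stdio.h>
#include <stdlib.h>

/* Minimal refcounted object. */
struct sink {
        int refcount;
};

static struct sink *sink_create(void)
{
        struct sink *s = calloc(1, sizeof(*s));

        if (s)
                s->refcount = 1;
        return s;
}

static void sink_release(struct sink *s)
{
        if (s && --s->refcount == 0)
                free(s);
}

/* Every return that owns prev_sink drops it, including the early
 * fail-safe path that was previously missed and leaked the object. */
static int detect(struct sink *prev_sink, int fail_safe)
{
        if (fail_safe) {
                if (prev_sink)
                        sink_release(prev_sink);
                return 0;
        }
        if (prev_sink)
                sink_release(prev_sink);
        return 1;
}

int main(void)
{
        detect(sink_create(), 1);
        detect(sink_create(), 0);
        return 0;
}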
Signed-off-by: SivapiriyanKumarasamy Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 309059871706..9d8dc2c1ca65 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -772,8 +772,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * fail-safe mode */ if (dc_is_hdmi_signal(link->connector_signal) || - dc_is_dvi_signal(link->connector_signal)) + dc_is_dvi_signal(link->connector_signal)) { + if (prev_sink != NULL) + dc_sink_release(prev_sink); + return false; + } default: break; } -- GitLab From 1f6010a96273c3111ecdc12aa274c932da920493 Mon Sep 17 00:00:00 2001 From: David Francis Date: Wed, 15 Aug 2018 14:38:30 -0400 Subject: [PATCH 0574/1692] drm/amd/display: Improve spelling, grammar, and formatting of amdgpu_dm.c comments [Why] Good spelling and grammar makes comments more pleasant and clearer. Linux has coding standards for comments that we should try to follow. [How] Fix obvious spelling and grammar issues Ensure all comments use '/*' and '*/' and multi-line comments follow linux convention Remove line-of-stars comments that do not separate sections of code and comments referring to lines of code that have since been removed Signed-off-by: David Francis Reviewed-by: Nicholas Kazlauskas Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 189 ++++++++++-------- 1 file changed, 109 insertions(+), 80 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7b857d47fadd..d878b124dd20 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -75,7 +75,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); -/* initializes drm_device display related structures, based on the information +/* + * initializes drm_device display related structures, based on the information * provided by DAL. The drm strcutures are: drm_crtc, drm_connector, * drm_encoder, drm_mode_config * @@ -237,10 +238,6 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, struct drm_crtc *crtc; struct amdgpu_crtc *amdgpu_crtc; - /* - * following if is check inherited from both functions where this one is - * used now. Need to be checked why it could happen. 
- */ if (otg_inst == -1) { WARN_ON(1); return adev->mode_info.crtcs[0]; @@ -266,7 +263,7 @@ static void dm_pflip_high_irq(void *interrupt_params) amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); /* IRQ could occur when in initial stage */ - /*TODO work and BO cleanup */ + /* TODO work and BO cleanup */ if (amdgpu_crtc == NULL) { DRM_DEBUG_DRIVER("CRTC is null, returning.\n"); return; @@ -285,9 +282,9 @@ static void dm_pflip_high_irq(void *interrupt_params) } - /* wakeup usersapce */ + /* wake up userspace */ if (amdgpu_crtc->event) { - /* Update to correct count/ts if racing with vblank irq */ + /* Update to correct count(s) if racing with vblank irq */ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event); @@ -385,8 +382,8 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) } - -/* Init display KMS +/* + * Init display KMS * * Returns 0 on success */ @@ -695,7 +692,7 @@ static int dm_resume(void *handle) mutex_unlock(&aconnector->hpd_lock); } - /* Force mode set in atomic comit */ + /* Force mode set in atomic commit */ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) new_crtc_state->active_changed = true; @@ -826,24 +823,27 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) sink = aconnector->dc_link->local_sink; - /* Edid mgmt connector gets first update only in mode_valid hook and then + /* + * Edid mgmt connector gets first update only in mode_valid hook and then * the connector sink is set to either fake or physical sink depends on link status. - * don't do it here if u are during boot + * Skip if already done during boot. */ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && aconnector->dc_em_sink) { - /* For S3 resume with headless use eml_sink to fake stream - * because on resume connecotr->sink is set ti NULL + /* + * For S3 resume with headless use eml_sink to fake stream + * because on resume connector->sink is set to NULL */ mutex_lock(&dev->mode_config.mutex); if (sink) { if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps(connector, NULL); - /* retain and release bellow are used for - * bump up refcount for sink because the link don't point - * to it anymore after disconnect so on next crtc to connector + /* + * retain and release below are used to + * bump up refcount for sink because the link doesn't point + * to it anymore after disconnect, so on next crtc to connector * reshuffle by UMD we will get into unwanted dc_sink release */ if (aconnector->dc_sink != aconnector->dc_em_sink) @@ -872,8 +872,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) return; if (aconnector->dc_sink == sink) { - /* We got a DP short pulse (Link Loss, DP CTS, etc...). - * Do nothing!! */ + /* + * We got a DP short pulse (Link Loss, DP CTS, etc...). + * Do nothing!! + */ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", aconnector->connector_id); return; @@ -884,11 +886,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) mutex_lock(&dev->mode_config.mutex); - /* 1. Update status of the drm connector - * 2. Send an event and let userspace tell us what to do */ + /* + * 1. Update status of the drm connector + * 2. Send an event and let userspace tell us what to do + */ if (sink) { - /* TODO: check if we still need the S3 mode update workaround. - * If yes, put it here. */ + /* + * TODO: check if we still need the S3 mode update workaround. 
+ * If yes, put it here. + */ if (aconnector->dc_sink) amdgpu_dm_update_freesync_caps(connector, NULL); @@ -922,8 +928,9 @@ static void handle_hpd_irq(void *param) struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; - /* In case of failure or MST no need to update connector status or notify the OS - * since (for MST case) MST does this in it's own context. + /* + * In case of failure or MST no need to update connector status or notify the OS + * since (for MST case) MST does this in its own context. */ mutex_lock(&aconnector->hpd_lock); @@ -1006,7 +1013,7 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) break; } - /* check if there is new irq to be handle */ + /* check if there is new irq to be handled */ dret = drm_dp_dpcd_read( &aconnector->dm_dp_aux.aux, dpcd_addr, @@ -1031,7 +1038,8 @@ static void handle_hpd_rx_irq(void *param) struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; - /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio + /* + * TODO:Temporary add mutex to protect hpd interrupt not have a gpio * conflict, after implement i2c helper, this mutex should be * retired. */ @@ -1122,7 +1130,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; - /* Actions of amdgpu_irq_add_id(): + /* + * Actions of amdgpu_irq_add_id(): * 1. Register a set() function with base driver. * Base driver will call set() function to enable/disable an * interrupt in DC hardware. @@ -1202,7 +1211,8 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; - /* Actions of amdgpu_irq_add_id(): + /* + * Actions of amdgpu_irq_add_id(): * 1. Register a set() function with base driver. * Base driver will call set() function to enable/disable an * interrupt in DC hardware. @@ -1211,7 +1221,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) * coming from DC hardware. * amdgpu_dm_irq_handler() will re-direct the interrupt to DC * for acknowledging and handling. - * */ + */ /* Use VSTARTUP interrupt */ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; @@ -1289,7 +1299,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) adev->ddev->mode_config.preferred_depth = 24; adev->ddev->mode_config.prefer_shadow = 1; - /* indicate support of immediate flip */ + /* indicates support for immediate flip */ adev->ddev->mode_config.async_page_flip = true; adev->ddev->mode_config.fb_base = adev->gmc.aper_base; @@ -1375,7 +1385,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, plane->base.type = mode_info->plane_type[plane_id]; /* - * HACK: IGT tests expect that each plane can only have one + * HACK: IGT tests expect that each plane can only have * one possible CRTC. For now, set one CRTC for each * plane that is not an underlay, but still allow multiple * CRTCs for underlay planes. 
@@ -1403,7 +1413,8 @@ static void register_backlight_device(struct amdgpu_display_manager *dm, if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && link->type != dc_connection_none) { - /* Event if registration failed, we should continue with + /* + * Event if registration failed, we should continue with * DM initialization because not having a backlight control * is better then a black screen. */ @@ -1416,7 +1427,8 @@ static void register_backlight_device(struct amdgpu_display_manager *dm, } -/* In this architecture, the association +/* + * In this architecture, the association * connector -> encoder -> crtc * id not really requried. The crtc and connector will hold the * display_index as an abstraction to use with DAL component @@ -1564,7 +1576,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) * amdgpu_display_funcs functions *****************************************************************************/ -/** +/* * dm_bandwidth_update - program display watermarks * * @adev: amdgpu_device pointer @@ -1773,9 +1785,11 @@ static int dm_early_init(void *handle) if (adev->mode_info.funcs == NULL) adev->mode_info.funcs = &dm_display_funcs; - /* Note: Do NOT change adev->audio_endpt_rreg and + /* + * Note: Do NOT change adev->audio_endpt_rreg and * adev->audio_endpt_wreg because they are initialised in - * amdgpu_device_init() */ + * amdgpu_device_init() + */ #if defined(CONFIG_DEBUG_KERNEL_DC) device_create_file( adev->ddev->dev, @@ -1821,7 +1835,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state, { plane_state->src_rect.x = state->src_x >> 16; plane_state->src_rect.y = state->src_y >> 16; - /*we ignore for now mantissa and do not to deal with floating pixels :(*/ + /* we ignore the mantissa for now and do not deal with floating pixels :( */ plane_state->src_rect.width = state->src_w >> 16; if (plane_state->src_rect.width == 0) @@ -1873,7 +1887,7 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, int r = amdgpu_bo_reserve(rbo, false); if (unlikely(r)) { - // Don't show error msg. 
when return -ERESTARTSYS + /* Don't show error message when returning -ERESTARTSYS */ if (r != -ERESTARTSYS) DRM_ERROR("Unable to reserve buffer: %d\n", r); return r; @@ -2069,8 +2083,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev, return ret; } -/*****************************************************************************/ - static void update_stream_scaling_settings(const struct drm_display_mode *mode, const struct dm_connector_state *dm_state, struct dc_stream_state *stream) @@ -2133,7 +2145,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector) switch (bpc) { case 0: - /* Temporary Work around, DRM don't parse color depth for + /* + * Temporary Work around, DRM doesn't parse color depth for * EDID revision before 1.4 * TODO: Fix edid parsing */ @@ -2245,7 +2258,6 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ } while (timing_out->display_color_depth > COLOR_DEPTH_888); } -/*****************************************************************************/ static void fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, @@ -2445,9 +2457,10 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) for (i = 0; i < context->stream_count ; i++) { if (!context->streams[i]) continue; - /* TODO: add a function to read AMD VSDB bits and will set + /* + * TODO: add a function to read AMD VSDB bits and set * crtc_sync_master.multi_sync_enabled flag - * For now its set to false + * For now it's set to false */ set_multisync_trigger_params(context->streams[i]); } @@ -2510,7 +2523,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, head); if (preferred_mode == NULL) { - /* This may not be an error, the use case is when we we have no + /* + * This may not be an error, the use case is when we have no * usermode calls to reset and set mode upon hotplug. In this * case, we call set mode ourselves to restore the previous mode * and the modelist may not be filled in in time. @@ -2654,10 +2668,12 @@ amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) bool connected; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - /* Notes: + /* + * Notes: * 1. This interface is NOT called in context of HPD irq. * 2. This interface *is called* in context of user-mode ioctl. Which - * makes it a bad place for *any* MST-related activit. */ + * makes it a bad place for *any* MST-related activity. 
+ */ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && !aconnector->fake_enable) @@ -2901,7 +2917,8 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) { struct dc_link *link = (struct dc_link *)aconnector->dc_link; - /* In case of headless boot with force on for DP managed connector + /* + * In case of headless boot with force on for DP managed connector * Those settings have to be != 0 to get initial modeset */ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { @@ -2929,7 +2946,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec (mode->flags & DRM_MODE_FLAG_DBLSCAN)) return result; - /* Only run this the first time mode_valid is called to initilialize + /* + * Only run this the first time mode_valid is called to initilialize * EDID mgmt */ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && @@ -2970,9 +2988,9 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec static const struct drm_connector_helper_funcs amdgpu_dm_connector_helper_funcs = { /* - * If hotplug a second bigger display in FB Con mode, bigger resolution + * If hotplugging a second bigger display in FB Con mode, bigger resolution * modes will be filtered by drm_mode_validate_size(), and those modes - * is missing after user start lightdm. So we need to renew modes list. + * are missing after user start lightdm. So we need to renew modes list. * in get_modes call back, not just return the modes count */ .get_modes = get_modes, @@ -2998,7 +3016,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return ret; } - /* In some use cases, like reset, no stream is attached */ + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; @@ -3047,7 +3065,7 @@ static void dm_drm_plane_reset(struct drm_plane *plane) amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); WARN_ON(amdgpu_state == NULL); - + if (amdgpu_state) { plane->state = &amdgpu_state->base; plane->state->plane = plane; @@ -3225,7 +3243,7 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { * TODO: these are currently initialized to rgb formats only. * For future use cases we should either initialize them dynamically based on * plane capabilities, or initialize this array to all formats, so internal drm - * check will succeed, and let DC to implement proper check + * check will succeed, and let DC implement proper check */ static const uint32_t rgb_formats[] = { DRM_FORMAT_RGB888, @@ -3551,7 +3569,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ mutex_init(&aconnector->hpd_lock); - /* configure support HPD hot plug connector_>polled default value is 0 + /* + * configure support HPD hot plug connector_>polled default value is 0 * which means HPD hot plug not supported */ switch (connector_type) { @@ -3657,7 +3676,8 @@ create_i2c(struct ddc_service *ddc_service, } -/* Note: this function assumes that dc_link_detect() was called for the +/* + * Note: this function assumes that dc_link_detect() was called for the * dc_link which will be represented by this aconnector. 
*/ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, @@ -3989,7 +4009,8 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id); - /* TODO This might fail and hence better not used, wait + /* + * TODO This might fail and hence better not used, wait * explicitly on fences instead * and in general should be called for * blocking commit to as per framework helpers @@ -4006,7 +4027,8 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, amdgpu_bo_unreserve(abo); - /* Wait until we're out of the vertical blank period before the one + /* + * Wait until we're out of the vertical blank period before the one * targeted by the flip */ while ((acrtc->enabled && @@ -4258,7 +4280,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, } } -/** +/* * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC * @crtc_state: the DRM CRTC state * @stream_state: the DC stream state. @@ -4295,8 +4317,10 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream) manage_dm_interrupts(adev, acrtc, false); } - /* Add check here for SoC's that support hardware cursor plane, to - * unset legacy_cursor_update */ + /* + * Add check here for SoC's that support hardware cursor plane, to + * unset legacy_cursor_update + */ return drm_atomic_helper_commit(dev, state, nonblock); @@ -4361,8 +4385,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * this could happen because of issues with * userspace notifications delivery. * In this case userspace tries to set mode on - * display which is disconnect in fact. - * dc_sink in NULL in this case on aconnector. + * display which is disconnected in fact. + * dc_sink is NULL in this case on aconnector. * We expect reset mode will come soon. * * This can also happen when unplug is done @@ -4431,7 +4455,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; - /* Skip any thing not scale or underscan changes */ + /* Skip anything that is not scaling or underscan changes */ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) continue; @@ -4516,7 +4540,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - /* Finally, drop a runtime PM reference for each newly disabled CRTC, + /* + * Finally, drop a runtime PM reference for each newly disabled CRTC, * so we can put the GPU into runtime suspend if we're not driving any * displays anymore */ @@ -4584,9 +4609,9 @@ static int dm_force_atomic_commit(struct drm_connector *connector) } /* - * This functions handle all cases when set mode does not come upon hotplug. - * This include when the same display is unplugged then plugged back into the - * same port and when we are running without usermode desktop manager supprot + * This function handles all cases when set mode does not come upon hotplug. 
+ * This includes when a display is unplugged then plugged back into the + * same port and when running without usermode desktop manager supprot */ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector) @@ -4615,7 +4640,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev, dm_force_atomic_commit(&aconnector->base); } -/*` +/* * Grabs all modesetting locks to serialize against any blocking commits, * Waits for completion of all non blocking commits. */ @@ -4626,7 +4651,8 @@ static int do_aquire_global_lock(struct drm_device *dev, struct drm_crtc_commit *commit; long ret; - /* Adding all modeset locks to aquire_ctx will + /* + * Adding all modeset locks to aquire_ctx will * ensure that when the framework release it the * extra locks we are locking here will get released to */ @@ -4645,7 +4671,8 @@ static int do_aquire_global_lock(struct drm_device *dev, if (!commit) continue; - /* Make sure all pending HW programming completed and + /* + * Make sure all pending HW programming completed and * page flips done */ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); @@ -4713,8 +4740,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, struct dc_stream_state *new_stream; int ret = 0; - /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */ - /* update changed items */ + /* + * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set + * update changed items + */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = NULL; struct amdgpu_dm_connector *aconnector = NULL; @@ -4739,7 +4768,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, /* TODO This hack should go away */ if (aconnector && enable) { - // Make sure fake sink is created in plug-in scenario + /* Make sure fake sink is created in plug-in scenario */ drm_new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); drm_old_conn_state = drm_atomic_get_old_connector_state(state, @@ -4759,9 +4788,9 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, /* * we can have no stream on ACTION_SET if a display - * was disconnected during S3, in this case it not and + * was disconnected during S3, in this case it is not an * error, the OS will be updated after detection, and - * do the right thing on next atomic commit + * will do the right thing on next atomic commit */ if (!new_stream) { @@ -5119,7 +5148,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; /* Check scaling and underscan changes*/ - /*TODO Removed scaling changes validation due to inability to commit + /* TODO Removed scaling changes validation due to inability to commit * new stream into context w\o causing full reset. Need to * decide how to handle. */ @@ -5142,7 +5171,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, /* * For full updates case when - * removing/adding/updating streams on once CRTC while flipping + * removing/adding/updating streams on one CRTC while flipping * on another CRTC, * acquiring global lock will guarantee that any such full * update commit -- GitLab From 46659a83e4662ed92000ec13445b8c0ca96fd2cc Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 15 Aug 2018 12:00:23 -0400 Subject: [PATCH 0575/1692] drm/amd/display: Support reading hw state from debugfs file [Why] Logging hardware state can be done by triggering a write to the debugfs file. 
It would also be useful to be able to read the hardware state from the debugfs file to be able to generate a clean log without timestamps. [How] Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log Threading is an obvious concern when dealing with multiple debugfs operations and blocking on global state in dm or dc seems unfavorable. Adding an extra parameter for the debugfs log context state is the implementation done here. Existing code that made use of DTN_INFO and its associated macros needed to be refactored to support this. We don't know the size of the log in advance so it reallocates the log string dynamically. Once the log has been generated it's copied into the user supplied buffer for the debugfs. This allows for seeking support but it's worth nothing that unlike triggering output via dmesg the hardware state might change in-between reads if your buffer size is too small. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Jordan Lazare Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 39 ++++++++- .../amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 81 ++++++++++++++++--- .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 24 +++--- drivers/gpu/drm/amd/display/dc/dm_services.h | 10 ++- .../gpu/drm/amd/display/dc/inc/hw_sequencer.h | 3 +- .../amd/display/include/logger_interface.h | 6 +- .../drm/amd/display/include/logger_types.h | 6 ++ 7 files changed, 140 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index e79ac1e2c460..35ca732f7ffe 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -720,16 +720,49 @@ int connector_debugfs_init(struct amdgpu_dm_connector *connector) return 0; } +/* + * Writes DTN log state to the user supplied buffer. + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log + */ static ssize_t dtn_log_read( struct file *f, char __user *buf, size_t size, loff_t *pos) { - /* TODO: Write log output to the user supplied buffer. */ - return 0; + struct amdgpu_device *adev = file_inode(f)->i_private; + struct dc *dc = adev->dm.dc; + struct dc_log_buffer_ctx log_ctx = { 0 }; + ssize_t result = 0; + + if (!buf || !size) + return -EINVAL; + + if (!dc->hwss.log_hw_state) + return 0; + + dc->hwss.log_hw_state(dc, &log_ctx); + + if (*pos < log_ctx.pos) { + size_t to_copy = log_ctx.pos - *pos; + + to_copy = min(to_copy, size); + + if (!copy_to_user(buf, log_ctx.buf + *pos, to_copy)) { + *pos += to_copy; + result = to_copy; + } + } + + kfree(log_ctx.buf); + + return result; } +/* + * Writes DTN log state to dmesg when triggered via a write. 
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log + */ static ssize_t dtn_log_write( struct file *f, const char __user *buf, @@ -744,7 +777,7 @@ static ssize_t dtn_log_write( return 0; if (dc->hwss.log_hw_state) - dc->hwss.log_hw_state(dc); + dc->hwss.log_hw_state(dc, NULL); return size; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 86b63ce1dbf6..39997d977efb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -335,28 +335,91 @@ bool dm_helpers_dp_mst_send_payload_allocation( return true; } -void dm_dtn_log_begin(struct dc_context *ctx) +void dm_dtn_log_begin(struct dc_context *ctx, + struct dc_log_buffer_ctx *log_ctx) { - pr_info("[dtn begin]\n"); + static const char msg[] = "[dtn begin]\n"; + + if (!log_ctx) { + pr_info("%s", msg); + return; + } + + dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); } void dm_dtn_log_append_v(struct dc_context *ctx, - const char *msg, ...) + struct dc_log_buffer_ctx *log_ctx, + const char *msg, ...) { - struct va_format vaf; va_list args; + size_t total; + int n; + + if (!log_ctx) { + /* No context, redirect to dmesg. */ + struct va_format vaf; + + vaf.fmt = msg; + vaf.va = &args; + + va_start(args, msg); + pr_info("%pV", &vaf); + va_end(args); + return; + } + + /* Measure the output. */ va_start(args, msg); - vaf.fmt = msg; - vaf.va = &args; + n = vsnprintf(NULL, 0, msg, args); + va_end(args); + + if (n <= 0) + return; + + /* Reallocate the string buffer as needed. */ + total = log_ctx->pos + n + 1; - pr_info("%pV", &vaf); + if (total > log_ctx->size) { + char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL); + + if (buf) { + memcpy(buf, log_ctx->buf, log_ctx->pos); + kfree(log_ctx->buf); + + log_ctx->buf = buf; + log_ctx->size = total; + } + } + + if (!log_ctx->buf) + return; + + /* Write the formatted string to the log buffer. 
*/ + va_start(args, msg); + n = vscnprintf( + log_ctx->buf + log_ctx->pos, + log_ctx->size - log_ctx->pos, + msg, + args); va_end(args); + + if (n > 0) + log_ctx->pos += n; } -void dm_dtn_log_end(struct dc_context *ctx) +void dm_dtn_log_end(struct dc_context *ctx, + struct dc_log_buffer_ctx *log_ctx) { - pr_info("[dtn end]\n"); + static const char msg[] = "[dtn end]\n"; + + if (!log_ctx) { + pr_info("%s", msg); + return; + } + + dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); } bool dm_helpers_dp_mst_start_top_mgr( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 1c5bb148efb7..6bd4ec39f869 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -58,9 +58,11 @@ /*print is 17 wide, first two characters are spaces*/ #define DTN_INFO_MICRO_SEC(ref_cycle) \ - print_microsec(dc_ctx, ref_cycle) + print_microsec(dc_ctx, log_ctx, ref_cycle) -void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle) +void print_microsec(struct dc_context *dc_ctx, + struct dc_log_buffer_ctx *log_ctx, + uint32_t ref_cycle) { const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000; static const unsigned int frac = 1000; @@ -71,7 +73,8 @@ void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle) us_x10 % frac); } -static void log_mpc_crc(struct dc *dc) +static void log_mpc_crc(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) { struct dc_context *dc_ctx = dc->ctx; struct dce_hwseq *hws = dc->hwseq; @@ -84,7 +87,7 @@ static void log_mpc_crc(struct dc *dc) REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G)); } -void dcn10_log_hubbub_state(struct dc *dc) +void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx) { struct dc_context *dc_ctx = dc->ctx; struct dcn_hubbub_wm wm = {0}; @@ -111,7 +114,7 @@ void dcn10_log_hubbub_state(struct dc *dc) DTN_INFO("\n"); } -static void dcn10_log_hubp_states(struct dc *dc) +static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; @@ -226,7 +229,8 @@ static void dcn10_log_hubp_states(struct dc *dc) DTN_INFO("\n"); } -void dcn10_log_hw_state(struct dc *dc) +void dcn10_log_hw_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; @@ -234,9 +238,9 @@ void dcn10_log_hw_state(struct dc *dc) DTN_INFO_BEGIN(); - dcn10_log_hubbub_state(dc); + dcn10_log_hubbub_state(dc, log_ctx); - dcn10_log_hubp_states(dc); + dcn10_log_hubp_states(dc, log_ctx); DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode" " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 " @@ -347,7 +351,7 @@ void dcn10_log_hw_state(struct dc *dc) dc->current_state->bw.dcn.clk.fclk_khz, dc->current_state->bw.dcn.clk.socclk_khz); - log_mpc_crc(dc); + log_mpc_crc(dc, log_ctx); DTN_INFO_END(); } @@ -857,7 +861,7 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) { if (should_log_hw_state) { - dcn10_log_hw_state(dc); + dcn10_log_hw_state(dc, NULL); } BREAK_TO_DEBUGGER(); if (dcn10_hw_wa_force_recovery(dc)) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index eb5ab3978e84..28128c02de00 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ 
-359,8 +359,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line); * Debug and verification hooks */ -void dm_dtn_log_begin(struct dc_context *ctx); -void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...); -void dm_dtn_log_end(struct dc_context *ctx); +void dm_dtn_log_begin(struct dc_context *ctx, + struct dc_log_buffer_ctx *log_ctx); +void dm_dtn_log_append_v(struct dc_context *ctx, + struct dc_log_buffer_ctx *log_ctx, + const char *msg, ...); +void dm_dtn_log_end(struct dc_context *ctx, + struct dc_log_buffer_ctx *log_ctx); #endif /* __DM_SERVICES_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 9a97356923e2..26f29d5da3d8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -202,7 +202,8 @@ struct hw_sequencer_funcs { void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable); - void (*log_hw_state)(struct dc *dc); + void (*log_hw_state)(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx); void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask); void (*wait_for_mpcc_disconnect)(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h index e3c79616682d..a0b68c266dab 100644 --- a/drivers/gpu/drm/amd/display/include/logger_interface.h +++ b/drivers/gpu/drm/amd/display/include/logger_interface.h @@ -129,13 +129,13 @@ void context_clock_trace( * Display Test Next logging */ #define DTN_INFO_BEGIN() \ - dm_dtn_log_begin(dc_ctx) + dm_dtn_log_begin(dc_ctx, log_ctx) #define DTN_INFO(msg, ...) \ - dm_dtn_log_append_v(dc_ctx, msg, ##__VA_ARGS__) + dm_dtn_log_append_v(dc_ctx, log_ctx, msg, ##__VA_ARGS__) #define DTN_INFO_END() \ - dm_dtn_log_end(dc_ctx) + dm_dtn_log_end(dc_ctx, log_ctx) #define PERFORMANCE_TRACE_START() \ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx) diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index bc5732668092..d96550d6434d 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -66,6 +66,12 @@ struct dal_logger; +struct dc_log_buffer_ctx { + char *buf; + size_t pos; + size_t size; +}; + enum dc_log_type { LOG_ERROR = 0, LOG_WARNING, -- GitLab From d377ae4e3754ee3f81f63a0d9e3eadba7830d3e3 Mon Sep 17 00:00:00 2001 From: David Francis Date: Fri, 3 Aug 2018 14:25:19 -0400 Subject: [PATCH 0576/1692] drm/amd/display: Remove redundant i2c structs [Why] The i2c code contains two structs that contain the same information as i2c_payload [How] Replace references to those structs with references to i2c_payload dce_i2c_transaction_request->status was written to but never read, so all references to it are removed Signed-off-by: David Francis Reviewed-by: Jordan Lazare Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h | 33 -------- .../gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 84 ++++--------------- .../gpu/drm/amd/display/dc/dce/dce_i2c_hw.h | 5 -- .../gpu/drm/amd/display/dc/dce/dce_i2c_sw.c | 83 +++--------------- 4 files changed, 28 insertions(+), 177 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h index d655f89578ca..a171c5cd8439 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h @@ -30,39 +30,6 @@ #include "dce_i2c_hw.h" #include "dce_i2c_sw.h" -enum dce_i2c_transaction_status { - DCE_I2C_TRANSACTION_STATUS_UNKNOWN = (-1L), - DCE_I2C_TRANSACTION_STATUS_SUCCEEDED, - DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY, - DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT, - DCE_I2C_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR, - DCE_I2C_TRANSACTION_STATUS_FAILED_NACK, - DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE, - DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION, - DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION, - DCE_I2C_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW, - DCE_I2C_TRANSACTION_STATUS_FAILED_HPD_DISCON -}; - -enum dce_i2c_transaction_operation { - DCE_I2C_TRANSACTION_READ, - DCE_I2C_TRANSACTION_WRITE -}; - -struct dce_i2c_transaction_payload { - enum dce_i2c_transaction_address_space address_space; - uint32_t address; - uint32_t length; - uint8_t *data; -}; - -struct dce_i2c_transaction_request { - enum dce_i2c_transaction_operation operation; - struct dce_i2c_transaction_payload payload; - enum dce_i2c_transaction_status status; -}; - - bool dce_i2c_submit_command( struct resource_pool *pool, struct ddc *ddc, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index cd7da59794d0..2800d3fa49da 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -129,7 +129,7 @@ static uint32_t get_speed( static void process_channel_reply( struct dce_i2c_hw *dce_i2c_hw, - struct i2c_reply_transaction_data *reply) + struct i2c_payload *reply) { uint32_t length = reply->length; uint8_t *buffer = reply->data; @@ -522,9 +522,9 @@ static uint32_t get_transaction_timeout_hw( return period_timeout * num_of_clock_stretches; } -bool dce_i2c_hw_engine_submit_request( +bool dce_i2c_hw_engine_submit_payload( struct dce_i2c_hw *dce_i2c_hw, - struct dce_i2c_transaction_request *dce_i2c_request, + struct i2c_payload *payload, bool middle_of_transaction) { @@ -541,46 +541,36 @@ bool dce_i2c_hw_engine_submit_request( * the number of free bytes in HW buffer (minus one for address) */ - if (dce_i2c_request->payload.length >= + if (payload->length >= get_hw_buffer_available_size(dce_i2c_hw)) { - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW; return false; } - if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ) + if (!payload->write) request.action = middle_of_transaction ? DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_READ; - else if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_WRITE) + else request.action = middle_of_transaction ? 
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_WRITE; - else { - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION; - /* [anaumov] in DAL2, there was no "return false" */ - return false; - } - request.address = (uint8_t) dce_i2c_request->payload.address; - request.length = dce_i2c_request->payload.length; - request.data = dce_i2c_request->payload.data; + + request.address = (uint8_t) ((payload->address << 1) | !payload->write); + request.length = payload->length; + request.data = payload->data; /* obtain timeout value before submitting request */ transaction_timeout = get_transaction_timeout_hw( - dce_i2c_hw, dce_i2c_request->payload.length + 1); + dce_i2c_hw, payload->length + 1); submit_channel_request_hw( dce_i2c_hw, &request); if ((request.status == I2C_CHANNEL_OPERATION_FAILED) || - (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) { - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY; + (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) return false; - } /* wait until transaction proceed */ @@ -591,37 +581,11 @@ bool dce_i2c_hw_engine_submit_request( /* update transaction status */ - switch (operation_result) { - case I2C_CHANNEL_OPERATION_SUCCEEDED: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_SUCCEEDED; + if (operation_result == I2C_CHANNEL_OPERATION_SUCCEEDED) result = true; - break; - case I2C_CHANNEL_OPERATION_NO_RESPONSE: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_NACK; - break; - case I2C_CHANNEL_OPERATION_TIMEOUT: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT; - break; - case I2C_CHANNEL_OPERATION_FAILED: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE; - break; - default: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION; - } - if (result && (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ)) { - struct i2c_reply_transaction_data reply; - - reply.data = dce_i2c_request->payload.data; - reply.length = dce_i2c_request->payload.length; - - process_channel_reply(dce_i2c_hw, &reply); - } + if (result && (!payload->write)) + process_channel_reply(dce_i2c_hw, payload); return result; } @@ -644,22 +608,8 @@ bool dce_i2c_submit_command_hw( struct i2c_payload *payload = cmd->payloads + index_of_payload; - struct dce_i2c_transaction_request request = { 0 }; - - request.operation = payload->write ? 
- DCE_I2C_TRANSACTION_WRITE : - DCE_I2C_TRANSACTION_READ; - - request.payload.address_space = - DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C; - request.payload.address = (payload->address << 1) | - !payload->write; - request.payload.length = payload->length; - request.payload.data = payload->data; - - - if (!dce_i2c_hw_engine_submit_request( - dce_i2c_hw, &request, mot)) { + if (!dce_i2c_hw_engine_submit_payload( + dce_i2c_hw, payload, mot)) { result = false; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index 742c1da84d45..7f19bb439665 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -236,11 +236,6 @@ struct i2c_request_transaction_data { uint8_t *data; }; -struct i2c_reply_transaction_data { - uint32_t length; - uint8_t *data; -}; - struct dce_i2c_hw { struct ddc *ddc; uint32_t original_speed; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c index ab11129ea425..f0266694cb56 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c @@ -70,13 +70,6 @@ static void release_engine_dce_sw( dce_i2c_sw->ddc = NULL; } -enum i2c_channel_operation_result dce_i2c_sw_engine_get_channel_status( - struct dce_i2c_sw *engine, - uint8_t *returned_bytes) -{ - /* No arbitration with VBIOS is performed since DCE 6.0 */ - return I2C_CHANNEL_OPERATION_SUCCEEDED; -} static bool get_hw_supported_ddc_line( struct ddc *ddc, enum gpio_ddc_line *line) @@ -469,73 +462,33 @@ void dce_i2c_sw_engine_submit_channel_request( I2C_CHANNEL_OPERATION_SUCCEEDED : I2C_CHANNEL_OPERATION_FAILED; } -bool dce_i2c_sw_engine_submit_request( +bool dce_i2c_sw_engine_submit_payload( struct dce_i2c_sw *engine, - struct dce_i2c_transaction_request *dce_i2c_request, + struct i2c_payload *payload, bool middle_of_transaction) { struct i2c_request_transaction_data request; - bool operation_succeeded = false; - if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_READ) + if (!payload->write) request.action = middle_of_transaction ? DCE_I2C_TRANSACTION_ACTION_I2C_READ_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_READ; - else if (dce_i2c_request->operation == DCE_I2C_TRANSACTION_WRITE) + else request.action = middle_of_transaction ? 
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT : DCE_I2C_TRANSACTION_ACTION_I2C_WRITE; - else { - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_INVALID_OPERATION; - /* in DAL2, there was no "return false" */ - return false; - } - request.address = (uint8_t)dce_i2c_request->payload.address; - request.length = dce_i2c_request->payload.length; - request.data = dce_i2c_request->payload.data; + request.address = (uint8_t) ((payload->address << 1) | !payload->write); + request.length = payload->length; + request.data = payload->data; dce_i2c_sw_engine_submit_channel_request(engine, &request); if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) || (request.status == I2C_CHANNEL_OPERATION_FAILED)) - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY; - else { - enum i2c_channel_operation_result operation_result; - - do { - operation_result = - dce_i2c_sw_engine_get_channel_status(engine, NULL); - - switch (operation_result) { - case I2C_CHANNEL_OPERATION_SUCCEEDED: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_SUCCEEDED; - operation_succeeded = true; - break; - case I2C_CHANNEL_OPERATION_NO_RESPONSE: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_NACK; - break; - case I2C_CHANNEL_OPERATION_TIMEOUT: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_TIMEOUT; - break; - case I2C_CHANNEL_OPERATION_FAILED: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_INCOMPLETE; - break; - default: - dce_i2c_request->status = - DCE_I2C_TRANSACTION_STATUS_FAILED_OPERATION; - break; - } - } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY); - } + return false; - return operation_succeeded; + return true; } bool dce_i2c_submit_command_sw( struct resource_pool *pool, @@ -555,22 +508,8 @@ bool dce_i2c_submit_command_sw( struct i2c_payload *payload = cmd->payloads + index_of_payload; - struct dce_i2c_transaction_request request = { 0 }; - - request.operation = payload->write ? - DCE_I2C_TRANSACTION_WRITE : - DCE_I2C_TRANSACTION_READ; - - request.payload.address_space = - DCE_I2C_TRANSACTION_ADDRESS_SPACE_I2C; - request.payload.address = (payload->address << 1) | - !payload->write; - request.payload.length = payload->length; - request.payload.data = payload->data; - - - if (!dce_i2c_sw_engine_submit_request( - dce_i2c_sw, &request, mot)) { + if (!dce_i2c_sw_engine_submit_payload( + dce_i2c_sw, payload, mot)) { result = false; break; } -- GitLab From ad908423ef86f1787b635a8830d49f50ff862295 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 15 Aug 2018 17:35:50 -0400 Subject: [PATCH 0577/1692] drm/amd/display: support 48 MHZ refclk off [Why] On PCO and up, whenever SMU receive message to indicate active display count = 0. SMU will turn off 48MHZ TMDP reference clock by writing to 1 TMDP_48M_Refclk_Driver_PWDN. Once this clock is off, no PHY register will respond to register access. This means our current sequence of notifying display count along with requesting clock will cause driver to hang when accessing PHY registers after displays count goes to 0. [How] Separate the PPSMC_MSG_SetDisplayCount message from the SMU messages that request clocks, have display own sequencing of this message so that we can send it at the appropriate time. Do not redundantly power off HW when entering S3, S4, since display should already be called to disable all streams. And ASIC soon be powered down. 
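For illustration, the required sequencing looks roughly like the sketch below. Every name in it is a placeholder rather than the real DC or pp_smu interface; the actual hooks are the existing pplib_apply_display_requirements() call and the new notify_display_count_to_smu() helper in the diff that follows.

/* Placeholder names, illustration only: what matters is the ordering of the
 * display-count message relative to stream/PHY programming.
 */
void disable_stream_and_phy(void);
void enable_stream_and_phy(void);
void smu_set_display_count(int count);

/* Tearing down the last display: finish all PHY work before SMU may gate
 * the 48 MHz refclk.
 */
void example_disable_last_display(void)
{
	disable_stream_and_phy();
	smu_set_display_count(0);
}

/* Bringing up the first display: make sure the refclk is running again
 * before any PHY register is touched.
 */
void example_enable_first_display(void)
{
	smu_set_display_count(1);
	enable_stream_and_phy();
}

The rule of thumb is that the count may only reach zero after the last PHY access, and must be non-zero again before the first one.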
Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 36 ++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 32318b4e0d1e..1c438eedf77a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1367,6 +1367,34 @@ static struct dc_stream_status *stream_get_status( static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; +static void notify_display_count_to_smu( + struct dc *dc, + struct dc_state *context) +{ + int i, display_count; + struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; + + /* + * if function pointer not set up, this message is + * sent as part of pplib_apply_display_requirements. + * So just return. + */ + if (!pp_smu->set_display_count) + return; + + display_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_state *stream = context->streams[i]; + + /* only notify active stream */ + if (stream->dpms_off) + continue; + + display_count++; + } + + pp_smu->set_display_count(&pp_smu->pp_smu, display_count); +} static void commit_planes_do_stream_update(struct dc *dc, struct dc_stream_state *stream, @@ -1420,13 +1448,17 @@ static void commit_planes_do_stream_update(struct dc *dc, core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE); dc->hwss.pplib_apply_display_requirements( dc, dc->current_state); + notify_display_count_to_smu(dc, dc->current_state); } else { dc->hwss.pplib_apply_display_requirements( dc, dc->current_state); + notify_display_count_to_smu(dc, dc->current_state); core_link_enable_stream(dc->current_state, pipe_ctx); } } + + if (stream_update->abm_level && pipe_ctx->stream_res.abm) { if (pipe_ctx->stream_res.tg->funcs->is_blanked) { // if otg funcs defined check if blanked before programming @@ -1662,9 +1694,7 @@ void dc_set_power_state( dc->hwss.init_hw(dc); break; default: - - dc->hwss.power_down(dc); - + ASSERT(dc->current_state->stream_count == 0); /* Zero out the current context so that on resume we start with * clean state, and dc hw programming optimizations will not * cause any trouble. -- GitLab From 9bbf6a5341092e8a9b4e7b02bea6721e29ced9ef Mon Sep 17 00:00:00 2001 From: David Francis Date: Fri, 3 Aug 2018 13:24:28 -0400 Subject: [PATCH 0578/1692] drm/amd/display: Flatten unnecessary i2c functions [Why] The dce_i2c_hw code contained four funtcions that were only called in one place and did not have a clearly delineated purpose. [How] Inline these functions, keeping the same functionality. This is not a functional change. The functions disable_i2c_hw_engine and release_engine_dce_hw were pulled into their respective callers. The most interesting part of this change is the acquire functions. dce_i2c_hw_engine_acquire_engine was pulled into dce_i2c_engine_acquire_hw, and dce_i2c_engine_acquire_hw was pulled into acquire_i2c_hw_engine. Some notes to show that this change is not functional: -Failure conditions in any function resulted in a cascade of calls that ended in a 'return NULL'. Those are replaced with a direct 'return NULL'. -The variable result is the one from dce_i2c_hw_engine_acquire_engine. The boolean result used as part of return logic was removed. 
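For reference, the flattened acquire path ends up with roughly the shape sketched below. This is a schematic with simplified stand-in types and helper names, not the actual dce_i2c_hw code; it is only meant to make the notes above easier to check.

#include <linux/stddef.h>
#include <linux/types.h>

/* Simplified stand-ins for the real dce_i2c_hw types and helpers. */
struct example_ddc;
struct example_engine { struct example_ddc *ddc; };
struct example_pool { bool buffer_in_use; };

struct example_engine *engine_for_ddc_line(struct example_pool *p, struct example_ddc *d);
int open_ddc(struct example_ddc *d);	/* returns 0 on success */
bool setup_engine(struct example_engine *e);
void release_engine(struct example_engine *e);
void wait_a_bit(void);

struct example_engine *example_acquire(struct example_pool *pool, struct example_ddc *ddc)
{
	struct example_engine *eng;
	unsigned int tries = 0;
	int res;

	eng = engine_for_ddc_line(pool, ddc);	/* was a separate lookup helper */
	if (!eng || pool->buffer_in_use)
		return NULL;			/* failure paths now return NULL directly */

	do {					/* was dce_i2c_engine_acquire_hw(): wait out VBIOS */
		res = open_ddc(ddc);
		if (res == 0)
			break;
		wait_a_bit();
	} while (++tries < 2);

	if (res != 0)
		return NULL;

	eng->ddc = ddc;				/* the "second half", run once after the loop */
	if (!setup_engine(eng)) {
		release_engine(eng);
		return NULL;
	}

	pool->buffer_in_use = true;
	return eng;
}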
-As the second half of dce_i2c_hw_engine_acquire_engine is only executed if that function is returning true and therefore exiting the do-while loop in dce_i2c_engine_acquire_hw, those lines were moved outside of the loop. Signed-off-by: David Francis Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 111 ++++++------------ 1 file changed, 34 insertions(+), 77 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 2800d3fa49da..40f2d6e0b122 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -36,12 +36,6 @@ #define FN(reg_name, field_name) \ dce_i2c_hw->shifts->field_name, dce_i2c_hw->masks->field_name -static void disable_i2c_hw_engine( - struct dce_i2c_hw *dce_i2c_hw) -{ - REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0); -} - static void execute_transaction( struct dce_i2c_hw *dce_i2c_hw) { @@ -348,60 +342,40 @@ static void release_engine( REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1); /* HW I2c engine - clock gating feature */ if (!dce_i2c_hw->engine_keep_power_up_count) - disable_i2c_hw_engine(dce_i2c_hw); + REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0); } -static void release_engine_dce_hw( +struct dce_i2c_hw *acquire_i2c_hw_engine( struct resource_pool *pool, - struct dce_i2c_hw *dce_i2c_hw) -{ - pool->i2c_hw_buffer_in_use = false; - - release_engine(dce_i2c_hw); - dal_ddc_close(dce_i2c_hw->ddc); - - dce_i2c_hw->ddc = NULL; -} - -bool dce_i2c_hw_engine_acquire_engine( - struct dce_i2c_hw *dce_i2c_hw, struct ddc *ddc) { - + uint32_t counter = 0; enum gpio_result result; uint32_t current_speed; + struct dce_i2c_hw *dce_i2c_hw = NULL; - result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, - GPIO_DDC_CONFIG_TYPE_MODE_I2C); - - if (result != GPIO_RESULT_OK) - return false; - - dce_i2c_hw->ddc = ddc; - - - current_speed = get_speed(dce_i2c_hw); + if (!ddc) + return NULL; - if (current_speed) - dce_i2c_hw->original_speed = current_speed; + if (ddc->hw_info.hw_supported) { + enum gpio_ddc_line line = dal_ddc_get_line(ddc); - return true; -} + if (line < pool->pipe_count) + dce_i2c_hw = pool->hw_i2cs[line]; + } -bool dce_i2c_engine_acquire_hw( - struct dce_i2c_hw *dce_i2c_hw, - struct ddc *ddc_handle) -{ + if (!dce_i2c_hw) + return NULL; - uint32_t counter = 0; - bool result; + if (pool->i2c_hw_buffer_in_use) + return NULL; do { - result = dce_i2c_hw_engine_acquire_engine( - dce_i2c_hw, ddc_handle); + result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, + GPIO_DDC_CONFIG_TYPE_MODE_I2C); - if (result) + if (result == GPIO_RESULT_OK) break; /* i2c_engine is busy by VBios, lets wait and retry */ @@ -411,45 +385,23 @@ bool dce_i2c_engine_acquire_hw( ++counter; } while (counter < 2); - if (result) { - if (!setup_engine(dce_i2c_hw)) { - release_engine(dce_i2c_hw); - result = false; - } - } - - return result; -} - -struct dce_i2c_hw *acquire_i2c_hw_engine( - struct resource_pool *pool, - struct ddc *ddc) -{ - - struct dce_i2c_hw *engine = NULL; - - if (!ddc) + if (result != GPIO_RESULT_OK) return NULL; - if (ddc->hw_info.hw_supported) { - enum gpio_ddc_line line = dal_ddc_get_line(ddc); - - if (line < pool->pipe_count) - engine = pool->hw_i2cs[line]; - } + dce_i2c_hw->ddc = ddc; - if (!engine) - return NULL; + current_speed = get_speed(dce_i2c_hw); + if (current_speed) + dce_i2c_hw->original_speed = current_speed; - if (!pool->i2c_hw_buffer_in_use && - dce_i2c_engine_acquire_hw(engine, ddc)) { - 
pool->i2c_hw_buffer_in_use = true; - return engine; + if (!setup_engine(dce_i2c_hw)) { + release_engine(dce_i2c_hw); + return NULL; } - - return NULL; + pool->i2c_hw_buffer_in_use = true; + return dce_i2c_hw; } enum i2c_channel_operation_result dce_i2c_hw_engine_wait_on_operation_result( @@ -619,7 +571,12 @@ bool dce_i2c_submit_command_hw( ++index_of_payload; } - release_engine_dce_hw(pool, dce_i2c_hw); + pool->i2c_hw_buffer_in_use = false; + + release_engine(dce_i2c_hw); + dal_ddc_close(dce_i2c_hw->ddc); + + dce_i2c_hw->ddc = NULL; return result; } -- GitLab From 7d0aa3765fa839c0047a8924807e5201f4d9e76d Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Mon, 27 Aug 2018 14:17:26 -0400 Subject: [PATCH 0579/1692] drm/amdgpu: Refine gmc9 VM fault print. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fault reports the page number where the fault happend and not the exact faulty address. Update the print message to reflect that. Signed-off-by: Andrey Grodzovsky Reviewed-by: Alex Deucher Reviewed-by: Marek Olšák Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 453bd7ea50e7..39bee98155ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -270,7 +270,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, task_info.task_name, task_info.pid); - dev_err(adev->dev, " at address 0x%016llx from %d\n", + dev_err(adev->dev, " in page starting at address 0x%016llx from %d\n", addr, entry->client_id); if (!amdgpu_sriov_vf(adev)) dev_err(adev->dev, -- GitLab From 46cb52ad414ac829680d0bb8cc7090ac2b577ca7 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 15 Jul 2018 22:09:29 +0200 Subject: [PATCH 0580/1692] ata: ftide010: Add a quirk for SQ201 The DMA is broken on this specific device for some unknown reason (probably badly designed or plain broken interface electronics) and will only work with PIO. Other users of the same hardware does not have this problem. Add a specific quirk so that this Gemini device gets DMA turned off. Also fix up some code around passing the port information around in probe while we're at it. 
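The quirk itself follows a standard OF/libata pattern; a minimal sketch is below. The compatible string and the function name here are placeholders, only of_machine_is_compatible() and the ata_port_info fields are the real interfaces, and the exact hook point in the driver is in the diff that follows.

#include <linux/of.h>
#include <linux/libata.h>

/*
 * Illustration only: force a port down to PIO when running on a board whose
 * DMA wiring is known to be broken. "vendor,broken-board" is a placeholder.
 */
void example_apply_board_quirk(struct ata_port_info *pi)
{
	if (of_machine_is_compatible("vendor,broken-board")) {
		pi->mwdma_mask = 0;	/* drop all multiword DMA modes */
		pi->udma_mask = 0;	/* drop all UDMA modes */
		/* pi->pio_mask is left alone, so PIO keeps working */
	}
}

The probe rework mentioned above is what makes this safe to do per board: the quirk modifies a stack copy of the port info handed down from probe rather than the shared static template, so other boards driven by the same code keep their DMA modes.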
Signed-off-by: Linus Walleij Signed-off-by: Jens Axboe --- drivers/ata/pata_ftide010.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 5d4b72e21161..569a4a662dcd 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c @@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = { .qc_issue = ftide010_qc_issue, }; -static struct ata_port_info ftide010_port_info[] = { - { - .flags = ATA_FLAG_SLAVE_POSS, - .mwdma_mask = ATA_MWDMA2, - .udma_mask = ATA_UDMA6, - .pio_mask = ATA_PIO4, - .port_ops = &pata_ftide010_port_ops, - }, +static struct ata_port_info ftide010_port_info = { + .flags = ATA_FLAG_SLAVE_POSS, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .pio_mask = ATA_PIO4, + .port_ops = &pata_ftide010_port_ops, }; #if IS_ENABLED(CONFIG_SATA_GEMINI) @@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap) } static int pata_ftide010_gemini_init(struct ftide010 *ftide, + struct ata_port_info *pi, bool is_ata1) { struct device *dev = ftide->dev; @@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, /* Flag port as SATA-capable */ if (gemini_sata_bridge_enabled(sg, is_ata1)) - ftide010_port_info[0].flags |= ATA_FLAG_SATA; + pi->flags |= ATA_FLAG_SATA; + + /* This device has broken DMA, only PIO works */ + if (of_machine_is_compatible("itian,sq201")) { + pi->mwdma_mask = 0; + pi->udma_mask = 0; + } /* * We assume that a simple 40-wire cable is used in the PATA mode. @@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide, } #else static int pata_ftide010_gemini_init(struct ftide010 *ftide, + struct ata_port_info *pi, bool is_ata1) { return -ENOTSUPP; @@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const struct ata_port_info pi = ftide010_port_info[0]; + struct ata_port_info pi = ftide010_port_info; const struct ata_port_info *ppi[] = { &pi, NULL }; struct ftide010 *ftide; struct resource *res; @@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev) * are ATA0. This will also set up the cable types. */ ret = pata_ftide010_gemini_init(ftide, + &pi, (res->start == 0x63400000)); if (ret) goto err_dis_clk; -- GitLab From bab1be79a5169ac748d8292b20c86d874022d7ba Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 27 Aug 2018 18:38:31 +0800 Subject: [PATCH 0581/1692] sctp: hold transport before accessing its asoc in sctp_transport_get_next As Marcelo noticed, in sctp_transport_get_next, it is iterating over transports but then also accessing the association directly, without checking any refcnts before that, which can cause an use-after-free Read. So fix it by holding transport before accessing the association. With that, sctp_transport_hold calls can be removed in the later places. Fixes: 626d16f50f39 ("sctp: export some apis or variables for sctp_diag and reuse some for proc") Reported-by: syzbot+fe62a0c9aa6a85c6de16@syzkaller.appspotmail.com Signed-off-by: Xin Long Acked-by: Neil Horman Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- net/sctp/proc.c | 4 ---- net/sctp/socket.c | 22 +++++++++++++++------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/net/sctp/proc.c b/net/sctp/proc.c index ef5c9a82d4e8..4d6f1c8d6659 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -264,8 +264,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) } transport = (struct sctp_transport *)v; - if (!sctp_transport_hold(transport)) - return 0; assoc = transport->asoc; epb = &assoc->base; sk = epb->sk; @@ -322,8 +320,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) } transport = (struct sctp_transport *)v; - if (!sctp_transport_hold(transport)) - return 0; assoc = transport->asoc; list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e96b15a66aba..aa76586a1a1c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5005,9 +5005,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net, break; } + if (!sctp_transport_hold(t)) + continue; + if (net_eq(sock_net(t->asoc->base.sk), net) && t->asoc->peer.primary_path == t) break; + + sctp_transport_put(t); } return t; @@ -5017,13 +5022,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net, struct rhashtable_iter *iter, int pos) { - void *obj = SEQ_START_TOKEN; + struct sctp_transport *t; - while (pos && (obj = sctp_transport_get_next(net, iter)) && - !IS_ERR(obj)) - pos--; + if (!pos) + return SEQ_START_TOKEN; - return obj; + while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) { + if (!--pos) + break; + sctp_transport_put(t); + } + + return t; } int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), @@ -5082,8 +5092,6 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), tsp = sctp_transport_get_idx(net, &hti, *pos + 1); for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { - if (!sctp_transport_hold(tsp)) - continue; ret = cb(tsp, p); if (ret) break; -- GitLab From 834539e69a5fe2aab33cc777ccfd4a4fcc5b9770 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 27 Aug 2018 18:40:18 +0800 Subject: [PATCH 0582/1692] sctp: remove useless start_fail from sctp_ht_iter in proc After changing rhashtable_walk_start to return void, start_fail would never be set other value than 0, and the checking for start_fail is pointless, so remove it. Fixes: 97a6ec4ac021 ("rhashtable: Change rhashtable_walk_start to return void") Signed-off-by: Xin Long Acked-by: Neil Horman Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller --- net/sctp/proc.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 4d6f1c8d6659..a644292f9faf 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = { struct sctp_ht_iter { struct seq_net_private p; struct rhashtable_iter hti; - int start_fail; }; static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) @@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) sctp_transport_walk_start(&iter->hti); - iter->start_fail = 0; return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); } @@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v) { struct sctp_ht_iter *iter = seq->private; - if (iter->start_fail) - return; sctp_transport_walk_stop(&iter->hti); } -- GitLab From 84581bdae9587023cea1d139523f0ef0f28bd88d Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 27 Aug 2018 18:41:32 +0800 Subject: [PATCH 0583/1692] erspan: set erspan_ver to 1 by default when adding an erspan dev After erspan_ver is introudced, if erspan_ver is not set in iproute, its value will be left 0 by default. Since Commit 02f99df1875c ("erspan: fix invalid erspan version."), it has broken the traffic due to the version check in erspan_xmit if users are not aware of 'erspan_ver' param, like using an old version of iproute. To fix this compatibility problem, it sets erspan_ver to 1 by default when adding an erspan dev in erspan_setup. Note that we can't do it in ipgre_netlink_parms, as this function is also used by ipgre_changelink. Fixes: 02f99df1875c ("erspan: fix invalid erspan version.") Reported-by: Jianlin Shi Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/ipv4/ip_gre.c | 3 +++ net/ipv6/ip6_gre.c | 1 + 2 files changed, 4 insertions(+) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 51a5d06085ac..ae714aecc31c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1508,11 +1508,14 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) static void erspan_setup(struct net_device *dev) { + struct ip_tunnel *t = netdev_priv(dev); + ether_setup(dev); dev->netdev_ops = &erspan_netdev_ops; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; ip_tunnel_setup(dev, erspan_net_id); + t->erspan_ver = 1; } static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 18a3794b0f52..e493b041d4ac 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[], if (data[IFLA_GRE_COLLECT_METADATA]) parms->collect_md = true; + parms->erspan_ver = 1; if (data[IFLA_GRE_ERSPAN_VER]) parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); -- GitLab From d5ed72a55bc0a321ef33d272e2b0bf0d2b06d1fe Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 27 Aug 2018 20:58:43 +0200 Subject: [PATCH 0584/1692] net: sched: fix extack error message when chain is failed to be created Instead "Cannot find" say "Cannot create". Fixes: c35a4acc2985 ("net: sched: cls_api: handle generic cls errors") Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/cls_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 31bd1439cf60..2d41c5b21b48 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1252,7 +1252,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, } chain = tcf_chain_get(block, chain_index, true); if (!chain) { - NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); + NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); err = -ENOMEM; goto errout; } -- GitLab From b7b4247d553939ccf02ff597ec60f41a2f93ee8e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 27 Aug 2018 20:58:44 +0200 Subject: [PATCH 0585/1692] net: sched: return -ENOENT when trying to remove filter from non-existent chain When chain 0 was implicitly created, removal of non-existent filter from chain 0 gave -ENOENT. Once chain 0 became non-implicit, the same call is giving -EINVAL. Fix this by returning -ENOENT in that case. Reported-by: Roman Mashak Fixes: f71e0ca4db18 ("net: sched: Avoid implicit chain 0 creation") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2d41c5b21b48..1a67af8a6e8c 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, goto errout; } NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); - err = -EINVAL; + err = -ENOENT; goto errout; } -- GitLab From 30935198b7d0be12b1c45c328b66a7fdefb16256 Mon Sep 17 00:00:00 2001 From: Haiqing Bai Date: Mon, 27 Aug 2018 09:32:26 +0800 Subject: [PATCH 0586/1692] tipc: fix the big/little endian issue in tipc_dest In function tipc_dest_push, the 32bit variables 'node' and 'port' are stored separately in uppper and lower part of 64bit 'value'. Then this value is assigned to dst->value which is a union like: union { struct { u32 port; u32 node; }; u64 value; } This works on little-endian machines like x86 but fails on big-endian machines. The fix remove the 'value' stack parameter and even the 'value' member of the union in tipc_dest, assign the 'node' and 'port' member directly with the input parameter to avoid the endian issue. Fixes: a80ae5306a73 ("tipc: improve destination linked list") Signed-off-by: Zhenbo Gao Acked-by: Jon Maloy Signed-off-by: Haiqing Bai Signed-off-by: David S. 
Miller --- net/tipc/name_table.c | 10 ++++------ net/tipc/name_table.h | 9 ++------- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 88f027b502f6..66d5b2c5987a 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) { - u64 value = (u64)node << 32 | port; struct tipc_dest *dst; list_for_each_entry(dst, l, list) { - if (dst->value != value) - continue; - return dst; + if (dst->node == node && dst->port == port) + return dst; } return NULL; } bool tipc_dest_push(struct list_head *l, u32 node, u32 port) { - u64 value = (u64)node << 32 | port; struct tipc_dest *dst; if (tipc_dest_find(l, node, port)) @@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port) dst = kmalloc(sizeof(*dst), GFP_ATOMIC); if (unlikely(!dst)) return false; - dst->value = value; + dst->node = node; + dst->port = port; list_add(&dst->list, l); return true; } diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 0febba41da86..892bd750b85f 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net); struct tipc_dest { struct list_head list; - union { - struct { - u32 port; - u32 node; - }; - u64 value; - }; + u32 port; + u32 node; }; struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); -- GitLab From ad8619864f0c9bd89e14d957afa3fd8aaf0720da Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 27 Aug 2018 00:20:11 +0200 Subject: [PATCH 0587/1692] net: dsa: Drop GPIO includes Commit 52638f71fcff ("dsa: Move gpio reset into switch driver") moved the GPIO handling into the switch drivers but forgot to remove the GPIO header includes. Signed-off-by: Linus Walleij Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- net/dsa/dsa.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index e63c554e0623..9f3209ff7ffd 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -19,12 +19,10 @@ #include #include #include -#include #include #include #include #include -#include #include #include "dsa_priv.h" -- GitLab From 53ae914d898e5dd5984d352d5fa0b23410f966a0 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Sat, 25 Aug 2018 15:19:05 +0800 Subject: [PATCH 0588/1692] net/rds: Use rdma_read_gids to get connection SGID/DGID in IPv6 In IPv4, the newly introduced rdma_read_gids is used to read the SGID/DGID for the connection which returns GID correctly for RoCE transport as well. In IPv6, rdma_read_gids is also used. The following are why rdma_read_gids is introduced. rdma_addr_get_dgid() for RoCE for client side connections returns MAC address, instead of DGID. rdma_addr_get_sgid() for RoCE doesn't return correct SGID for IPv6 and when more than one IP address is assigned to the netdevice. So the transport agnostic rdma_read_gids() API is provided by rdma_cm module. Signed-off-by: Zhu Yanjun Acked-by: Santosh Shilimkar Signed-off-by: David S. 
Miller --- net/rds/ib.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/net/rds/ib.c b/net/rds/ib.c index c1d97640c0be..eba75c1ba359 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; - struct rdma_dev_addr *dev_addr; ic = conn->c_transport_data; - dev_addr = &ic->i_cm_id->route.addr.dev_addr; - rdma_addr_get_sgid(dev_addr, - (union ib_gid *)&iinfo6->src_gid); - rdma_addr_get_dgid(dev_addr, - (union ib_gid *)&iinfo6->dst_gid); - + rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, + (union ib_gid *)&iinfo6->dst_gid); rds_ibdev = ic->rds_ibdev; iinfo6->max_send_wr = ic->i_send_ring.w_nr; iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; -- GitLab From 62d2a1940709198a522a43ff8be8b8f6b3654dec Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Tue, 28 Aug 2018 07:31:11 +0800 Subject: [PATCH 0589/1692] block: remove unnecessary condition check kmem_cache_destroy() can handle NULL pointer correctly, so there is no need to check e->icq_cache before calling kmem_cache_destroy(). Signed-off-by: Chengguang Xu Signed-off-by: Jens Axboe --- block/elevator.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/block/elevator.c b/block/elevator.c index 5ea6e7d600e4..6a06b5d040e5 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e) spin_lock(&elv_list_lock); if (elevator_find(e->elevator_name, e->uses_mq)) { spin_unlock(&elv_list_lock); - if (e->icq_cache) - kmem_cache_destroy(e->icq_cache); + kmem_cache_destroy(e->icq_cache); return -EBUSY; } list_add_tail(&e->list, &elv_list); -- GitLab From db193954ed9e35701b6e489fa4cc97b08589341b Mon Sep 17 00:00:00 2001 From: John Pittman Date: Mon, 27 Aug 2018 14:33:05 -0400 Subject: [PATCH 0590/1692] block: bsg: move atomic_t ref_count variable to refcount API Currently, variable ref_count within the bsg_device struct is of type atomic_t. For variables being used as reference counters, the refcount API should be used instead of atomic. The newer refcount API works to prevent counter overflows and use-after-free bugs. So, move this varable from the atomic API to refcount, potentially avoiding the issues mentioned. 
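For reference, the general shape of such an atomic_t to refcount_t conversion is sketched below; the struct and function names here are hypothetical (not taken from the bsg code) and only illustrate the pattern: refcount_set() seeds the counter, refcount_inc() takes a reference, and refcount_dec_and_test() returns true only when the last reference is dropped, while the refcount API additionally warns on increment-from-zero and saturates instead of overflowing.

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct foo {
            refcount_t ref_count;                     /* was: atomic_t ref_count */
    };

    static void foo_init(struct foo *f)
    {
            refcount_set(&f->ref_count, 1);           /* was: atomic_set(&f->ref_count, 1) */
    }

    static void foo_get(struct foo *f)
    {
            refcount_inc(&f->ref_count);              /* was: atomic_inc(&f->ref_count) */
    }

    static void foo_put(struct foo *f)
    {
            if (refcount_dec_and_test(&f->ref_count)) /* was: atomic_dec_and_test(...) */
                    kfree(f);
    }
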
Signed-off-by: John Pittman Signed-off-by: Jens Axboe --- block/bsg.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/block/bsg.c b/block/bsg.c index db588add6ba6..9a442c23a715 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -37,7 +37,7 @@ struct bsg_device { struct request_queue *queue; spinlock_t lock; struct hlist_node dev_list; - atomic_t ref_count; + refcount_t ref_count; char name[20]; int max_queue; }; @@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd) mutex_lock(&bsg_mutex); - if (!atomic_dec_and_test(&bd->ref_count)) { + if (!refcount_dec_and_test(&bd->ref_count)) { mutex_unlock(&bsg_mutex); return 0; } @@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode, bd->queue = rq; - atomic_set(&bd->ref_count, 1); + refcount_set(&bd->ref_count, 1); hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); @@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { if (bd->queue == q) { - atomic_inc(&bd->ref_count); + refcount_inc(&bd->ref_count); goto found; } } -- GitLab From e06fa9c16ce4b740996189fa5610eabcee734e6c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 24 Aug 2018 22:08:50 +0200 Subject: [PATCH 0591/1692] bpf, sockmap: fix potential use after free in bpf_tcp_close bpf_tcp_close() we pop the psock linkage to a map via psock_map_pop(). A parallel update on the sock hash map can happen between psock_map_pop() and lookup_elem_raw() where we override the element under link->hash / link->key. In bpf_tcp_close()'s lookup_elem_raw() we subsequently only test whether an element is present, but we do not test whether the element is infact the element we were looking for. We lock the sock in bpf_tcp_close() during that time, so do we hold the lock in sock_hash_update_elem(). However, the latter locks the sock which is newly updated, not the one we're purging from the hash table. This means that while one CPU is doing the lookup from bpf_tcp_close(), another CPU is doing the map update in parallel, dropped our sock from the hlist and released the psock. Subsequently the first CPU will find the new sock and attempts to drop and release the old sock yet another time. Fix is that we need to check the elements for a match after lookup, similar as we do in the sock map. Note that the hash tab elems are freed via RCU, so access to their link->hash / link->key is fine since we're under RCU read side there. Fixes: e9db4ef6bf4c ("bpf: sockhash fix omitted bucket lock in sock_close") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: Alexei Starovoitov --- kernel/bpf/sockmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index cf5195c7c331..01879e4d599a 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout) /* If another thread deleted this object skip deletion. * The refcnt on psock may or may not be zero. 
*/ - if (l) { + if (l && l == link) { hlist_del_rcu(&link->hash_node); smap_release_sock(psock, link->sk); free_htab_elem(htab, link); -- GitLab From 15c480efab01197c965ce0562a43ffedd852b8f9 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 24 Aug 2018 22:08:51 +0200 Subject: [PATCH 0592/1692] bpf, sockmap: fix psock refcount leak in bpf_tcp_recvmsg In bpf_tcp_recvmsg() we first took a reference on the psock, however once we find that there are skbs in the normal socket's receive queue we return with processing them through tcp_recvmsg(). Problem is that we leak the taken reference on the psock in that path. Given we don't really do anything with the psock at this point, move the skb_queue_empty() test before we fetch the psock to fix this case. Fixes: 8934ce2fd081 ("bpf: sockmap redirect ingress support") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: Alexei Starovoitov --- kernel/bpf/sockmap.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 01879e4d599a..26d8a3053407 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -912,6 +912,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); + if (!skb_queue_empty(&sk->sk_receive_queue)) + return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); rcu_read_lock(); psock = smap_psock_sk(sk); @@ -922,9 +924,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, goto out; rcu_read_unlock(); - if (!skb_queue_empty(&sk->sk_receive_queue)) - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); - lock_sock(sk); bytes_ready: while (copied != len) { -- GitLab From 3f6e138d41ddff196f452993528cfe75762ede0f Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Mon, 27 Aug 2018 21:30:42 +0200 Subject: [PATCH 0593/1692] bpf: fix build error with clang Building the newly introduced BPF_PROG_TYPE_SK_REUSEPORT leads to a compile time error when building with clang: net/core/filter.o: In function `sk_reuseport_convert_ctx_access': ../net/core/filter.c:7284: undefined reference to `__compiletime_assert_7284' It seems that clang has issues resolving hweight_long at compile time. Since SK_FL_PROTO_MASK is a constant, we can use the interface for known constant arguments which works fine with clang. Fixes: 2dbb9b9e6df6 ("bpf: Introduce BPF_PROG_TYPE_SK_REUSEPORT") Signed-off-by: Stefan Agner Signed-off-by: Alexei Starovoitov --- net/core/filter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/filter.c b/net/core/filter.c index c25eb36f1320..7a2430945c71 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7281,7 +7281,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, break; case offsetof(struct sk_reuseport_md, ip_protocol): - BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE); + BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, BPF_W, 0); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); -- GitLab From f1ed3df20d2d223e0852cc4ac1f19bba869a7e3c Mon Sep 17 00:00:00 2001 From: Michal Wnukowski Date: Wed, 15 Aug 2018 15:51:57 -0700 Subject: [PATCH 0594/1692] nvme-pci: add a memory barrier to nvme_dbbuf_update_and_check_event In many architectures loads may be reordered with older stores to different locations. 
In the nvme driver the following two operations could be reordered: - Write shadow doorbell (dbbuf_db) into memory. - Read EventIdx (dbbuf_ei) from memory. This can result in a potential race condition between driver and VM host processing requests (if given virtual NVMe controller has a support for shadow doorbell). If that occurs, then the NVMe controller may decide to wait for MMIO doorbell from guest operating system, and guest driver may decide not to issue MMIO doorbell on any of subsequent commands. This issue is purely timing-dependent one, so there is no easy way to reproduce it. Currently the easiest known approach is to run "Oracle IO Numbers" (orion) that is shipped with Oracle DB: orion -run advanced -num_large 0 -size_small 8 -type rand -simulate \ concat -write 40 -duration 120 -matrix row -testname nvme_test Where nvme_test is a .lun file that contains a list of NVMe block devices to run test against. Limiting number of vCPUs assigned to given VM instance seems to increase chances for this bug to occur. On test environment with VM that got 4 NVMe drives and 1 vCPU assigned the virtual NVMe controller hang could be observed within 10-20 minutes. That correspond to about 400-500k IO operations processed (or about 100GB of IO read/writes). Orion tool was used as a validation and set to run in a loop for 36 hours (equivalent of pushing 550M IO operations). No issues were observed. That suggest that the patch fixes the issue. Fixes: f9f38e33389c ("nvme: improve performance for virtual NVMe devices") Signed-off-by: Michal Wnukowski Reviewed-by: Keith Busch Reviewed-by: Sagi Grimberg [hch: updated changelog and comment a bit] Signed-off-by: Christoph Hellwig --- drivers/nvme/host/pci.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 1b9951d2067e..d668682f91df 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, old_value = *dbbuf_db; *dbbuf_db = value; + /* + * Ensure that the doorbell is updated before reading the event + * index from memory. The controller needs to provide similar + * ordering to ensure the envent index is updated before reading + * the doorbell. + */ + mb(); + if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) return false; } -- GitLab From afd299ca996929f4f98ac20da0044c0cdc124879 Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 9 Aug 2018 16:00:14 -0700 Subject: [PATCH 0595/1692] nvme-fcloop: Fix dropped LS's to removed target port When a targetport is removed from the config, fcloop will avoid calling the LS done() routine thinking the targetport is gone. This leaves the initiator reset/reconnect hanging as it waits for a status on the Create_Association LS for the reconnect. Change the filter in the LS callback path. If tport null (set when failed validation before "sending to remote port"), be sure to call done. This was the main bug. But, continue the logic that only calls done if tport was set but there is no remoteport (e.g. case where remoteport has been removed, thus host doesn't expect a completion). 
Signed-off-by: James Smart Signed-off-by: Christoph Hellwig --- drivers/nvme/target/fcloop.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 34712def81b1..5251689a1d9a 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work) struct fcloop_tport *tport = tls_req->tport; struct nvmefc_ls_req *lsreq = tls_req->lsreq; - if (tport->remoteport) + if (!tport || tport->remoteport) lsreq->done(lsreq, tls_req->status); } @@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, if (!rport->targetport) { tls_req->status = -ECONNREFUSED; + tls_req->tport = NULL; schedule_work(&tls_req->work); return ret; } -- GitLab From 04db0e5ec58167364a80fd33ddb4f3b67434eb85 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Wed, 15 Aug 2018 18:48:25 -0700 Subject: [PATCH 0596/1692] nvmet: free workqueue object if module init fails Signed-off-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig --- drivers/nvme/target/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index ebf3e7a6c49e..b5ec96abd048 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1210,7 +1210,7 @@ static int __init nvmet_init(void) error = nvmet_init_discovery(); if (error) - goto out; + goto out_free_work_queue; error = nvmet_init_configfs(); if (error) @@ -1219,6 +1219,8 @@ static int __init nvmet_init(void) out_exit_discovery: nvmet_exit_discovery(); +out_free_work_queue: + destroy_workqueue(buffered_io_wq); out: return error; } -- GitLab From 501ca81760c204ec59b73e4a00bee5971fc0f1b1 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Fri, 24 Aug 2018 17:37:00 -0700 Subject: [PATCH 0597/1692] bpf: sockmap, decrement copied count correctly in redirect error case Currently, when a redirect occurs in sockmap and an error occurs in the redirect call we unwind the scatterlist once in the error path of bpf_tcp_sendmsg_do_redirect() and then again in sendmsg(). Then in the error path of sendmsg we decrement the copied count by the send size. However, its possible we partially sent data before the error was generated. This can happen if do_tcp_sendpages() partially sends the scatterlist before encountering a memory pressure error. If this happens we need to decrement the copied value (the value tracking how many bytes were actually sent to TCP stack) by the number of remaining bytes _not_ the entire send size. Otherwise we risk confusing userspace. Also we don't need two calls to free the scatterlist one is good enough. So remove the one in bpf_tcp_sendmsg_do_redirect() and then properly reduce copied by the number of remaining bytes which may in fact be the entire send size if no bytes were sent. To do this use bool to indicate if free_start_sg() should do mem accounting or not. 
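As a concrete (hypothetical) illustration of why the remaining byte count is the right amount to unwind: if sendmsg() queued send = 8192 bytes into the scatterlist and do_tcp_sendpages() pushed 5120 of them before hitting memory pressure, only the 3072 bytes still sitting in the scatterlist were never sent, so copied must be reduced by 3072 (the value free_start_sg() now returns), not by the full 8192; otherwise userspace would be told that data the TCP stack already accepted was never sent.
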
Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 45 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 26d8a3053407..ce63e5801746 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk) } static void smap_release_sock(struct smap_psock *psock, struct sock *sock); -static int free_start_sg(struct sock *sk, struct sk_msg_buff *md); +static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge); static void bpf_tcp_release(struct sock *sk) { @@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk) goto out; if (psock->cork) { - free_start_sg(psock->sock, psock->cork); + free_start_sg(psock->sock, psock->cork, true); kfree(psock->cork); psock->cork = NULL; } @@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout) close_fun = psock->save_close; if (psock->cork) { - free_start_sg(psock->sock, psock->cork); + free_start_sg(psock->sock, psock->cork, true); kfree(psock->cork); psock->cork = NULL; } list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { list_del(&md->list); - free_start_sg(psock->sock, md); + free_start_sg(psock->sock, md, true); kfree(md); } @@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes, md->sg_start = i; } -static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) +static int free_sg(struct sock *sk, int start, + struct sk_msg_buff *md, bool charge) { struct scatterlist *sg = md->sg_data; int i = start, free = 0; while (sg[i].length) { free += sg[i].length; - sk_mem_uncharge(sk, sg[i].length); + if (charge) + sk_mem_uncharge(sk, sg[i].length); if (!md->skb) put_page(sg_page(&sg[i])); sg[i].length = 0; @@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) return free; } -static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) +static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge) { - int free = free_sg(sk, md->sg_start, md); + int free = free_sg(sk, md->sg_start, md, charge); md->sg_start = md->sg_end; return free; @@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md) { - return free_sg(sk, md->sg_curr, md); + return free_sg(sk, md->sg_curr, md, true); } static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md) @@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes, list_add_tail(&r->list, &psock->ingress); sk->sk_data_ready(sk); } else { - free_start_sg(sk, r); + free_start_sg(sk, r, true); kfree(r); } @@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send, release_sock(sk); } smap_release_sock(psock, sk); - if (unlikely(err)) - goto out; - return 0; + return err; out_rcu: rcu_read_unlock(); -out: - free_bytes_sg(NULL, send, md, false); - return err; + return 0; } static inline void bpf_md_init(struct smap_psock *psock) @@ -822,7 +820,7 @@ static int bpf_exec_tx_verdict(struct smap_psock *psock, case __SK_PASS: err = bpf_tcp_push(sk, send, m, flags, true); if (unlikely(err)) { - *copied -= free_start_sg(sk, m); + *copied -= free_start_sg(sk, m, true); break; } @@ -845,16 +843,17 @@ static int bpf_exec_tx_verdict(struct smap_psock *psock, lock_sock(sk); if (unlikely(err < 0)) { - free_start_sg(sk, m); + int free = free_start_sg(sk, m, false); + 
psock->sg_size = 0; if (!cork) - *copied -= send; + *copied -= free; } else { psock->sg_size -= send; } if (cork) { - free_start_sg(sk, m); + free_start_sg(sk, m, true); psock->sg_size = 0; kfree(m); m = NULL; @@ -1121,7 +1120,7 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) err = sk_stream_wait_memory(sk, &timeo); if (err) { if (m && m != psock->cork) - free_start_sg(sk, m); + free_start_sg(sk, m, true); goto out_err; } } @@ -1580,13 +1579,13 @@ static void smap_gc_work(struct work_struct *w) bpf_prog_put(psock->bpf_tx_msg); if (psock->cork) { - free_start_sg(psock->sock, psock->cork); + free_start_sg(psock->sock, psock->cork, true); kfree(psock->cork); } list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { list_del(&md->list); - free_start_sg(psock->sock, md); + free_start_sg(psock->sock, md, true); kfree(md); } -- GitLab From 67d1ba8a6dc83d90cd58b89fa6cbf9ae35a0cf7f Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Wed, 22 Aug 2018 16:01:04 -0700 Subject: [PATCH 0598/1692] mac80211: correct use of IEEE80211_VHT_CAP_RXSTBC_X The mod mask for VHT capabilities intends to say that you can override the number of STBC receive streams, and it does, but only by accident. The IEEE80211_VHT_CAP_RXSTBC_X aren't bits to be set, but values (albeit left-shifted). ORing the bits together gets the right answer, but we should use the _MASK macro here instead. Signed-off-by: Danek Duvall Signed-off-by: Johannes Berg --- net/mac80211/main.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 0358f20b675f..27cd64acaf00 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -470,10 +470,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | - IEEE80211_VHT_CAP_RXSTBC_1 | - IEEE80211_VHT_CAP_RXSTBC_2 | - IEEE80211_VHT_CAP_RXSTBC_3 | - IEEE80211_VHT_CAP_RXSTBC_4 | + IEEE80211_VHT_CAP_RXSTBC_MASK | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | -- GitLab From d7c863a2f65e48f442379f4ee1846d52e0c5d24d Mon Sep 17 00:00:00 2001 From: Danek Duvall Date: Wed, 22 Aug 2018 16:01:05 -0700 Subject: [PATCH 0599/1692] mac80211_hwsim: correct use of IEEE80211_VHT_CAP_RXSTBC_X The mac80211_hwsim driver intends to say that it supports up to four STBC receive streams, but instead it ends up saying something undefined. The IEEE80211_VHT_CAP_RXSTBC_X macros aren't independent bits that can be ORed together, but values. In this case, _4 is the appropriate one to use. 
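The distinction matters because the Rx STBC capability is a 3-bit value field starting at bit 8 of vht_cap_info, not a set of independent flags. Assuming the standard definitions in include/linux/ieee80211.h (hex values quoted here for illustration, worth cross-checking against the header):

    /* values of a 3-bit field, not OR-able flag bits */
    #define IEEE80211_VHT_CAP_RXSTBC_1    0x00000100  /* 1 spatial stream  */
    #define IEEE80211_VHT_CAP_RXSTBC_2    0x00000200  /* 2 spatial streams */
    #define IEEE80211_VHT_CAP_RXSTBC_3    0x00000300  /* 3 spatial streams */
    #define IEEE80211_VHT_CAP_RXSTBC_4    0x00000400  /* 4 spatial streams */
    #define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700

ORing _1 | _2 | _3 | _4 therefore yields 0x700, i.e. field value 7, which the spec leaves reserved. That is why the previous (mod-mask) patch happened to produce the right bits by accident (0x700 equals the mask), while hwsim ended up advertising an undefined stream count instead of "up to four receive streams".
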
Signed-off-by: Danek Duvall Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index fe1b0108f06d..7d0b460868f9 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2699,9 +2699,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_TXSTBC | - IEEE80211_VHT_CAP_RXSTBC_1 | - IEEE80211_VHT_CAP_RXSTBC_2 | - IEEE80211_VHT_CAP_RXSTBC_3 | IEEE80211_VHT_CAP_RXSTBC_4 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; sband->vht_cap.vht_mcs.rx_mcs_map = -- GitLab From 38cb87ee47fb825f6c9d645c019f75b3905c0ab2 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 22 Aug 2018 13:52:21 +0200 Subject: [PATCH 0600/1692] cfg80211: make wmm_rule part of the reg_rule structure Make wmm_rule be part of the reg_rule structure. This simplifies the code a lot at the cost of having bigger memory usage. However in most cases we have only few reg_rule's and when we do have many like in iwlwifi we do not save memory as it allocates a separate wmm_rule for each channel anyway. This also fixes a bug reported in various places where somewhere the pointers were corrupted and we ended up doing a null-dereference. Fixes: 230ebaa189af ("cfg80211: read wmm rules from regulatory database") Signed-off-by: Stanislaw Gruszka [rephrase commit message slightly] Signed-off-by: Johannes Berg --- .../wireless/intel/iwlwifi/iwl-nvm-parse.c | 50 ++--------- include/net/cfg80211.h | 4 +- include/net/regulatory.h | 4 +- net/mac80211/util.c | 8 +- net/wireless/nl80211.c | 10 +-- net/wireless/reg.c | 90 +++---------------- 6 files changed, 31 insertions(+), 135 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index b815ba38dbdb..88121548eb9f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -877,15 +877,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? iwl_ext_nvm_channels : iwl_nvm_channels; struct ieee80211_regdomain *regd, *copy_rd; - int size_of_regd, regd_to_copy, wmms_to_copy; - int size_of_wmms = 0; + int size_of_regd, regd_to_copy; struct ieee80211_reg_rule *rule; - struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; struct regdb_ptrs *regdb_ptrs; enum nl80211_band band; int center_freq, prev_center_freq = 0; - int valid_rules = 0, n_wmms = 0; - int i; + int valid_rules = 0; bool new_rule; int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? 
IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; @@ -904,11 +901,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, sizeof(struct ieee80211_regdomain) + num_of_ch * sizeof(struct ieee80211_reg_rule); - if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) - size_of_wmms = - num_of_ch * sizeof(struct ieee80211_wmm_rule); - - regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); + regd = kzalloc(size_of_regd, GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); @@ -922,8 +915,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, regd->alpha2[0] = fw_mcc >> 8; regd->alpha2[1] = fw_mcc & 0xff; - wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); - for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = (ch_idx < NUM_2GHZ_CHANNELS) ? @@ -977,26 +968,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, band == NL80211_BAND_2GHZ) continue; - if (!reg_query_regdb_wmm(regd->alpha2, center_freq, - ®db_ptrs[n_wmms].token, wmm_rule)) { - /* Add only new rules */ - for (i = 0; i < n_wmms; i++) { - if (regdb_ptrs[i].token == - regdb_ptrs[n_wmms].token) { - rule->wmm_rule = regdb_ptrs[i].rule; - break; - } - } - if (i == n_wmms) { - rule->wmm_rule = wmm_rule; - regdb_ptrs[n_wmms++].rule = wmm_rule; - wmm_rule++; - } - } + reg_query_regdb_wmm(regd->alpha2, center_freq, rule); } regd->n_reg_rules = valid_rules; - regd->n_wmm_rules = n_wmms; /* * Narrow down regdom for unused regulatory rules to prevent hole @@ -1005,28 +980,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, regd_to_copy = sizeof(struct ieee80211_regdomain) + valid_rules * sizeof(struct ieee80211_reg_rule); - wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; - - copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); + copy_rd = kzalloc(regd_to_copy, GFP_KERNEL); if (!copy_rd) { copy_rd = ERR_PTR(-ENOMEM); goto out; } memcpy(copy_rd, regd, regd_to_copy); - memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, - wmms_to_copy); - - d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); - s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); - - for (i = 0; i < regd->n_reg_rules; i++) { - if (!regd->reg_rules[i].wmm_rule) - continue; - - copy_rd->reg_rules[i].wmm_rule = d_wmm + - (regd->reg_rules[i].wmm_rule - s_wmm); - } out: kfree(regdb_ptrs); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 1beb3ead0385..7229c186d199 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4763,8 +4763,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator); * * Return: 0 on success. -ENODATA. 
*/ -int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr, - struct ieee80211_wmm_rule *rule); +int reg_query_regdb_wmm(char *alpha2, int freq, + struct ieee80211_reg_rule *rule); /* * callbacks for asynchronous cfg80211 methods, notification diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 60f8cc86a447..3469750df0f4 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -217,15 +217,15 @@ struct ieee80211_wmm_rule { struct ieee80211_reg_rule { struct ieee80211_freq_range freq_range; struct ieee80211_power_rule power_rule; - struct ieee80211_wmm_rule *wmm_rule; + struct ieee80211_wmm_rule wmm_rule; u32 flags; u32 dfs_cac_ms; + bool has_wmm; }; struct ieee80211_regdomain { struct rcu_head rcu_head; u32 n_reg_rules; - u32 n_wmm_rules; char alpha2[3]; enum nl80211_dfs_regions dfs_region; struct ieee80211_reg_rule reg_rules[]; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d02fbfec3783..c80187d6e6bb 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1120,7 +1120,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, { struct ieee80211_chanctx_conf *chanctx_conf; const struct ieee80211_reg_rule *rrule; - struct ieee80211_wmm_ac *wmm_ac; + const struct ieee80211_wmm_ac *wmm_ac; u16 center_freq = 0; if (sdata->vif.type != NL80211_IFTYPE_AP && @@ -1139,15 +1139,15 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); - if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) { + if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) { rcu_read_unlock(); return; } if (sdata->vif.type == NL80211_IFTYPE_AP) - wmm_ac = &rrule->wmm_rule->ap[ac]; + wmm_ac = &rrule->wmm_rule.ap[ac]; else - wmm_ac = &rrule->wmm_rule->client[ac]; + wmm_ac = &rrule->wmm_rule.client[ac]; qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 80bc986c79e5..e3dcffd96919 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -667,13 +667,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, goto nla_put_failure; if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, - rule->wmm_rule->client[j].cw_min) || + rule->wmm_rule.client[j].cw_min) || nla_put_u16(msg, NL80211_WMMR_CW_MAX, - rule->wmm_rule->client[j].cw_max) || + rule->wmm_rule.client[j].cw_max) || nla_put_u8(msg, NL80211_WMMR_AIFSN, - rule->wmm_rule->client[j].aifsn) || + rule->wmm_rule.client[j].aifsn) || nla_put_u8(msg, NL80211_WMMR_TXOP, - rule->wmm_rule->client[j].cot)) + rule->wmm_rule.client[j].cot)) goto nla_put_failure; nla_nest_end(msg, nl_wmm_rule); @@ -766,7 +766,7 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, const struct ieee80211_reg_rule *rule = freq_reg_info(wiphy, chan->center_freq); - if (!IS_ERR(rule) && rule->wmm_rule) { + if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { if (nl80211_msg_put_wmm_rules(msg, rule)) goto nla_put_failure; } diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 283902974fbf..2f702adf2912 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -425,35 +425,23 @@ static const struct ieee80211_regdomain * reg_copy_regd(const struct ieee80211_regdomain *src_regd) { struct ieee80211_regdomain *regd; - int size_of_regd, size_of_wmms; + int size_of_regd; unsigned int i; - struct ieee80211_wmm_rule *d_wmm, *s_wmm; 
size_of_regd = sizeof(struct ieee80211_regdomain) + src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); - size_of_wmms = src_regd->n_wmm_rules * - sizeof(struct ieee80211_wmm_rule); - regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); + regd = kzalloc(size_of_regd, GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); - d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); - s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd); - memcpy(d_wmm, s_wmm, size_of_wmms); - - for (i = 0; i < src_regd->n_reg_rules; i++) { + for (i = 0; i < src_regd->n_reg_rules; i++) memcpy(®d->reg_rules[i], &src_regd->reg_rules[i], sizeof(struct ieee80211_reg_rule)); - if (!src_regd->reg_rules[i].wmm_rule) - continue; - regd->reg_rules[i].wmm_rule = d_wmm + - (src_regd->reg_rules[i].wmm_rule - s_wmm); - } return regd; } @@ -859,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size) return true; } -static void set_wmm_rule(struct ieee80211_wmm_rule *rule, +static void set_wmm_rule(struct ieee80211_reg_rule *rrule, struct fwdb_wmm_rule *wmm) { + struct ieee80211_wmm_rule *rule = &rrule->wmm_rule; unsigned int i; for (i = 0; i < IEEE80211_NUM_ACS; i++) { @@ -875,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule, rule->ap[i].aifsn = wmm->ap[i].aifsn; rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); } + + rrule->has_wmm = true; } static int __regdb_query_wmm(const struct fwdb_header *db, const struct fwdb_country *country, int freq, - u32 *dbptr, struct ieee80211_wmm_rule *rule) + struct ieee80211_reg_rule *rule) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); @@ -900,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db, wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; wmm = (void *)((u8 *)db + wmm_ptr); set_wmm_rule(rule, wmm); - if (dbptr) - *dbptr = wmm_ptr; return 0; } } @@ -909,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db, return -ENODATA; } -int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, - struct ieee80211_wmm_rule *rule) +int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule) { const struct fwdb_header *hdr = regdb; const struct fwdb_country *country; @@ -924,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, country = &hdr->country[0]; while (country->coll_ptr) { if (alpha2_equal(alpha2, country->alpha2)) - return __regdb_query_wmm(regdb, country, freq, dbptr, - rule); + return __regdb_query_wmm(regdb, country, freq, rule); country++; } @@ -934,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, } EXPORT_SYMBOL(reg_query_regdb_wmm); -struct wmm_ptrs { - struct ieee80211_wmm_rule *rule; - u32 ptr; -}; - -static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs, - u32 wmm_ptr, int n_wmms) -{ - int i; - - for (i = 0; i < n_wmms; i++) { - if (wmm_ptrs[i].ptr == wmm_ptr) - return wmm_ptrs[i].rule; - } - return NULL; -} - static int regdb_query_country(const struct fwdb_header *db, const struct fwdb_country *country) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); struct ieee80211_regdomain *regdom; - struct ieee80211_regdomain *tmp_rd; - unsigned int size_of_regd, i, n_wmms = 0; - struct wmm_ptrs *wmm_ptrs; + unsigned int size_of_regd, i; size_of_regd = sizeof(struct ieee80211_regdomain) + coll->n_rules * 
sizeof(struct ieee80211_reg_rule); @@ -968,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db, if (!regdom) return -ENOMEM; - wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL); - if (!wmm_ptrs) { - kfree(regdom); - return -ENOMEM; - } - regdom->n_reg_rules = coll->n_rules; regdom->alpha2[0] = country->alpha2[0]; regdom->alpha2[1] = country->alpha2[1]; @@ -1012,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db, 1000 * be16_to_cpu(rule->cac_timeout); if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; - struct ieee80211_wmm_rule *wmm_pos = - find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms); - struct fwdb_wmm_rule *wmm; - struct ieee80211_wmm_rule *wmm_rule; - - if (wmm_pos) { - rrule->wmm_rule = wmm_pos; - continue; - } - wmm = (void *)((u8 *)db + wmm_ptr); - tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) * - sizeof(struct ieee80211_wmm_rule), - GFP_KERNEL); - - if (!tmp_rd) { - kfree(regdom); - kfree(wmm_ptrs); - return -ENOMEM; - } - regdom = tmp_rd; - - wmm_rule = (struct ieee80211_wmm_rule *) - ((u8 *)regdom + size_of_regd + n_wmms * - sizeof(struct ieee80211_wmm_rule)); + struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr); - set_wmm_rule(wmm_rule, wmm); - wmm_ptrs[n_wmms].ptr = wmm_ptr; - wmm_ptrs[n_wmms++].rule = wmm_rule; + set_wmm_rule(rrule, wmm); } } - kfree(wmm_ptrs); return reg_schedule_apply(regdom); } -- GitLab From 20932750d9c78d307e4f2f18f9c6a32b82b1e0e8 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 20 Aug 2018 13:56:07 +0300 Subject: [PATCH 0601/1692] mac80211: don't update the PM state of a peer upon a multicast frame I changed the way mac80211 updates the PM state of the peer. I forgot that we could also have multicast frames from the peer and that those frame should of course not change the PM state of the peer: A peer goes to power save when it needs to scan, but it won't send the broadcast Probe Request with the PM bit set. This made us mark the peer as awake when it wasn't and then Intel's firmware would fail to transmit because the peer is asleep according to its database. The driver warned about this and it looked like this: WARNING: CPU: 0 PID: 184 at /usr/src/linux-4.16.14/drivers/net/wireless/intel/iwlwifi/mvm/tx.c:1369 iwl_mvm_rx_tx_cmd+0x53b/0x860 CPU: 0 PID: 184 Comm: irq/124-iwlwifi Not tainted 4.16.14 #1 RIP: 0010:iwl_mvm_rx_tx_cmd+0x53b/0x860 Call Trace: iwl_pcie_rx_handle+0x220/0x880 iwl_pcie_irq_handler+0x6c9/0xa20 ? irq_forced_thread_fn+0x60/0x60 ? irq_thread_dtor+0x90/0x90 The relevant code that spits the WARNING is: case TX_STATUS_FAIL_DEST_PS: /* the FW should have stopped the queue and not * return this status */ WARN_ON(1); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; This fixes https://bugzilla.kernel.org/show_bug.cgi?id=199967. 
Fixes: 9fef65443388 ("mac80211: always update the PM state of a peer on MGMT / DATA frames") Cc: #4.16+ Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 932985ca4e66..3f80a5ca4050 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1612,6 +1612,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) */ if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && !ieee80211_has_morefrags(hdr->frame_control) && + !is_multicast_ether_addr(hdr->addr1) && (ieee80211_is_mgmt(hdr->frame_control) || ieee80211_is_data(hdr->frame_control)) && !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && -- GitLab From 3a2af7cccbbaf2362db9053a946a6084e12bfa73 Mon Sep 17 00:00:00 2001 From: Jinbum Park Date: Tue, 31 Jul 2018 23:10:40 +0900 Subject: [PATCH 0602/1692] mac80211_hwsim: Fix possible Spectre-v1 for hwsim_world_regdom_custom User controls @idx which to be used as index of hwsim_world_regdom_custom. So, It can be exploited via Spectre-like attack. (speculative execution) This kind of attack leaks address of hwsim_world_regdom_custom, It leads an attacker to bypass security mechanism such as KASLR. So sanitize @idx before using it to prevent attack. I leveraged strategy [1] to find and exploit this gadget. [1] https://github.com/jinb-park/linux-exploit/tree/master/exploit-remaining-spectre-gadget/ Signed-off-by: Jinbum Park [johannes: unwrap URL] Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 7d0b460868f9..80e2c8595c7c 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "mac80211_hwsim.h" #define WARN_QUEUE 100 @@ -3229,6 +3230,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) kfree(hwname); return -EINVAL; } + + idx = array_index_nospec(idx, + ARRAY_SIZE(hwsim_world_regdom_custom)); param.regd = hwsim_world_regdom_custom[idx]; } -- GitLab From d3c89bbc7491d5e288ca2993e999d24ba9ff52ad Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Tue, 21 Aug 2018 09:22:19 +0300 Subject: [PATCH 0603/1692] nl80211: Fix nla_put_u8 to u16 for NL80211_WMMR_TXOP TXOP (also known as Channel Occupancy Time) is u16 and should be added using nla_put_u16 instead of u8, fix that. 
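The practical effect of using the wrong helper: nla_put_u8() takes a u8 argument, so a 16-bit TXOP/COT value such as 2048 us (0x0800) is silently truncated to 0x00 at the call site, and the attribute is also emitted with a 1-byte payload length rather than 2. A minimal sketch of the corrected put (the cot value here is hypothetical):

    u16 cot = 2048;   /* e.g. a 2.048 ms TXOP limit */

    /* nla_put_u16() keeps both the value and the attribute payload length correct */
    if (nla_put_u16(msg, NL80211_WMMR_TXOP, cot))
            goto nla_put_failure;
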
Fixes: 50f32718e125 ("nl80211: Add wmm rule attribute to NL80211_CMD_GET_WIPHY dump command") Signed-off-by: Haim Dreyfuss Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e3dcffd96919..3f7ffbe6c634 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -672,8 +672,8 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, rule->wmm_rule.client[j].cw_max) || nla_put_u8(msg, NL80211_WMMR_AIFSN, rule->wmm_rule.client[j].aifsn) || - nla_put_u8(msg, NL80211_WMMR_TXOP, - rule->wmm_rule.client[j].cot)) + nla_put_u16(msg, NL80211_WMMR_TXOP, + rule->wmm_rule.client[j].cot)) goto nla_put_failure; nla_nest_end(msg, nl_wmm_rule); -- GitLab From b88d26d97c41680f7327e5fb8061ad0037877f40 Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Tue, 21 Aug 2018 09:22:20 +0300 Subject: [PATCH 0604/1692] nl80211: Pass center frequency in kHz instead of MHz freq_reg_info expects to get the frequency in kHz. Instead we accidently pass it in MHz. Thus, currently the function always return ERR rule. Fix that. Fixes: 50f32718e125 ("nl80211: Add wmm rule attribute to NL80211_CMD_GET_WIPHY dump command") Signed-off-by: Haim Dreyfuss Signed-off-by: Luca Coelho [fix kHz/MHz in commit message] Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 3f7ffbe6c634..ce0149a86c13 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -764,7 +764,7 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, if (large) { const struct ieee80211_reg_rule *rule = - freq_reg_info(wiphy, chan->center_freq); + freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq)); if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { if (nl80211_msg_put_wmm_rules(msg, rule)) -- GitLab From e0ab8b26aa9661df0541a657e2b2416d90488809 Mon Sep 17 00:00:00 2001 From: Andreas Bosch Date: Fri, 17 Aug 2018 22:16:00 +0200 Subject: [PATCH 0605/1692] HID: intel-ish-hid: Enable Sunrise Point-H ish driver Added PCI ID for Sunrise Point-H ISH. 
Signed-off-by: Andreas Bosch Acked-by: Srinivas Pandruvada Signed-off-by: Jiri Kosina --- drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1 + drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 97869b7410eb..da133716bed0 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -29,6 +29,7 @@ #define CNL_Ax_DEVICE_ID 0x9DFC #define GLK_Ax_DEVICE_ID 0x31A2 #define CNL_H_DEVICE_ID 0xA37C +#define SPT_H_DEVICE_ID 0xA135 #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 050f9872f5c0..a1125a5c7965 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); -- GitLab From fb6acf76c3fdd97fea6995e64e2c665725f00fc5 Mon Sep 17 00:00:00 2001 From: AceLan Kao Date: Tue, 21 Aug 2018 16:55:13 +0800 Subject: [PATCH 0606/1692] HID: i2c-hid: Fix flooded incomplete report after S3 on Rayd touchscreen The incomplete report flooded after S3 and touchscreen becomes malfunctioned. [ 1367.646244] i2c_hid i2c-CUST0000:00: i2c_hid_get_input: incomplete report (58/18785) [ 1367.649471] i2c_hid i2c-CUST0000:00: i2c_hid_get_input: incomplete report (58/28743) [ 1367.651092] i2c_hid i2c-CUST0000:00: i2c_hid_get_input: incomplete report (58/26757) [ 1367.652658] i2c_hid i2c-CUST0000:00: i2c_hid_get_input: incomplete report (58/52280) [ 1367.654287] i2c_hid i2c-CUST0000:00: i2c_hid_get_input: incomplete report (58/56059) Adding device ID, 04F3:30CC, to the quirk to re-send report description after resume. 
Cc: stable@vger.kernel.org Signed-off-by: AceLan Kao Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-ids.h | 1 + drivers/hid/i2c-hid/i2c-hid.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 79bdf0c7e351..34367df61b28 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -530,6 +530,7 @@ #define I2C_VENDOR_ID_RAYD 0x2386 #define I2C_PRODUCT_ID_RAYD_3118 0x3118 +#define I2C_PRODUCT_ID_RAYD_4B33 0x4B33 #define USB_VENDOR_ID_HANWANG 0x0b57 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 2ce194a84868..57126f6837bb 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -174,6 +174,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_RESEND_REPORT_DESCR }, { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH, I2C_HID_QUIRK_RESEND_REPORT_DESCR }, + { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_4B33, + I2C_HID_QUIRK_RESEND_REPORT_DESCR }, { 0, 0 } }; -- GitLab From ee345492437043a79db058a3d4f029ebcb52089a Mon Sep 17 00:00:00 2001 From: Sean O'Brien Date: Mon, 27 Aug 2018 13:02:15 -0700 Subject: [PATCH 0607/1692] HID: add support for Apple Magic Keyboards USB device Vendor 05ac (Apple) Device 026c (Magic Keyboard with Numeric Keypad) Bluetooth devices Vendor 004c (Apple) Device 0267 (Magic Keyboard) Device 026c (Magic Keyboard with Numeric Keypad) Support already exists for the Magic Keyboard over USB connection. Add support for the Magic Keyboard over Bluetooth connection, and for the Magic Keyboard with Numeric Keypad over Bluetooth and USB connection. Signed-off-by: Sean O'Brien Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-apple.c | 9 ++++++++- drivers/hid/hid-ids.h | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 25b7bd56ae11..1cb41992aaa1 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { - if (usage->hid == (HID_UP_CUSTOM | 0x0003)) { + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || + usage->hid == (HID_UP_MSVENDOR | 0x0003)) { /* The fn key on Apple USB keyboards */ set_bit(EV_REP, hi->input->evbit); hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); @@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), .driver_data = APPLE_HAS_FN }, + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI), + .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO), diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 34367df61b28..cb2d3170d9dc 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -88,6 +88,7 @@ #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 #define USB_VENDOR_ID_APPLE 0x05ac 
+#define BT_VENDOR_ID_APPLE 0x004c #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e @@ -157,6 +158,7 @@ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267 +#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 -- GitLab From e38c0ac55ee67cf3626cfbc2283f8873dc44d370 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Tue, 28 Aug 2018 13:29:55 +0200 Subject: [PATCH 0608/1692] HID: input: fix leaking custom input node name Make sure to free the custom input node name on disconnect. Cc: stable@vger.kernel.org # v4.18+ Fixes: c554bb045511 ("HID: input: append a suffix matching the application") Signed-off-by: Stefan Agner Signed-off-by: Jiri Kosina --- drivers/hid/hid-input.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 4e94ea3e280a..ac201817a2dd 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid) input_unregister_device(hidinput->input); else input_free_device(hidinput->input); + kfree(hidinput->name); kfree(hidinput); } -- GitLab From b2dd9f2e5a8a4a6afa9d41411cdbfc2f5ceeba71 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Tue, 28 Aug 2018 13:29:54 +0200 Subject: [PATCH 0609/1692] HID: core: fix memory leak on probe The dynamically allocted collection stack does not get freed in all situations. Make sure to also free the collection stack when using the parser in hid_open_report(). Fixes: 08a8a7cf1459 ("HID: core: do not upper bound the collection stack") Signed-off-by: Stefan Agner Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 3da354af7a0a..44a465db3f96 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device) hid_err(device, "unbalanced delimiter at end of report description\n"); goto err; } + kfree(parser->collection_stack); vfree(parser); device->status |= HID_STAT_PARSED; return 0; @@ -1047,6 +1048,7 @@ int hid_open_report(struct hid_device *device) hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); err: + kfree(parser->collection_stack); vfree(parser); hid_close_report(device); return ret; -- GitLab From 5382bed38f09636330fd119ca2c83d738a551540 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Mon, 27 Aug 2018 15:36:14 -0700 Subject: [PATCH 0610/1692] drm/i915/selftests: ring all doorbells in igt_guc_doorbells We currently verify that all doorbells can be registered with GuC and HW but don't check that all works as expected after a db ring. Do a nop ring of all doorbells to make sure we haven't misprogrammed any WQ or stage descriptor data. This will also help validating upcoming changes in the db programming flow. 
Cc: Michel Thierry Cc: Michal Wajdeczko Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Michel Thierry Acked-by: Katarzyna Dec Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180827223614.22789-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_guc_fwif.h | 1 + drivers/gpu/drm/i915/intel_guc_submission.c | 25 +++++++++----- drivers/gpu/drm/i915/intel_guc_submission.h | 4 +++ drivers/gpu/drm/i915/selftests/intel_guc.c | 38 +++++++++++++++++++++ 4 files changed, 59 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 1a0f2a39cef9..8382d591c784 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -49,6 +49,7 @@ #define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT) #define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT) #define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT) +#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT) #define WQ_TARGET_SHIFT 10 #define WQ_LEN_SHIFT 16 #define WQ_NO_WCFLUSH_WAIT (1 << 27) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 195adbd0ebf7..07b9d313b019 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -456,6 +456,9 @@ static void guc_wq_item_append(struct intel_guc_client *client, */ BUILD_BUG_ON(wqi_size != 16); + /* We expect the WQ to be active if we're appending items to it */ + GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE); + /* Free space is guaranteed. */ wq_off = READ_ONCE(desc->tail); GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), @@ -465,15 +468,19 @@ static void guc_wq_item_append(struct intel_guc_client *client, /* WQ starts from the page after doorbell / process_desc */ wqi = client->vaddr + wq_off + GUC_DB_SIZE; - /* Now fill in the 4-word work queue item */ - wqi->header = WQ_TYPE_INORDER | - (wqi_len << WQ_LEN_SHIFT) | - (target_engine << WQ_TARGET_SHIFT) | - WQ_NO_WCFLUSH_WAIT; - wqi->context_desc = context_desc; - wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; - GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); - wqi->fence_id = fence_id; + if (I915_SELFTEST_ONLY(client->use_nop_wqi)) { + wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT); + } else { + /* Now fill in the 4-word work queue item */ + wqi->header = WQ_TYPE_INORDER | + (wqi_len << WQ_LEN_SHIFT) | + (target_engine << WQ_TARGET_SHIFT) | + WQ_NO_WCFLUSH_WAIT; + wqi->context_desc = context_desc; + wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; + GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); + wqi->fence_id = fence_id; + } /* Make the update visible to GuC */ WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h index fb081cefef93..169c54568340 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/intel_guc_submission.h @@ -28,6 +28,7 @@ #include #include "i915_gem.h" +#include "i915_selftest.h" struct drm_i915_private; @@ -71,6 +72,9 @@ struct intel_guc_client { spinlock_t wq_lock; /* Per-engine counts of GuC submissions */ u64 submissions[I915_NUM_ENGINES]; + + /* For testing purposes, use nop WQ items instead of real ones */ + I915_SELFTEST_DECLARE(bool use_nop_wqi); }; int intel_guc_submission_init(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 407c98fb9170..90ba88c972cf 
100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -65,6 +65,40 @@ static int check_all_doorbells(struct intel_guc *guc) return 0; } +static int ring_doorbell_nop(struct intel_guc_client *client) +{ + struct guc_process_desc *desc = __get_process_desc(client); + int err; + + client->use_nop_wqi = true; + + spin_lock_irq(&client->wq_lock); + + guc_wq_item_append(client, 0, 0, 0, 0); + guc_ring_doorbell(client); + + spin_unlock_irq(&client->wq_lock); + + client->use_nop_wqi = false; + + /* if there are no issues GuC will update the WQ head and keep the + * WQ in active status + */ + err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10); + if (err) { + pr_err("doorbell %u ring failed!\n", client->doorbell_id); + return -EIO; + } + + if (desc->wq_status != WQ_STATUS_ACTIVE) { + pr_err("doorbell %u ring put WQ in bad state (%u)!\n", + client->doorbell_id, desc->wq_status); + return -EIO; + } + + return 0; +} + /* * Basic client sanity check, handy to validate create_clients. */ @@ -332,6 +366,10 @@ static int igt_guc_doorbells(void *arg) err = check_all_doorbells(guc); if (err) goto out; + + err = ring_doorbell_nop(clients[i]); + if (err) + goto out; } out: -- GitLab From f86cf25a609107960cf05263e491463feaae1f99 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 28 Aug 2018 11:39:48 +0800 Subject: [PATCH 0611/1692] Revert "staging: erofs: disable compiling temporarile" This reverts commit 156c3df8d4db4e693c062978186f44079413d74d. Since XArray and the new mount apis aren't merged in 4.19-rc1 merge window, the BROKEN mark can be reverted directly without any problems. Fixes: 156c3df8d4db ("staging: erofs: disable compiling temporarile") Cc: Matthew Wilcox Cc: David Howells Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Signed-off-by: Greg Kroah-Hartman --- drivers/staging/erofs/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig index 96f614934df1..663b755bf2fb 100644 --- a/drivers/staging/erofs/Kconfig +++ b/drivers/staging/erofs/Kconfig @@ -2,7 +2,7 @@ config EROFS_FS tristate "EROFS filesystem support" - depends on BROKEN + depends on BLOCK help EROFS(Enhanced Read-Only File System) is a lightweight read-only file system with modern designs (eg. page-sized -- GitLab From de25eb7f3075f6fb02962526664699cdbdc26db4 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Mon, 27 Aug 2018 15:30:20 -0700 Subject: [PATCH 0612/1692] drm/i915: introduce dp_to_i915() helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No functional change. But let's get first i915 pointer directly from intel_dp so we can clean up a lot of code later. 
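The new helper itself lands in intel_drv.h (that hunk is not visible in this excerpt); judging from the intel_dp_to_dev() helper being removed below, it is presumably just a thin static inline that collapses the old two-step lookup, along the lines of:

    /* sketch only - the actual definition lives in the intel_drv.h hunk of this patch */
    static inline struct drm_i915_private *dp_to_i915(struct intel_dp *intel_dp)
    {
            return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
    }

which is why every to_i915(intel_dp_to_dev(intel_dp)) call site in the diff below can be replaced mechanically.
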
Signed-off-by: Rodrigo Vivi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180827223021.7145-1-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_dp.c | 109 +++++++++++++++---------------- drivers/gpu/drm/i915/intel_drv.h | 6 ++ 2 files changed, 57 insertions(+), 58 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index db8515171270..436c22de33b6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -107,13 +107,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp) return intel_dig_port->base.type == INTEL_OUTPUT_EDP; } -static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - - return intel_dig_port->base.base.dev; -} - static struct intel_dp *intel_attached_dp(struct drm_connector *connector) { return enc_to_intel_dp(&intel_attached_encoder(connector)->base); @@ -232,7 +225,7 @@ intel_dp_link_required(int pixel_clock, int bpp) void icl_program_mg_dp_mode(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum port port = intel_dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(dev_priv, port); u32 ln0, ln1, lane_info; @@ -661,7 +654,7 @@ intel_dp_pps_init(struct intel_dp *intel_dp); static void pps_lock(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* * See intel_power_sequencer_reset() why we need @@ -674,7 +667,7 @@ static void pps_lock(struct intel_dp *intel_dp) static void pps_unlock(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); mutex_unlock(&dev_priv->pps_mutex); @@ -684,7 +677,7 @@ static void pps_unlock(struct intel_dp *intel_dp) static void vlv_power_sequencer_kick(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = intel_dp->pps_pipe; bool pll_enabled, release_cl_override = false; @@ -789,7 +782,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) static enum pipe vlv_power_sequencer_pipe(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum pipe pipe; @@ -836,7 +829,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) static int bxt_power_sequencer_idx(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int backlight_controller = dev_priv->vbt.backlight.controller; lockdep_assert_held(&dev_priv->pps_mutex); @@ -905,7 +898,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, static void vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 
enum port port = intel_dig_port->base.port; @@ -982,7 +975,7 @@ struct pps_registers { static void intel_pps_get_registers(struct intel_dp *intel_dp, struct pps_registers *regs) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int pps_idx = 0; memset(regs, 0, sizeof(*regs)); @@ -1028,7 +1021,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, { struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), edp_notifier); - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) return 0; @@ -1058,7 +1051,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, static bool edp_have_panel_power(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -1071,7 +1064,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -1085,7 +1078,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp) static void intel_dp_check_edp(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!intel_dp_is_edp(intel_dp)) return; @@ -1101,7 +1094,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp) static uint32_t intel_dp_aux_wait_done(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); uint32_t status; bool done; @@ -1118,7 +1111,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (index) return 0; @@ -1132,7 +1125,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (index) return 0; @@ -1150,7 +1143,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { /* Workaround for non-ULT HSW */ @@ -1552,7 +1545,7 @@ intel_aux_power_domain(struct intel_dp *intel_dp) static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1568,7 +1561,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) static 
i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1584,7 +1577,7 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1602,7 +1595,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1620,7 +1613,7 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1639,7 +1632,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1665,7 +1658,7 @@ intel_dp_aux_fini(struct intel_dp *intel_dp) static void intel_dp_aux_init(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; intel_dp->aux_ch = intel_aux_ch(intel_dp); @@ -1833,7 +1826,7 @@ struct link_config_limits { static int intel_dp_compute_bpp(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_connector *intel_connector = intel_dp->attached_connector; int bpp, bpc; @@ -2201,7 +2194,7 @@ static void wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t pp_stat_reg, pp_ctrl_reg; lockdep_assert_held(&dev_priv->pps_mutex); @@ -2277,7 +2270,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp) static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 control; lockdep_assert_held(&dev_priv->pps_mutex); @@ -2298,7 +2291,7 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) */ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; @@ -2369,7 +2362,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) static void 
edp_panel_vdd_off_sync(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); u32 pp; @@ -2435,7 +2428,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) */ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -2455,7 +2448,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) static void edp_panel_on(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2513,7 +2506,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp) static void edp_panel_off(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2561,7 +2554,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp) /* Enable backlight in the panel power control. */ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2604,7 +2597,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, /* Disable backlight in the panel power control. */ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -3035,7 +3028,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t *DP, uint8_t dp_train_pat) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); @@ -3117,7 +3110,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, static void intel_dp_enable_port(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* enable with pattern 1 (as per spec) */ @@ -3374,7 +3367,7 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum port port = encoder->port; @@ -3393,7 +3386,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) uint8_t intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum port port = encoder->port; @@ -3705,7 +3698,7 @@ 
ivb_cpu_edp_signal_levels(uint8_t train_set) void intel_dp_set_signal_levels(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; uint32_t signal_levels, mask = 0; @@ -3762,7 +3755,7 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; uint32_t val; @@ -4455,7 +4448,7 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder, static bool intel_dp_short_pulse(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 sink_irq_vector = 0; u8 old_sink_count = intel_dp->sink_count; bool ret; @@ -5547,7 +5540,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = { static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -5568,7 +5561,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum pipe pipe; @@ -5635,7 +5628,7 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) { struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum irqreturn ret = IRQ_NONE; if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { @@ -5751,7 +5744,7 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) static void intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; struct pps_registers regs; @@ -5819,7 +5812,7 @@ intel_pps_verify_state(struct intel_dp *intel_dp) static void intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct edp_power_seq cur, vbt, spec, *final = &intel_dp->pps_delays; @@ -5912,7 +5905,7 @@ static void intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, bool force_disable_vdd) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp_on, pp_off, pp_div, port_sel = 0; int div = dev_priv->rawclk_freq / 1000; struct pps_registers regs; @@ -6008,7 +6001,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, static void intel_dp_pps_init(struct intel_dp *intel_dp) { - struct drm_i915_private 
*dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_initial_power_sequencer_setup(intel_dp); @@ -6125,7 +6118,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, void intel_edp_drrs_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!crtc_state->has_drrs) { DRM_DEBUG_KMS("Panel doesn't support DRRS\n"); @@ -6160,7 +6153,7 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp, void intel_edp_drrs_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!old_crtc_state->has_drrs) return; @@ -6392,8 +6385,8 @@ intel_dp_drrs_init(struct intel_connector *connector, static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector = &intel_connector->base; struct drm_display_mode *fixed_mode = NULL; struct drm_display_mode *downclock_mode = NULL; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 843eefaa0f0c..94bd2735eb62 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1313,6 +1313,12 @@ dp_to_lspcon(struct intel_dp *intel_dp) return &dp_to_dig_port(intel_dp)->lspcon; } +static inline struct drm_i915_private * +dp_to_i915(struct intel_dp *intel_dp) +{ + return to_i915(dp_to_dig_port(intel_dp)->base.base.dev); +} + static inline struct intel_digital_port * hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) { -- GitLab From 1895759ee932e47f48b5e87baadc449ffd0853f1 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Mon, 27 Aug 2018 15:30:21 -0700 Subject: [PATCH 0613/1692] drm/i915: Use dp_to_i915 on intel_psr.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we have a generic caller let's simplify it and clean up the intel_psr.c code a bit. 
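For illustration, the conversion applied throughout intel_psr.c follows the pattern below; this is a minimal sketch (example_psr_func() is a made-up name), relying only on the dp_to_i915() helper added to intel_drv.h in the previous patch:

  /* Derive dev_priv directly from the intel_dp instead of walking
   * dig_port -> encoder -> drm_device by hand in every function. */
  static void example_psr_func(struct intel_dp *intel_dp)
  {
  	/* old:
  	 *   struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
  	 *   struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
  	 */
  	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

  	lockdep_assert_held(&dev_priv->psr.lock);
  }

The same one-line substitution is repeated in each converted function, dropping the local dig_port/dev variables that were only used to reach dev_priv.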
Cc: Dhinakaran Pandiyan Signed-off-by: Rodrigo Vivi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180827223021.7145-2-rodrigo.vivi@intel.com --- drivers/gpu/drm/i915/intel_psr.c | 50 +++++++++----------------------- 1 file changed, 14 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index aee64aee18fe..21984d4c08ed 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -270,7 +270,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct edp_vsc_psr psr_vsc; if (dev_priv->psr.psr2_enabled) { @@ -300,8 +300,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp, static void hsw_psr_setup_aux(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 aux_clock_divider, aux_ctl; int i; static const uint8_t aux_msg[] = { @@ -334,9 +333,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) static void intel_psr_enable_sink(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 dpcd_val = DP_PSR_ENABLE; /* Enable ALPM at sink for psr2 */ @@ -357,9 +354,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) static void hsw_activate_psr1(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 max_sleep_time = 0x1f; u32 val = EDP_PSR_ENABLE; @@ -414,9 +409,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) static void hsw_activate_psr2(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val; /* Let's use 6 as the minimum to cover all known cases including the @@ -452,8 +445,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay; int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0; @@ -488,7 +480,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; int 
psr_setup_time; @@ -544,9 +536,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, static void intel_psr_activate(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (INTEL_GEN(dev_priv) >= 9) WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); @@ -566,9 +556,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp) static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ @@ -639,9 +627,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, void intel_psr_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!crtc_state->has_psr) return; @@ -673,9 +659,7 @@ void intel_psr_enable(struct intel_dp *intel_dp, static void intel_psr_disable_source(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (dev_priv->psr.active) { i915_reg_t psr_status; @@ -714,9 +698,7 @@ intel_psr_disable_source(struct intel_dp *intel_dp) static void intel_psr_disable_locked(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->psr.lock); @@ -743,9 +725,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!old_crtc_state->has_psr) return; @@ -1102,9 +1082,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv) void intel_psr_short_pulse(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct i915_psr *psr = &dev_priv->psr; u8 val; const u8 errors = DP_PSR_RFB_STORAGE_ERROR | -- GitLab From ed11e4158451bf69e1e34b44797d6989d84db60f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 28 Aug 2018 16:37:23 +0300 Subject: [PATCH 0614/1692] drm/i915: Fix gtt_view asserts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit gcc is too smart for us and doesn't evaluate BUILD_BUG_ON()s in unused static inlines. Collect them up in one static inline and actually call it to make sure gcc sees it. Cc: Chris Wilson Suggested-by: Chris Wilson Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180828133723.18505-1-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_gtt.h | 15 ++++----------- drivers/gpu/drm/i915/i915_vma.h | 2 ++ 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index dd161c187a68..01d83a943142 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -167,29 +167,22 @@ struct intel_rotation_info { } plane[2]; } __packed; -static inline void assert_intel_rotation_info_is_packed(void) -{ - BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int)); -} - struct intel_partial_info { u64 offset; unsigned int size; } __packed; -static inline void assert_intel_partial_info_is_packed(void) -{ - BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); -} - enum i915_ggtt_view_type { I915_GGTT_VIEW_NORMAL = 0, I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info), I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info), }; -static inline void assert_i915_ggtt_view_type_is_unique(void) +static inline void assert_i915_gem_gtt_types(void) { + BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int)); + BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); + /* As we encode the size of each branch inside the union into its type, * we have to be careful that each branch has a unique size. */ diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index f1ba40bbe6f9..4f7c1c7599f4 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -251,6 +251,8 @@ i915_vma_compare(struct i915_vma *vma, if (cmp) return cmp; + assert_i915_gem_gtt_types(); + /* ggtt_view.type also encodes its size so that we both distinguish * different views using it as a "type" and also use a compact (no * accessing of uninitialised padding bytes) memcmp without storing -- GitLab From 04d5e2765802241b54ee93d1e655123c39fa7385 Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Wed, 22 Aug 2018 16:48:50 -0400 Subject: [PATCH 0615/1692] drm/amdgpu: Merge amdkfd into amdgpu MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since KFD is only supported by single GPU driver, it makes sense to merge amdgpu and amdkfd into one module. This patch is the initial step: merge Kconfig and Makefile. 
v2: also remove kfd from drm Kconfig Signed-off-by: Amber Lin Acked-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/Kconfig | 2 - drivers/gpu/drm/amd/amdgpu/Kconfig | 1 + drivers/gpu/drm/amd/amdgpu/Makefile | 6 +- drivers/gpu/drm/amd/amdkfd/Kconfig | 2 +- drivers/gpu/drm/amd/amdkfd/Makefile | 53 +++++++++++------ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 77 +++++++------------------ 6 files changed, 63 insertions(+), 78 deletions(-) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index cb88528e7b10..736b7e67e4ec 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -285,8 +285,6 @@ source "drivers/gpu/drm/bridge/Kconfig" source "drivers/gpu/drm/sti/Kconfig" -source "drivers/gpu/drm/amd/amdkfd/Kconfig" - source "drivers/gpu/drm/imx/Kconfig" source "drivers/gpu/drm/v3d/Kconfig" diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index e8af1f5e8a79..9221e5489069 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -42,3 +42,4 @@ config DRM_AMDGPU_GART_DEBUGFS source "drivers/gpu/drm/amd/acp/Kconfig" source "drivers/gpu/drm/amd/display/Kconfig" +source "drivers/gpu/drm/amd/amdkfd/Kconfig" diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index d2bafabe585d..847536b55f46 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -35,7 +35,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_DISPLAY_PATH) \ -I$(FULL_AMD_DISPLAY_PATH)/include \ -I$(FULL_AMD_DISPLAY_PATH)/dc \ - -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm + -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \ + -I$(FULL_AMD_PATH)/amdkfd amdgpu-y := amdgpu_drv.o @@ -136,6 +137,9 @@ amdgpu-y += \ amdgpu-y += amdgpu_amdkfd.o ifneq ($(CONFIG_HSA_AMD),) +AMDKFD_PATH := ../amdkfd +include $(FULL_AMD_PATH)/amdkfd/Makefile +amdgpu-y += $(AMDKFD_FILES) amdgpu-y += \ amdgpu_amdkfd_fence.o \ amdgpu_amdkfd_gpuvm.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index 3858820a0055..fbf0ee5201c3 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -3,7 +3,7 @@ # config HSA_AMD - tristate "HSA kernel driver for AMD GPU devices" + bool "HSA kernel driver for AMD GPU devices" depends on DRM_AMDGPU && X86_64 imply AMD_IOMMU_V2 select MMU_NOTIFIER diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index ffd096fffc1c..69ec96998bb9 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -23,26 +23,41 @@ # Makefile for Heterogenous System Architecture support for AMD GPU devices # -ccflags-y := -Idrivers/gpu/drm/amd/include/ \ - -Idrivers/gpu/drm/amd/include/asic_reg - -amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ - kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \ - kfd_process.o kfd_queue.o kfd_mqd_manager.o \ - kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \ - kfd_mqd_manager_v9.o \ - kfd_kernel_queue.o kfd_kernel_queue_cik.o \ - kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \ - kfd_packet_manager.o kfd_process_queue_manager.o \ - kfd_device_queue_manager.o kfd_device_queue_manager_cik.o \ - kfd_device_queue_manager_vi.o kfd_device_queue_manager_v9.o \ - kfd_interrupt.o kfd_events.o cik_event_interrupt.o \ - kfd_int_process_v9.o kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o +AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ + $(AMDKFD_PATH)/kfd_device.o \ + 
$(AMDKFD_PATH)/kfd_chardev.o \ + $(AMDKFD_PATH)/kfd_topology.o \ + $(AMDKFD_PATH)/kfd_pasid.o \ + $(AMDKFD_PATH)/kfd_doorbell.o \ + $(AMDKFD_PATH)/kfd_flat_memory.o \ + $(AMDKFD_PATH)/kfd_process.o \ + $(AMDKFD_PATH)/kfd_queue.o \ + $(AMDKFD_PATH)/kfd_mqd_manager.o \ + $(AMDKFD_PATH)/kfd_mqd_manager_cik.o \ + $(AMDKFD_PATH)/kfd_mqd_manager_vi.o \ + $(AMDKFD_PATH)/kfd_mqd_manager_v9.o \ + $(AMDKFD_PATH)/kfd_kernel_queue.o \ + $(AMDKFD_PATH)/kfd_kernel_queue_cik.o \ + $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \ + $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \ + $(AMDKFD_PATH)/kfd_packet_manager.o \ + $(AMDKFD_PATH)/kfd_process_queue_manager.o \ + $(AMDKFD_PATH)/kfd_device_queue_manager.o \ + $(AMDKFD_PATH)/kfd_device_queue_manager_cik.o \ + $(AMDKFD_PATH)/kfd_device_queue_manager_vi.o \ + $(AMDKFD_PATH)/kfd_device_queue_manager_v9.o \ + $(AMDKFD_PATH)/kfd_interrupt.o \ + $(AMDKFD_PATH)/kfd_events.o \ + $(AMDKFD_PATH)/cik_event_interrupt.o \ + $(AMDKFD_PATH)/kfd_int_process_v9.o \ + $(AMDKFD_PATH)/kfd_dbgdev.o \ + $(AMDKFD_PATH)/kfd_dbgmgr.o \ + $(AMDKFD_PATH)/kfd_crat.o ifneq ($(CONFIG_AMD_IOMMU_V2),) -amdkfd-y += kfd_iommu.o +AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o endif -amdkfd-$(CONFIG_DEBUG_FS) += kfd_debugfs.o - -obj-$(CONFIG_HSA_AMD) += amdkfd.o +ifneq ($(CONFIG_DEBUG_FS),) +AMDKFD_FILES += $(AMDKFD_PATH)/kfd_debugfs.o +endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 6e1f5c7c2d4b..b445674bba9a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -20,21 +20,11 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include #include #include #include -#include #include "kfd_priv.h" -#define KFD_DRIVER_AUTHOR "AMD Inc. and others" - -#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs" -#define KFD_DRIVER_DATE "20150421" -#define KFD_DRIVER_MAJOR 0 -#define KFD_DRIVER_MINOR 7 -#define KFD_DRIVER_PATCHLEVEL 2 - static const struct kgd2kfd_calls kgd2kfd = { .exit = kgd2kfd_exit, .probe = kgd2kfd_probe, @@ -95,33 +85,7 @@ module_param(halt_if_hws_hang, int, 0644); MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)"); -static int amdkfd_init_completed; - - -int kgd2kfd_init(unsigned int interface_version, - const struct kgd2kfd_calls **g2f) -{ - if (!amdkfd_init_completed) - return -EPROBE_DEFER; - - /* - * Only one interface version is supported, - * no kfd/kgd version skew allowed. 
- */ - if (interface_version != KFD_INTERFACE_VERSION) - return -EINVAL; - - *g2f = &kgd2kfd; - - return 0; -} -EXPORT_SYMBOL(kgd2kfd_init); - -void kgd2kfd_exit(void) -{ -} - -static int __init kfd_module_init(void) +static int kfd_init(void) { int err; @@ -129,7 +93,7 @@ static int __init kfd_module_init(void) if ((sched_policy < KFD_SCHED_POLICY_HWS) || (sched_policy > KFD_SCHED_POLICY_NO_HWS)) { pr_err("sched_policy has invalid value\n"); - return -1; + return -EINVAL; } /* Verify module parameters */ @@ -137,7 +101,7 @@ static int __init kfd_module_init(void) (max_num_of_queues_per_device > KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { pr_err("max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); - return -1; + return -EINVAL; } err = kfd_chardev_init(); @@ -154,10 +118,6 @@ static int __init kfd_module_init(void) kfd_debugfs_init(); - amdkfd_init_completed = 1; - - dev_info(kfd_device, "Initialized module\n"); - return 0; err_create_wq: @@ -168,23 +128,30 @@ static int __init kfd_module_init(void) return err; } -static void __exit kfd_module_exit(void) +static void kfd_exit(void) { - amdkfd_init_completed = 0; - kfd_debugfs_fini(); kfd_process_destroy_wq(); kfd_topology_shutdown(); kfd_chardev_exit(); - pr_info("amdkfd: Removed module\n"); } -module_init(kfd_module_init); -module_exit(kfd_module_exit); +int kgd2kfd_init(unsigned int interface_version, + const struct kgd2kfd_calls **g2f) +{ + int err; + + err = kfd_init(); + if (err) + return err; -MODULE_AUTHOR(KFD_DRIVER_AUTHOR); -MODULE_DESCRIPTION(KFD_DRIVER_DESC); -MODULE_LICENSE("GPL and additional rights"); -MODULE_VERSION(__stringify(KFD_DRIVER_MAJOR) "." - __stringify(KFD_DRIVER_MINOR) "." - __stringify(KFD_DRIVER_PATCHLEVEL)); + *g2f = &kgd2kfd; + + return 0; +} +EXPORT_SYMBOL(kgd2kfd_init); + +void kgd2kfd_exit(void) +{ + kfd_exit(); +} -- GitLab From 82b7b619c44c5cd5b92134cbb58bd62558079ba4 Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Wed, 22 Aug 2018 17:05:33 -0400 Subject: [PATCH 0616/1692] drm/amdgpu: Remove CONFIG_HSA_AMD_MODULE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After amdkfd is merged to amdgpu, CONFIG_HSA_AMD_MODULE no longer exists. 
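For context: Kconfig only generates a CONFIG_<SYMBOL>_MODULE define for tristate symbols built as =m, and HSA_AMD is now a bool, so only CONFIG_HSA_AMD can ever be set. The init path therefore collapses to a single guard; sketched below is the resulting amdgpu_amdkfd_init(), a simplified reconstruction of the diff that follows:

  int amdgpu_amdkfd_init(void)
  {
  	int ret;

  #ifdef CONFIG_HSA_AMD
  	/* KFD is linked into amdgpu.ko, so call it directly instead of
  	 * resolving kgd2kfd_init through symbol_request()/symbol_put(). */
  	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
  	if (ret)
  		kgd2kfd = NULL;
  	amdgpu_amdkfd_gpuvm_init_mem_limits();
  #else
  	kgd2kfd = NULL;
  	ret = -ENOENT;
  #endif
  	return ret;
  }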
Signed-off-by: Amber Lin Acked-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 26 +++------------------- 1 file changed, 3 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 3dbe675b6fe1..8bee9a0a1dec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -36,36 +36,16 @@ int amdgpu_amdkfd_init(void) { int ret; -#if defined(CONFIG_HSA_AMD_MODULE) - int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**); - - kgd2kfd_init_p = symbol_request(kgd2kfd_init); - - if (kgd2kfd_init_p == NULL) - return -ENOENT; - - ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd); - if (ret) { - symbol_put(kgd2kfd_init); - kgd2kfd = NULL; - } - - -#elif defined(CONFIG_HSA_AMD) - +#ifdef CONFIG_HSA_AMD ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd); if (ret) kgd2kfd = NULL; - + amdgpu_amdkfd_gpuvm_init_mem_limits(); #else kgd2kfd = NULL; ret = -ENOENT; #endif -#if defined(CONFIG_HSA_AMD_MODULE) || defined(CONFIG_HSA_AMD) - amdgpu_amdkfd_gpuvm_init_mem_limits(); -#endif - return ret; } @@ -507,7 +487,7 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) return false; } -#if !defined(CONFIG_HSA_AMD_MODULE) && !defined(CONFIG_HSA_AMD) +#ifndef CONFIG_HSA_AMD bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm) { return false; -- GitLab From 521fb7d021f7952aa3030e56c19edf342309cf9f Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Thu, 23 Aug 2018 10:52:34 -0400 Subject: [PATCH 0617/1692] drm/amdgpu: Move KFD parameters to amdgpu (v3) After merging KFD into amdgpu, move module parameters defined in KFD to amdgpu_drv.c, where other module parameters are declared. v2: add kernel-doc comments v3: rebase and fix parameter variable name (Alex) Signed-off-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 96 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 45 ------------ 2 files changed, 96 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 2221f6b1dd7c..82e6b6746511 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -39,6 +39,7 @@ #include "amdgpu_gem.h" #include "amdgpu_amdkfd.h" +#include "kfd_priv.h" /* * KMS wrapper. @@ -127,6 +128,16 @@ int amdgpu_compute_multipipe = -1; int amdgpu_gpu_recovery = -1; /* auto */ int amdgpu_emu_mode = 0; uint amdgpu_smu_memory_pool_size = 0; +/* KFD parameters */ +int sched_policy = KFD_SCHED_POLICY_HWS; +int hws_max_conc_proc = 8; +int cwsr_enable = 1; +int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; +int send_sigterm; +int debug_largebar; +int ignore_crat; +int noretry; +int halt_if_hws_hang; /** * DOC: vramlimit (int) @@ -532,6 +543,91 @@ MODULE_PARM_DESC(smu_memory_pool_size, "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte"); module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444); +/** + * DOC: sched_policy (int) + * Set scheduling policy. Default is HWS(hardware scheduling) with over-subscription. + * Setting 1 disables over-subscription. Setting 2 disables HWS and statically + * assigns queues to HQDs. 
+ */ +module_param(sched_policy, int, 0444); +MODULE_PARM_DESC(sched_policy, + "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)"); + +/** + * DOC: hws_max_conc_proc (int) + * Maximum number of processes that HWS can schedule concurrently. The maximum is the + * number of VMIDs assigned to the HWS, which is also the default. + */ +module_param(hws_max_conc_proc, int, 0444); +MODULE_PARM_DESC(hws_max_conc_proc, + "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); + +/** + * DOC: cwsr_enable (int) + * CWSR(compute wave store and resume) allows the GPU to preempt shader execution in + * the middle of a compute wave. Default is 1 to enable this feature. Setting 0 + * disables it. + */ +module_param(cwsr_enable, int, 0444); +MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))"); + +/** + * DOC: max_num_of_queues_per_device (int) + * Maximum number of queues per device. Valid setting is between 1 and 4096. Default + * is 4096. + */ +module_param(max_num_of_queues_per_device, int, 0444); +MODULE_PARM_DESC(max_num_of_queues_per_device, + "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); + +/** + * DOC: send_sigterm (int) + * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm + * but just print errors on dmesg. Setting 1 enables sending sigterm. + */ +module_param(send_sigterm, int, 0444); +MODULE_PARM_DESC(send_sigterm, + "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)"); + +/** + * DOC: debug_largebar (int) + * Set debug_largebar as 1 to enable simulating large-bar capability on non-large bar + * system. This limits the VRAM size reported to ROCm applications to the visible + * size, usually 256MB. + * Default value is 0, diabled. + */ +module_param(debug_largebar, int, 0444); +MODULE_PARM_DESC(debug_largebar, + "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)"); + +/** + * DOC: ignore_crat (int) + * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT + * table to get information about AMD APUs. This option can serve as a workaround on + * systems with a broken CRAT table. + */ +module_param(ignore_crat, int, 0444); +MODULE_PARM_DESC(ignore_crat, + "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)"); + +/** + * DOC: noretry (int) + * This parameter sets sh_mem_config.retry_disable. Default value, 0, enables retry. + * Setting 1 disables retry. + * Retry is needed for recoverable page faults. + */ +module_param(noretry, int, 0644); +MODULE_PARM_DESC(noretry, + "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)"); + +/** + * DOC: halt_if_hws_hang (int) + * Halt if HWS hang is detected. Default value, 0, disables the halt on hang. + * Setting 1 enables halt on hang. 
+ */ +module_param(halt_if_hws_hang, int, 0644); +MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)"); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index b445674bba9a..8018163414ff 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -21,7 +21,6 @@ */ #include -#include #include #include "kfd_priv.h" @@ -41,50 +40,6 @@ static const struct kgd2kfd_calls kgd2kfd = { .post_reset = kgd2kfd_post_reset, }; -int sched_policy = KFD_SCHED_POLICY_HWS; -module_param(sched_policy, int, 0444); -MODULE_PARM_DESC(sched_policy, - "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)"); - -int hws_max_conc_proc = 8; -module_param(hws_max_conc_proc, int, 0444); -MODULE_PARM_DESC(hws_max_conc_proc, - "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); - -int cwsr_enable = 1; -module_param(cwsr_enable, int, 0444); -MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))"); - -int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; -module_param(max_num_of_queues_per_device, int, 0444); -MODULE_PARM_DESC(max_num_of_queues_per_device, - "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); - -int send_sigterm; -module_param(send_sigterm, int, 0444); -MODULE_PARM_DESC(send_sigterm, - "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)"); - -int debug_largebar; -module_param(debug_largebar, int, 0444); -MODULE_PARM_DESC(debug_largebar, - "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)"); - -int ignore_crat; -module_param(ignore_crat, int, 0444); -MODULE_PARM_DESC(ignore_crat, - "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)"); - -int noretry; -module_param(noretry, int, 0644); -MODULE_PARM_DESC(noretry, - "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled (default), 1 = retry disabled)"); - -int halt_if_hws_hang; -module_param(halt_if_hws_hang, int, 0644); -MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)"); - - static int kfd_init(void) { int err; -- GitLab From 496f3347d834aec91c38b45d6249ed00f58ad233 Mon Sep 17 00:00:00 2001 From: Neeraj Dantu Date: Tue, 28 Aug 2018 16:37:58 +0000 Subject: [PATCH 0618/1692] ARM: dts: Fix file permission for am335x-osd3358-sm-red.dts Fix wrong mode for dts file added by commit bb3e3fbbac86 ("ARM: dts: Add DT support for Octavo Systems OSD3358-SM-RED based on TI AM335x"). 
Signed-off-by: Neeraj Dantu CC: Robert Nelson CC: Jason Kridner Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-osd3358-sm-red.dts | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 arch/arm/boot/dts/am335x-osd3358-sm-red.dts diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts old mode 100755 new mode 100644 -- GitLab From 0c79c0bb872e393d507fb7a0835b2ec124f8266b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 27 Aug 2018 15:43:37 +0200 Subject: [PATCH 0619/1692] drm/amdgpu: remove extra newline when printing VM faults MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Looks like a copy&paste error to me. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 39bee98155ee..9e976c2be955 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -265,7 +265,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); dev_err(adev->dev, - "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n", + "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n", entry->vmid_src ? "mmhub" : "gfxhub", entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, -- GitLab From e78196444b43caed136bf424b09352d8433cdb95 Mon Sep 17 00:00:00 2001 From: Yintian Tao Date: Wed, 22 Aug 2018 17:08:13 +0800 Subject: [PATCH 0620/1692] drm/amdgpu: move full access into amdgpu_device_ip_suspend It will be more safe to make full-acess include both phase1 and phase2. Then accessing special registeris wherever at phase1 or phase2 will not block any shutdown and suspend process under virtualization. 
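The resulting ordering, reconstructed here as a sketch for clarity (function names as in the driver; see the diff below for the actual change):

  int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
  {
  	int r;

  	/* take full GPU access once, covering both suspend phases */
  	if (amdgpu_sriov_vf(adev))
  		amdgpu_virt_request_full_gpu(adev, false);

  	r = amdgpu_device_ip_suspend_phase1(adev);
  	if (r)
  		return r;	/* note: a phase1 failure returns before the release */

  	r = amdgpu_device_ip_suspend_phase2(adev);

  	/* release only after phase2 so register access in either phase
  	 * stays inside the full-access window */
  	if (amdgpu_sriov_vf(adev))
  		amdgpu_virt_release_full_gpu(adev, false);

  	return r;
  }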
Signed-off-by: Yintian Tao Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8f431740c424..a20c13c6f6f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1932,9 +1932,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) { int i, r; - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_request_full_gpu(adev, false); - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); @@ -1953,9 +1950,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) } } - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_release_full_gpu(adev, false); - return 0; } @@ -2007,11 +2001,17 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev) { int r; + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_request_full_gpu(adev, false); + r = amdgpu_device_ip_suspend_phase1(adev); if (r) return r; r = amdgpu_device_ip_suspend_phase2(adev); + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_release_full_gpu(adev, false); + return r; } -- GitLab From 7ef0b435457a797712119c0151e144744bc45ded Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Tue, 28 Aug 2018 20:52:40 +0800 Subject: [PATCH 0621/1692] drm/amdgpu: Need to set moved to true when evict bo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the VMC page fault when the running sequence is as below: 1.amdgpu_gem_create_ioctl 2.ttm_bo_swapout->amdgpu_vm_bo_invalidate, as not called amdgpu_vm_bo_base_init, so won't called list_add_tail(&base->bo_list, &bo->va). Even the bo was evicted, it won't set the bo_base->moved. 3.drm_gem_open_ioctl->amdgpu_vm_bo_base_init, here only called list_move_tail(&base->vm_status, &vm->evicted), but not set the bo_base->moved. 4.amdgpu_vm_bo_map->amdgpu_vm_bo_insert_map, as the bo_base->moved is not set true, the function amdgpu_vm_bo_insert_map will call list_move(&bo_va->base.vm_status, &vm->moved) 5.amdgpu_cs_ioctl won't validate the swapout bo, as it is only in the moved list, not in the evict list. So VMC page fault occurs. Signed-off-by: Emily Deng Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 25b390dc8636..60c0609b78a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * is validated on next vm use to avoid fault. * */ list_move_tail(&base->vm_status, &vm->evicted); + base->moved = true; } /** -- GitLab From bdb1922abd620d24715906bac4d119274d98f4c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 28 Aug 2018 11:26:17 +0200 Subject: [PATCH 0622/1692] drm/amdgpu: Only retrieve GPU address of GART table after pinning it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Doing it earlier hits a WARN_ON_ONCE in amdgpu_bo_gpu_offset. 
Fixes: "drm/amdgpu: remove gart.table_addr" Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5 ++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 543287e5d67b..9c45ea318bd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -494,7 +494,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) { - uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + uint64_t table_addr; int r, i; u32 field; @@ -505,6 +505,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) r = amdgpu_gart_table_vram_pin(adev); if (r) return r; + + table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + /* Setup TLB control */ WREG32(mmMC_VM_MX_L1_TLB_CNTL, (0xA << 7) | diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index c88708abe016..d3400064e9db 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -602,7 +602,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) */ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) { - uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + uint64_t table_addr; int r, i; u32 tmp, field; @@ -613,6 +613,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) r = amdgpu_gart_table_vram_pin(adev); if (r) return r; + + table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + /* Setup TLB control */ tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 8213ea1a6cbc..fb0d57655f78 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -807,7 +807,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) */ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) { - uint64_t table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + uint64_t table_addr; int r, i; u32 tmp, field; @@ -818,6 +818,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) r = amdgpu_gart_table_vram_pin(adev); if (r) return r; + + table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); + /* Setup TLB control */ tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); -- GitLab From 2d59bb602314a4b2593fde267734266b5e872dd0 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 27 Aug 2018 19:18:21 -0700 Subject: [PATCH 0623/1692] ARM: dts: omap4-droid4: Fix emmc errors seen on some devices Otherwise we can get the following errors occasionally on some devices: mmc1: tried to HW reset card, got error -110 mmcblk1: error -110 requesting status mmcblk1: recovery failed! print_req_error: I/O error, dev mmcblk1, sector 14329 ... I have one device that hits this error almost on every boot, and another one that hits it only rarely with the other ones I've used behave without problems. 
I'm not sure if the issue is related to a particular eMMC card model, but in case it is, both of the machines with issues have: # cat /sys/class/mmc_host/mmc1/mmc1:0001/manfid \ /sys/class/mmc_host/mmc1/mmc1:0001/oemid \ /sys/class/mmc_host/mmc1/mmc1:0001/name 0x000045 0x0100 SEM16G and the working ones have: 0x000011 0x0100 016G92 Note that "ti,non-removable" is different as omap_hsmmc_reg_get() does not call omap_hsmmc_disable_boot_regulators() if no_regulator_off_init is set. And currently we set no_regulator_off_init only for "ti,non-removable" and not for "non-removable". It seems that we should have "non-removable" with some other mmc generic property behave in the same way instead of having to use a non-generic property. But let's fix the issue first. Fixes: 7e2f8c0ae670 ("ARM: dts: Add minimal support for motorola droid 4 xt894") Cc: Marcel Partap Cc: Merlijn Wajer Cc: Michael Scott Cc: NeKit Cc: Pavel Machek Cc: Sebastian Reichel Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/omap4-droid4-xt894.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 3c26a4bbc340..04758a2a87f0 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -354,7 +354,7 @@ &mmc1 { &mmc2 { vmmc-supply = <&vsdio>; bus-width = <8>; - non-removable; + ti,non-removable; }; &mmc3 { -- GitLab From 6ddd9769db4fc11a98bd7e58be1764e47fdb8384 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Tue, 28 Aug 2018 20:52:40 +0800 Subject: [PATCH 0624/1692] drm/amdgpu: Need to set moved to true when evict bo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the VMC page fault when the running sequence is as below: 1.amdgpu_gem_create_ioctl 2.ttm_bo_swapout->amdgpu_vm_bo_invalidate, as not called amdgpu_vm_bo_base_init, so won't called list_add_tail(&base->bo_list, &bo->va). Even the bo was evicted, it won't set the bo_base->moved. 3.drm_gem_open_ioctl->amdgpu_vm_bo_base_init, here only called list_move_tail(&base->vm_status, &vm->evicted), but not set the bo_base->moved. 4.amdgpu_vm_bo_map->amdgpu_vm_bo_insert_map, as the bo_base->moved is not set true, the function amdgpu_vm_bo_insert_map will call list_move(&bo_va->base.vm_status, &vm->moved) 5.amdgpu_cs_ioctl won't validate the swapout bo, as it is only in the moved list, not in the evict list. So VMC page fault occurs. Signed-off-by: Emily Deng Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d174d50e3bd3..b17771dd5ce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * is validated on next vm use to avoid fault. 
* */ list_move_tail(&base->vm_status, &vm->evicted); + base->moved = true; } /** -- GitLab From ca917f9fe1a0fab3dde41bba4bbd173c5a3c5805 Mon Sep 17 00:00:00 2001 From: Ryan Lee Date: Thu, 23 Aug 2018 18:37:08 -0700 Subject: [PATCH 0625/1692] ASoC: max98373: Added 10ms sleep after amp software reset Signed-off-by: Ryan Lee Signed-off-by: Mark Brown --- sound/soc/codecs/max98373.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c index 2764fae69333..1093f766d0d2 100644 --- a/sound/soc/codecs/max98373.c +++ b/sound/soc/codecs/max98373.c @@ -730,6 +730,7 @@ static int max98373_probe(struct snd_soc_component *component) /* Software Reset */ regmap_write(max98373->regmap, MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); + usleep_range(10000, 11000); /* IV default slot configuration */ regmap_write(max98373->regmap, @@ -818,6 +819,7 @@ static int max98373_resume(struct device *dev) regmap_write(max98373->regmap, MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); + usleep_range(10000, 11000); regcache_cache_only(max98373->regmap, false); regcache_sync(max98373->regmap); return 0; -- GitLab From 7509487785d7a2bf3606cf26710f0ca29e9ca94d Mon Sep 17 00:00:00 2001 From: Shuming Fan Date: Fri, 24 Aug 2018 10:52:19 +0800 Subject: [PATCH 0626/1692] ASoC: rt5682: Change DAC/ADC volume scale The step of DAC/ADC volume scale changes from 0.375dB to 0.75dB Signed-off-by: Shuming Fan Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- sound/soc/codecs/rt5682.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 640d400ca013..afe7d5b19313 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -750,8 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg) } static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0); -static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); -static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); +static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0); +static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0); static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ @@ -1114,7 +1114,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = { /* DAC Digital Volume */ SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL, - RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv), + RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv), /* IN Boost Volume */ SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL, @@ -1124,7 +1124,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = { SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL, RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1), SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL, - RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv), + RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), /* ADC Boost Volume Control */ SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST, -- GitLab From 960cdd50ca9fdfeb82c2757107bcb7f93c8d7d41 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Wed, 22 Aug 2018 22:49:36 -0500 Subject: [PATCH 0627/1692] ASoC: wm8804: Add ACPI support HID made of either Wolfson/CirrusLogic PCI ID + 8804 identifier. This helps enumerate the HifiBerry Digi+ HAT boards on the Up2 platform. 
The scripts at https://github.com/thesofproject/acpi-scripts can be used to add the ACPI initrd overlays. Signed-off-by: Pierre-Louis Bossart Acked-by: Charles Keepax Signed-off-by: Mark Brown --- sound/soc/codecs/wm8804-i2c.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c index f27464c2c5ba..79541960f45d 100644 --- a/sound/soc/codecs/wm8804-i2c.c +++ b/sound/soc/codecs/wm8804-i2c.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "wm8804.h" @@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id); +#if defined(CONFIG_OF) static const struct of_device_id wm8804_of_match[] = { { .compatible = "wlf,wm8804", }, { } }; MODULE_DEVICE_TABLE(of, wm8804_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id wm8804_acpi_match[] = { + { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */ + { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */ + { }, +}; +MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match); +#endif static struct i2c_driver wm8804_i2c_driver = { .driver = { .name = "wm8804", .pm = &wm8804_pm, - .of_match_table = wm8804_of_match, + .of_match_table = of_match_ptr(wm8804_of_match), + .acpi_match_table = ACPI_PTR(wm8804_acpi_match), }, .probe = wm8804_i2c_probe, .remove = wm8804_i2c_remove, -- GitLab From eb0f50441056c68b2bbef82ac03d300221f41d26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 28 Aug 2018 17:27:06 +0300 Subject: [PATCH 0628/1692] drm/i915: Don't pass plane to .check_plane() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit .check_plane() already gets the plane state, so we can dig out the plane from there if needed. No need in passing it separately. 
Cc: José Roberto de Souza Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180828142707.31583-1-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/intel_atomic_plane.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 11 +++++------ drivers/gpu/drm/i915/intel_drv.h | 5 ++--- drivers/gpu/drm/i915/intel_sprite.c | 4 ++-- 4 files changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index dcba645cabb8..eddcdd6e4b3b 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -159,7 +159,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ } intel_state->base.visible = false; - ret = intel_plane->check_plane(intel_plane, crtc_state, intel_state); + ret = intel_plane->check_plane(crtc_state, intel_state); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b0b6e1e9a294..2afe1bdb284e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9694,8 +9694,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); } -static int i845_check_cursor(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, +static int i845_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->base.fb; @@ -9885,10 +9884,10 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) return true; } -static int i9xx_check_cursor(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, +static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->base.fb; enum pipe pipe = plane->pipe; @@ -13190,10 +13189,10 @@ skl_max_scale(struct intel_crtc *intel_crtc, } static int -intel_check_primary_plane(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, +intel_check_primary_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { + struct intel_plane *plane = to_intel_plane(state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct drm_crtc *crtc = state->base.crtc; int min_scale = DRM_PLANE_HELPER_NO_SCALING; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 94bd2735eb62..9d0ca1715d81 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -971,9 +971,8 @@ struct intel_plane { void (*disable_plane)(struct intel_plane *plane, struct intel_crtc *crtc); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); - int (*check_plane)(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, - struct intel_plane_state *state); + int (*check_plane)(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state); }; struct intel_watermark_params { diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index c286dda625e4..9600ccfc5b76 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -959,10 +959,10 @@ g4x_plane_get_hw_state(struct intel_plane 
*plane, } static int -intel_check_sprite_plane(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, +intel_check_sprite_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { + struct intel_plane *plane = to_intel_plane(state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = state->base.fb; -- GitLab From 0d45db9c7a02a4736b78bb8e4ee7d96a29f554b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 28 Aug 2018 17:27:07 +0300 Subject: [PATCH 0629/1692] drm/i915: Reject compressed Y/Yf with interlaced modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Y/Yf tiling can't be used with IF-ID. We already reject uncompressed Y/Yf but we should also reject them when compressed. Cc: José Roberto de Souza Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180828142707.31583-2-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza Reviewed-by: Mahesh Kumar --- drivers/gpu/drm/i915/intel_atomic_plane.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index eddcdd6e4b3b..fa7df5fe154b 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -170,7 +170,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED || - state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) { + state->fb->modifier == I915_FORMAT_MOD_Yf_TILED || + state->fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + state->fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) { DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; } -- GitLab From 5ea752c6efdf5aa8a57aed816d453a8f479f1b0a Mon Sep 17 00:00:00 2001 From: Danny Smith Date: Thu, 23 Aug 2018 10:26:20 +0200 Subject: [PATCH 0630/1692] ASoC: sigmadsp: safeload should not have lower byte limit Fixed range in safeload conditional to allow safeload to up to 20 bytes, without a lower limit. 
Signed-off-by: Danny Smith Acked-by: Lars-Peter Clausen Signed-off-by: Mark Brown --- sound/soc/codecs/sigmadsp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c index d53680ac78e4..6df158669420 100644 --- a/sound/soc/codecs/sigmadsp.c +++ b/sound/soc/codecs/sigmadsp.c @@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp, struct sigmadsp_control *ctrl, void *data) { /* safeload loads up to 20 bytes in a atomic operation */ - if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops && - sigmadsp->ops->safeload) + if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload) return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data, ctrl->num_bytes); else -- GitLab From 18563409b13274e9d199276ba82910f72b69c308 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Mon, 27 Aug 2018 15:56:24 -0700 Subject: [PATCH 0631/1692] drm/i915: Clean up skl_plane_has_planar() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit skl_plane_has_planar is hard to read, simplify the logic by checking for support in the order of platform, pipe and plane. No change in functionality intended. v2: Fix logic for primary plane (Ville) Cc: Ville Syrjälä Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Ville Syrjälä Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180827225624.4912-1-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_display.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2afe1bdb284e..bbd4d469cfdf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13619,24 +13619,15 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { - if (plane_id == PLANE_PRIMARY) { - if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return false; - else if ((INTEL_GEN(dev_priv) == 9 && pipe == PIPE_C) && - !IS_GEMINILAKE(dev_priv)) - return false; - } else if (plane_id >= PLANE_SPRITE0) { - if (plane_id == PLANE_CURSOR) - return false; - if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) == 10) { - if (plane_id != PLANE_SPRITE0) - return false; - } else { - if (plane_id != PLANE_SPRITE0 || pipe == PIPE_C || - IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return false; - } - } + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) + return false; + + if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) + return false; + + if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) + return false; + return true; } -- GitLab From b45649fbd5bf94199a84bdeb4515bca926f698a9 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Fri, 24 Aug 2018 13:38:56 -0700 Subject: [PATCH 0632/1692] drm/i915: Do not advertize support for NV12 on ICL yet. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ICL requires two planes for scanning out a NV12 framebuffer. Do not advertize support for creating NV12 framebuffers until required plane programming is implemented. v2: Do not allow adding buffers. 
Check inside skl_plane_has_planar (Ville) Bspec: Plane Planar YUV programming (18566) Cc: Ville Syrjälä Cc: Rodrigo Vivi Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180824203856.17700-2-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/intel_display.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bbd4d469cfdf..ec3e24f07486 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13619,6 +13619,13 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { + /* + * FIXME: ICL requires two hardware planes for scanning out NV12 + * framebuffers. Do not advertize support until this is implemented. + */ + if (INTEL_GEN(dev_priv) >= 11) + return false; + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) return false; @@ -14540,7 +14547,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, break; case DRM_FORMAT_NV12: if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || - IS_BROXTON(dev_priv)) { + IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) { DRM_DEBUG_KMS("unsupported pixel format: %s\n", drm_get_format_name(mode_cmd->pixel_format, &format_name)); -- GitLab From 04b2d03a75652bda989de1595048f0501dc0c0a0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 21 Aug 2018 11:53:03 +0200 Subject: [PATCH 0633/1692] spi: Fix double IDR allocation with DT aliases If the SPI bus number is provided by a DT alias, idr_alloc() is called twice, leading to: WARNING: CPU: 1 PID: 1 at drivers/spi/spi.c:2179 spi_register_controller+0x11c/0x5d8 couldn't get idr Fix this by moving the handling of fixed SPI bus numbers up, before the DT handling code fills in ctlr->bus_num. Fixes: 1a4327fbf4554d5b ("spi: fix IDR collision on systems with both fixed and dynamic SPI bus numbers") Signed-off-by: Geert Uytterhoeven Tested-by: Fabio Estevam Signed-off-by: Mark Brown --- drivers/spi/spi.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index a00d006d4c3a..9da0bc5a036c 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr) */ if (ctlr->num_chipselect == 0) return -EINVAL; - /* allocate dynamic bus number using Linux idr */ - if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { + if (ctlr->bus_num >= 0) { + /* devices with a fixed bus num must check-in with the num */ + mutex_lock(&board_lock); + id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, + ctlr->bus_num + 1, GFP_KERNEL); + mutex_unlock(&board_lock); + if (WARN(id < 0, "couldn't get idr")) + return id == -ENOSPC ? -EBUSY : id; + ctlr->bus_num = id; + } else if (ctlr->dev.of_node) { + /* allocate dynamic bus number using Linux idr */ id = of_alias_get_id(ctlr->dev.of_node, "spi"); if (id >= 0) { ctlr->bus_num = id; @@ -2170,15 +2179,6 @@ int spi_register_controller(struct spi_controller *ctlr) if (WARN(id < 0, "couldn't get idr")) return id; ctlr->bus_num = id; - } else { - /* devices with a fixed bus num must check-in with the num */ - mutex_lock(&board_lock); - id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, - ctlr->bus_num + 1, GFP_KERNEL); - mutex_unlock(&board_lock); - if (WARN(id < 0, "couldn't get idr")) - return id == -ENOSPC ? 
-EBUSY : id; - ctlr->bus_num = id; } INIT_LIST_HEAD(&ctlr->queue); spin_lock_init(&ctlr->queue_lock); -- GitLab From 5b2695fd4b20f9b8320e9ecbfc232842bacf5b6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 19 Jul 2018 21:21:57 +0300 Subject: [PATCH 0634/1692] drm/i915: Fix glk/cnl display w/a #1175 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The workaround was supposed to look at the plane destination coordinates. Currently it's looking at some mixture of src and dst coordinates that doesn't make sense. Fix it up. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180719182214.4323-2-ville.syrjala@linux.intel.com Fixes: 394676f05bee (drm/i915: Add WA for planes ending close to left screen edge) Reviewed-by: Imre Deak (cherry picked from commit b1f1c2c11fc6c6cd3e361061e30f9b2839897b28) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_display.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ed3fa1c8a983..4a3c8ee9a973 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, int w = drm_rect_width(&plane_state->base.src) >> 16; int h = drm_rect_height(&plane_state->base.src) >> 16; int dst_x = plane_state->base.dst.x1; + int dst_w = drm_rect_width(&plane_state->base.dst); int pipe_src_w = crtc_state->pipe_src_w; int max_width = skl_max_plane_width(fb, 0, rotation); int max_height = 4096; @@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, * screen may cause FIFO underflow and display corruption. */ if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && - (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { + (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", - dst_x + w < 4 ? "end" : "start", - dst_x + w < 4 ? dst_x + w : dst_x, + dst_x + dst_w < 4 ? "end" : "start", + dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, 4, pipe_src_w - 4); return -ERANGE; } -- GitLab From 1b1b1162745e5f9e5c6c095afc8081df3edabc50 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 23 Aug 2018 13:51:36 -0700 Subject: [PATCH 0635/1692] drm/i915: Free write_buf that we allocated with kzalloc. We use kzalloc to allocate the write_buf that we use for i2c transfer on hdcp write. But it seems that we are forgetting to free the memory that is not needed after i2c transfer is completed. 
Reported-by: Brian J Wood Fixes: 2320175feb74 ("drm/i915: Implement HDCP for HDMI") Cc: Ramalingam C Cc: Sean Paul Cc: Jani Nikula Cc: Rodrigo Vivi Cc: # v4.17+ Signed-off-by: Rodrigo Vivi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180823205136.31310-1-rodrigo.vivi@intel.com (cherry picked from commit 62d3a8deaa10b8346d979d0dabde56c33b742afa) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_hdmi.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a9076402dcb0..192972a7d287 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, ret = i2c_transfer(adapter, &msg, 1); if (ret == 1) - return 0; - return ret >= 0 ? -EIO : ret; + ret = 0; + else if (ret >= 0) + ret = -EIO; + + kfree(write_buf); + return ret; } static -- GitLab From 5223c9c1cbfc0cd4d0a1b50758e0949af3290fa1 Mon Sep 17 00:00:00 2001 From: Angelo Dureghello Date: Sat, 18 Aug 2018 01:51:58 +0200 Subject: [PATCH 0636/1692] spi: spi-fsl-dspi: fix broken DSPI_EOQ_MODE This patch fixes the dspi_eoq_write function used by the ColdFire mcf5441x family. The 16 bit cmd part must be re-set at each data transfer. Also, now that fifo_size variables are used for eoq_read/write, a proper fifo size must be set (16 slots for the ColdFire dspi module version). Signed-off-by: Angelo Dureghello Acked-by: Esben Haabendal Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- drivers/spi/spi-fsl-dspi.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 7cb3ab0a35a0..3082e72e4f6c 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -30,7 +30,11 @@ #define DRIVER_NAME "fsl-dspi" +#ifdef CONFIG_M5441x +#define DSPI_FIFO_SIZE 16 +#else #define DSPI_FIFO_SIZE 4 +#endif #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) #define SPI_MCR 0x00 @@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi) static void dspi_eoq_write(struct fsl_dspi *dspi) { int fifo_size = DSPI_FIFO_SIZE; + u16 xfer_cmd = dspi->tx_cmd; /* Fill TX FIFO with as many transfers as possible */ while (dspi->len && fifo_size--) { + dspi->tx_cmd = xfer_cmd; /* Request EOQF for last transfer in FIFO */ if (dspi->len == dspi->bytes_per_word || fifo_size == 0) dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; -- GitLab From 11f65ad111fa29de2d11929f773bf1e553d5b7c4 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Mon, 20 Aug 2018 15:47:57 -0700 Subject: [PATCH 0637/1692] dt-bindings: riscv,cpu-intc: Cleanups from a missed review I managed to miss one of Rob's code reviews on the mailing list . The patch has already been merged, so I'm submitting a fixup. Sorry! 
Fixes: b67bc7cb4088 ("dt-bindings: interrupt-controller: RISC-V local interrupt controller") Cc: Rob Herring Cc: Christoph Hellwig Cc: Karsten Merker Signed-off-by: Palmer Dabbelt --- .../interrupt-controller/riscv,cpu-intc.txt | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt index b0a8af51c388..265b223cd978 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt @@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are attached to every HLIC: software interrupts, the timer interrupt, and external interrupts. Software interrupts are used to send IPIs between cores. The timer interrupt comes from an architecturally mandated real-time timer that is -controller via Supervisor Binary Interface (SBI) calls and CSR reads. External +controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External interrupts connect all other device interrupts to the HLIC, which are routed via the platform-level interrupt controller (PLIC). @@ -25,7 +25,15 @@ in the system. Required properties: - compatible : "riscv,cpu-intc" -- #interrupt-cells : should be <1> +- #interrupt-cells : should be <1>. The interrupt sources are defined by the + RISC-V supervisor ISA manual, with only the following three interrupts being + defined for supervisor mode: + - Source 1 is the supervisor software interrupt, which can be sent by an SBI + call and is reserved for use by software. + - Source 5 is the supervisor timer interrupt, which can be configured by + SBI calls and implements a one-shot timer. + - Source 9 is the supervisor external interrupt, which chains to all other + device interrupts. - interrupt-controller : Identifies the node as an interrupt controller Furthermore, this interrupt-controller MUST be embedded inside the cpu @@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below. ... cpu1-intc: interrupt-controller { #interrupt-cells = <1>; - compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc"; + compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc"; interrupt-controller; }; }; -- GitLab From 0ce5671c4450527f90d2bfb31302f78580587983 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 24 Aug 2018 11:22:55 -0700 Subject: [PATCH 0638/1692] riscv: tlb: Provide definition of tlb_flush() before including tlb.h As of commit fd1102f0aade ("mm: mmu_notifier fix for tlb_end_vma"), asm-generic/tlb.h now calls tlb_flush() from a static inline function, so we need to make sure that it's declared before #including the asm-generic header in the arch header. 
Reported-by: Guenter Roeck Fixes: fd1102f0aade ("mm: mmu_notifier fix for tlb_end_vma") Signed-off-by: Will Deacon [groeck: Use forward declaration instead of moving inline function] Signed-off-by: Guenter Roeck Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/tlb.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index c229509288ea..439dc7072e05 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h @@ -14,6 +14,10 @@ #ifndef _ASM_RISCV_TLB_H #define _ASM_RISCV_TLB_H +struct mmu_gather; + +static void tlb_flush(struct mmu_gather *tlb); + #include static inline void tlb_flush(struct mmu_gather *tlb) -- GitLab From 47d80a68f10d3290204a12f7836a9a8190dfc327 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 28 Aug 2018 09:37:16 -0700 Subject: [PATCH 0639/1692] RISC-V: Use a less ugly workaround for unused variable warnings Thanks to Christoph Hellwig for pointing out a cleaner way to do this, as my approach was quite ugly. CC: Christoph Hellwig Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/sys_riscv.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index 568026ccf6e8..fb03a4482ad6 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, uintptr_t, flags) { -#ifdef CONFIG_SMP - struct mm_struct *mm = current->mm; - bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; -#endif - /* Check the reserved flags. */ if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) return -EINVAL; - /* - * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(), - * which generates unused variable warnings all over this function. - */ -#ifdef CONFIG_SMP - flush_icache_mm(mm, local); -#else - flush_icache_all(); -#endif + flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL); return 0; } -- GitLab From 076e2cedd6ea4786569c35f8725b4efdc1ecf2f2 Mon Sep 17 00:00:00 2001 From: Joe Jin Date: Tue, 28 Aug 2018 07:56:08 -0700 Subject: [PATCH 0640/1692] xen: export device state to sysfs Export device state to sysfs to allow for easier get device state. Signed-off-by: Joe Jin Reviewed-by: Boris Ostrovsky Cc: Boris Ostrovsky Cc: Juergen Gross Cc: Konrad Rzeszutek Wilk Signed-off-by: Boris Ostrovsky --- Documentation/ABI/stable/sysfs-bus-xen-backend | 9 +++++++++ drivers/xen/xenbus/xenbus_probe.c | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend index 3d5951c8bf5f..e8b60bd766f7 100644 --- a/Documentation/ABI/stable/sysfs-bus-xen-backend +++ b/Documentation/ABI/stable/sysfs-bus-xen-backend @@ -73,3 +73,12 @@ KernelVersion: 3.0 Contact: Konrad Rzeszutek Wilk Description: Number of sectors written by the frontend. + +What: /sys/bus/xen-backend/devices/*/state +Date: August 2018 +KernelVersion: 4.19 +Contact: Joe Jin +Description: + The state of the device. One of: 'Unknown', + 'Initialising', 'Initialised', 'Connected', 'Closing', + 'Closed', 'Reconfiguring', 'Reconfigured'. 
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index f2088838f690..5b471889d723 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev, } static DEVICE_ATTR_RO(modalias); +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", + xenbus_strstate(to_xenbus_device(dev)->state)); +} +static DEVICE_ATTR_RO(state); + static struct attribute *xenbus_dev_attrs[] = { &dev_attr_nodename.attr, &dev_attr_devtype.attr, &dev_attr_modalias.attr, + &dev_attr_state.attr, NULL, }; -- GitLab From 6d3c8ce012cefbdd73a3bba3c7f9a44af1c6a0bb Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 28 Aug 2018 17:10:46 +0100 Subject: [PATCH 0641/1692] x86/xen: remove redundant variable save_pud Variable save_pud is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: variable 'save_pud' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- arch/x86/xen/mmu_pv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 9396b4d17064..ede298c183e9 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -2059,7 +2059,6 @@ void __init xen_relocate_p2m(void) pud_t *pud; pgd_t *pgd; unsigned long *new_p2m; - int save_pud; size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; @@ -2089,7 +2088,6 @@ void __init xen_relocate_p2m(void) pgd = __va(read_cr3_pa()); new_p2m = (unsigned long *)(2 * PGDIR_SIZE); - save_pud = n_pud; for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { pud = early_memremap(pud_phys, PAGE_SIZE); clear_page(pud); -- GitLab From 5df52391ddbed869c7d67b00fbb013bd64334115 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Thu, 23 Aug 2018 18:48:07 -0700 Subject: [PATCH 0642/1692] drm/i915/dsc: Fix PPS register definition macros for 2nd VDSC engine This patch fixes the PPS4 and PPS5 register definition macros that were resulting into an incorect MMIO address. 
Fixes: 2efbb2f099fb ("i915/dp/dsc: Add DSC PPS register definitions") Cc: Anusha Srivatsa Signed-off-by: Manasi Navare Reviewed-by: Rodrigo Vivi Reviewed-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/msgid/20180824014807.14681-1-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8534f88a60f6..f2321785cbd6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10454,7 +10454,7 @@ enum skl_power_gate { _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) @@ -10469,7 +10469,7 @@ enum skl_power_gate { _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) #define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) -- GitLab From 450b6b9b169382205f88858541a8b79830262ce7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 23 Aug 2018 18:06:54 -0500 Subject: [PATCH 0643/1692] clk: npcm7xx: fix memory allocation One of the more common cases of allocation size calculations is finding the size of a structure that has a zero-sized array at the end, along with memory for some number of elements for that array. For example: struct foo { int stuff; void *entry[]; }; instance = kzalloc(sizeof(struct foo) + sizeof(void *) * count, GFP_KERNEL); Instead of leaving these open-coded and prone to type mistakes, we can now use the new struct_size() helper: instance = kzalloc(struct_size(instance, entry, count), GFP_KERNEL); Notice that, currently, there is a bug during the allocation: sizeof(npcm7xx_clk_data) should be sizeof(*npcm7xx_clk_data) Fix this bug by using struct_size() in kzalloc() This issue was detected with the help of Coccinelle. Cc: stable@vger.kernel.org Signed-off-by: Gustavo A. R. 
Silva Reviewed-by: Kees Cook Reviewed-by: Avi Fishman Signed-off-by: Stephen Boyd --- drivers/clk/clk-npcm7xx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c index 740af90a9508..c5edf8f2fd19 100644 --- a/drivers/clk/clk-npcm7xx.c +++ b/drivers/clk/clk-npcm7xx.c @@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np) if (!clk_base) goto npcm7xx_init_error; - npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) * - NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL); + npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws, + NPCM7XX_NUM_CLOCKS), GFP_KERNEL); if (!npcm7xx_clk_data) goto npcm7xx_init_np_err; -- GitLab From 5b24109b0563d45094c470684c1f8cea1af269f8 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 28 Aug 2018 16:15:35 +0200 Subject: [PATCH 0644/1692] bpf: fix several offset tests in bpf_msg_pull_data While recently going over bpf_msg_pull_data(), I noticed three issues which are fixed in here: 1) When we attempt to find the first scatterlist element (sge) for the start offset, we add len to the offset before we check for start < offset + len, whereas it should come after when we iterate to the next sge to accumulate the offsets. For example, given a start offset of 12 with a sge length of 8 for the first sge in the list would lead us to determine this sge as the first sge thinking it covers first 16 bytes where start is located, whereas start sits in subsequent sges so we would end up pulling in the wrong data. 2) After figuring out the starting sge, we have a short-cut test in !msg->sg_copy[i] && bytes <= len. This checks whether it's not needed to make the page at the sge private where we can just exit by updating msg->data and msg->data_end. However, the length test is not fully correct. bytes <= len checks whether the requested bytes (end - start offsets) fit into the sge's length. The part that is missing is that start must not be sge length aligned. Meaning, the start offset into the sge needs to be accounted as well on top of the requested bytes as otherwise we can access the sge out of bounds. For example the sge could have length of 8, our requested bytes could have length of 8, but at a start offset of 4, so we also would need to pull in 4 bytes of the next sge, when we jump to the out label we do set msg->data to sg_virt(&sg[i]) + start - offset and msg->data_end to msg->data + bytes which would be oob. 3) The subsequent bytes < copy test for finding the last sge has the same issue as in point 2) but also it tests for less than rather than less or equal to. Meaning if the sge length is of 8 and requested bytes of 8 while having the start aligned with the sge, we would unnecessarily go and pull in the next sge as well to make it private. 
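To see how the corrected conditions work together, here is a purely illustrative walk-through (layout invented for the example, with sg_start at the first element and the page shared so the shortcut is even considered): three sg elements of 8 bytes each, start = 12 and end = 20, so bytes = 8. The fixed search loop stops at the second element with offset = 8, because 12 < 8 + 8. From there bytes_sg_total = start - offset + bytes = 12 - 8 + 8 = 12, which exceeds that element's length of 8, so the shortcut is correctly skipped. The copy loop then accumulates lengths until bytes_sg_total <= copy, reaching copy = 16 only after the third element, so the second and third elements are both made private and the full [start, end) range is covered.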
Fixes: 015632bb30da ("bpf: sk_msg program helper bpf_sk_msg_pull_data") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: Alexei Starovoitov --- net/core/filter.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 7a2430945c71..ec4d67c0cf0c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2286,10 +2286,10 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) { unsigned int len = 0, offset = 0, copy = 0; + int bytes = end - start, bytes_sg_total; struct scatterlist *sg = msg->sg_data; int first_sg, last_sg, i, shift; unsigned char *p, *to, *from; - int bytes = end - start; struct page *page; if (unlikely(flags || end <= start)) @@ -2299,9 +2299,9 @@ BPF_CALL_4(bpf_msg_pull_data, i = msg->sg_start; do { len = sg[i].length; - offset += len; if (start < offset + len) break; + offset += len; i++; if (i == MAX_SKB_FRAGS) i = 0; @@ -2310,7 +2310,11 @@ BPF_CALL_4(bpf_msg_pull_data, if (unlikely(start >= offset + len)) return -EINVAL; - if (!msg->sg_copy[i] && bytes <= len) + /* The start may point into the sg element so we need to also + * account for the headroom. + */ + bytes_sg_total = start - offset + bytes; + if (!msg->sg_copy[i] && bytes_sg_total <= len) goto out; first_sg = i; @@ -2330,12 +2334,12 @@ BPF_CALL_4(bpf_msg_pull_data, i++; if (i == MAX_SKB_FRAGS) i = 0; - if (bytes < copy) + if (bytes_sg_total <= copy) break; } while (i != msg->sg_end); last_sg = i; - if (unlikely(copy < end - start)) + if (unlikely(bytes_sg_total > copy)) return -EINVAL; page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); -- GitLab From 66174b6998a645c39867c3a301b4ee4611d6ca5b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 13 Aug 2018 23:56:55 +0200 Subject: [PATCH 0645/1692] usb: dwc3: of-simple: avoid unused function warnings An incorrect #ifdef caused a pair of harmless warnings when CONFIG_PM_SLEEP is disabled: drivers/usb/dwc3/dwc3-of-simple.c:223:12: error: 'dwc3_of_simple_resume' defined but not used [-Werror=unused-function] static int dwc3_of_simple_resume(struct device *dev) ^~~~~~~~~~~~~~~~~~~~~ drivers/usb/dwc3/dwc3-of-simple.c:213:12: error: 'dwc3_of_simple_suspend' defined but not used [-Werror=unused-function] static int dwc3_of_simple_suspend(struct device *dev) Since the #ifdef method is generally hard to get right, use a simpler __maybe_unused annotation here to let the compiler drop the unused functions silently. This also improves compile-time coverage. 
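For reference, the idiom looks roughly like the following minimal sketch (placeholder "foo" names, not code from this driver): the callbacks are always compiled, which is what improves coverage, and __maybe_unused merely silences the warning when SET_SYSTEM_SLEEP_PM_OPS() drops the references because CONFIG_PM_SLEEP is disabled.

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* nothing to quiesce in this sketch */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* nothing to restore in this sketch */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};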
Fixes: 76251db86561 ("usb: dwc3: of-simple: reset host controller at suspend/resume") Signed-off-by: Arnd Bergmann Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/dwc3-of-simple.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 40bf9e0bbc59..4c2771c5e727 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int dwc3_of_simple_runtime_suspend(struct device *dev) +static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev) { struct dwc3_of_simple *simple = dev_get_drvdata(dev); int i; @@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev) return 0; } -static int dwc3_of_simple_runtime_resume(struct device *dev) +static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev) { struct dwc3_of_simple *simple = dev_get_drvdata(dev); int ret; @@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev) return 0; } -static int dwc3_of_simple_suspend(struct device *dev) +static int __maybe_unused dwc3_of_simple_suspend(struct device *dev) { struct dwc3_of_simple *simple = dev_get_drvdata(dev); @@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev) return 0; } -static int dwc3_of_simple_resume(struct device *dev) +static int __maybe_unused dwc3_of_simple_resume(struct device *dev) { struct dwc3_of_simple *simple = dev_get_drvdata(dev); @@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume) -- GitLab From dec3c23c9aa1815f07d98ae0375b4cbc10971e13 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 8 Aug 2018 11:20:39 -0400 Subject: [PATCH 0646/1692] USB: net2280: Fix erroneous synchronization change Commit f16443a034c7 ("USB: gadgetfs, dummy-hcd, net2280: fix locking for callbacks") was based on a serious misunderstanding. It introduced regressions into both the dummy-hcd and net2280 drivers. The problem in dummy-hcd was fixed by commit 7dbd8f4cabd9 ("USB: dummy-hcd: Fix erroneous synchronization change"), but the problem in net2280 remains. Namely: the ->disconnect(), ->suspend(), ->resume(), and ->reset() callbacks must be invoked without the private lock held; otherwise a deadlock will occur when the callback routine tries to interact with the UDC driver. This patch largely is a reversion of the relevant parts of f16443a034c7. It also drops the private lock around the calls to ->suspend() and ->resume() (something the earlier patch forgot to do). This is safe from races with device interrupts because it occurs within the interrupt handler. Finally, the patch changes where the ->disconnect() callback is invoked when net2280_pullup() turns the pullup off. Rather than making the callback from within stop_activity() at a time when dropping the private lock could be unsafe, the callback is moved to a point after the lock has already been dropped. Signed-off-by: Alan Stern Fixes: f16443a034c7 ("USB: gadgetfs, dummy-hcd, net2280: fix locking for callbacks") Reported-by: D. Ziesche Tested-by: D. 
Ziesche CC: Signed-off-by: Felipe Balbi --- drivers/usb/gadget/udc/net2280.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 318246d8b2e2..b02ab2a8d927 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on) writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); } else { writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); - stop_activity(dev, dev->driver); + stop_activity(dev, NULL); } spin_unlock_irqrestore(&dev->lock, flags); + if (!is_on && dev->driver) + dev->driver->disconnect(&dev->gadget); + return 0; } @@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) nuke(&dev->ep[i]); /* report disconnect; the driver is already quiesced */ - if (driver) + if (driver) { + spin_unlock(&dev->lock); driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } usb_reinit(dev); } @@ -3341,6 +3347,8 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat) BIT(PCI_RETRY_ABORT_INTERRUPT)) static void handle_stat1_irqs(struct net2280 *dev, u32 stat) +__releases(dev->lock) +__acquires(dev->lock) { struct net2280_ep *ep; u32 tmp, num, mask, scratch; @@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) if (disconnect || reset) { stop_activity(dev, dev->driver); ep0_start(dev); + spin_unlock(&dev->lock); if (reset) usb_gadget_udc_reset (&dev->gadget, dev->driver); else (dev->driver->disconnect) (&dev->gadget); + spin_lock(&dev->lock); return; } } @@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); if (stat & tmp) { writel(tmp, &dev->regs->irqstat1); + spin_unlock(&dev->lock); if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { if (dev->driver->suspend) dev->driver->suspend(&dev->gadget); @@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) dev->driver->resume(&dev->gadget); /* at high speed, note erratum 0133 */ } + spin_lock(&dev->lock); stat &= ~tmp; } -- GitLab From c37bd52836296ecc9a0fc8060b819089aebdbcde Mon Sep 17 00:00:00 2001 From: Anton Vasilyev Date: Tue, 7 Aug 2018 14:44:48 +0300 Subject: [PATCH 0647/1692] usb: gadget: fotg210-udc: Fix memory leak of fotg210->ep[i] There is no deallocation of fotg210->ep[i] elements, allocated at fotg210_udc_probe. The patch adds deallocation of fotg210->ep array elements and simplifies error path of fotg210_udc_probe(). Found by Linux Driver Verification project (linuxtesting.org). 
Signed-off-by: Anton Vasilyev Signed-off-by: Felipe Balbi --- drivers/usb/gadget/udc/fotg210-udc.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index 53a48f561458..587c5037ff07 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = { static int fotg210_udc_remove(struct platform_device *pdev) { struct fotg210_udc *fotg210 = platform_get_drvdata(pdev); + int i; usb_del_gadget_udc(&fotg210->gadget); iounmap(fotg210->reg); free_irq(platform_get_irq(pdev, 0), fotg210); fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); + for (i = 0; i < FOTG210_MAX_NUM_EP; i++) + kfree(fotg210->ep[i]); kfree(fotg210); return 0; @@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) /* initialize udc */ fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL); if (fotg210 == NULL) - goto err_alloc; + goto err; for (i = 0; i < FOTG210_MAX_NUM_EP; i++) { _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL); @@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) fotg210->reg = ioremap(res->start, resource_size(res)); if (fotg210->reg == NULL) { pr_err("ioremap error.\n"); - goto err_map; + goto err_alloc; } spin_lock_init(&fotg210->lock); @@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep, GFP_KERNEL); if (fotg210->ep0_req == NULL) - goto err_req; + goto err_map; fotg210_init(fotg210); @@ -1187,12 +1190,14 @@ static int fotg210_udc_probe(struct platform_device *pdev) fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); err_map: - if (fotg210->reg) - iounmap(fotg210->reg); + iounmap(fotg210->reg); err_alloc: + for (i = 0; i < FOTG210_MAX_NUM_EP; i++) + kfree(fotg210->ep[i]); kfree(fotg210); +err: return ret; } -- GitLab From dfe1a51d2a36647f74cbad478801efa7cf394376 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Fri, 3 Aug 2018 12:12:46 +0900 Subject: [PATCH 0648/1692] usb: gadget: udc: renesas_usb3: fix maxpacket size of ep0 This patch fixes an issue that maxpacket size of ep0 is incorrect for SuperSpeed. Otherwise, CDC NCM class with SuperSpeed doesn't work correctly on this driver because its control read data size is more than 64 bytes. 
Reported-by: Junki Kato Fixes: 746bfe63bba3 ("usb: gadget: renesas_usb3: add support for Renesas USB3.0 peripheral controller") Cc: # v4.5+ Signed-off-by: Yoshihiro Shimoda Tested-by: Junki Kato Signed-off-by: Felipe Balbi --- drivers/usb/gadget/udc/renesas_usb3.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 1f879b3f2c96..e1656f361e08 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3) switch (speed) { case USB_STA_SPEED_SS: usb3->gadget.speed = USB_SPEED_SUPER; + usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE; break; case USB_STA_SPEED_HS: usb3->gadget.speed = USB_SPEED_HIGH; + usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE; break; case USB_STA_SPEED_FS: usb3->gadget.speed = USB_SPEED_FULL; + usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE; break; default: usb3->gadget.speed = USB_SPEED_UNKNOWN; @@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev, /* for control pipe */ usb3->gadget.ep0 = &usb3_ep->ep; usb_ep_set_maxpacket_limit(&usb3_ep->ep, - USB3_EP0_HSFS_MAX_PACKET_SIZE); + USB3_EP0_SS_MAX_PACKET_SIZE); usb3_ep->ep.caps.type_control = true; usb3_ep->ep.caps.dir_in = true; usb3_ep->ep.caps.dir_out = true; -- GitLab From b497fff6f59ec4ab2816439e7ab976a90b7bab5c Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 31 Jul 2018 14:38:52 +0000 Subject: [PATCH 0649/1692] usb: dwc3: pci: Fix return value check in dwc3_byt_enable_ulpi_refclock() In case of error, the function pcim_iomap() returns NULL pointer not ERR_PTR(). The IS_ERR() test in the return value check should be replaced with NULL test. Fixes: 7740d04d901d ("usb: dwc3: pci: Enable ULPI Refclk on platforms where the firmware does not") Reviewed-by: Hans de Goede Signed-off-by: Wei Yongjun Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/dwc3-pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 5edd79470368..1286076a8890 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci) u32 value; reg = pcim_iomap(pci, GP_RWBAR, 0); - if (IS_ERR(reg)) - return PTR_ERR(reg); + if (!reg) + return -ENOMEM; value = readl(reg + GP_RWREG1); if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE)) -- GitLab From b55326dc969ea2d704a008d9a97583b128f54f4f Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 16 Aug 2018 13:06:46 -0700 Subject: [PATCH 0650/1692] pinctrl: msm: Really mask level interrupts to prevent latching The interrupt controller hardware in this pin controller has two status enable bits. The first "normal" status enable bit enables or disables the summary interrupt line being raised when a gpio interrupt triggers and the "raw" status enable bit allows or prevents the hardware from latching an interrupt into the status register for a gpio interrupt. Currently we just toggle the "normal" status enable bit in the mask and unmask ops so that the summary irq interrupt going to the CPU's interrupt controller doesn't trigger for the masked gpio interrupt. 
For a level triggered interrupt, the flow would be as follows: the pin controller sees the interrupt, latches the status into the status register, raises the summary irq to the CPU, summary irq handler runs and calls handle_level_irq(), handle_level_irq() masks and acks the gpio interrupt, the interrupt handler runs, and finally unmask the interrupt. When the interrupt handler completes, we expect that the interrupt line level will go back to the deasserted state so the genirq code can unmask the interrupt without it triggering again. If we only mask the interrupt by clearing the "normal" status enable bit then we'll ack the interrupt but it will continue to show up as pending in the status register because the raw status bit is enabled, the hardware hasn't deasserted the line, and thus the asserted state latches into the status register again. When the hardware deasserts the interrupt the pin controller still thinks there is a pending unserviced level interrupt because it latched it earlier. This behavior causes software to see an extra interrupt for level type interrupts each time the interrupt is handled. Let's fix this by clearing the raw status enable bit for level type interrupts so that the hardware stops latching the status of the interrupt after we ack it. We don't do this for edge type interrupts because it seems that toggling the raw status enable bit for edge type interrupts causes spurious edge interrupts. Signed-off-by: Stephen Boyd Reviewed-by: Douglas Anderson Reviewed-by: Bjorn Andersson Signed-off-by: Linus Walleij --- drivers/pinctrl/qcom/pinctrl-msm.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 2155a30c282b..5d72ffad32c2 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->intr_cfg_reg); + /* + * There are two bits that control interrupt forwarding to the CPU. The + * RAW_STATUS_EN bit causes the level or edge sensed on the line to be + * latched into the interrupt status register when the hardware detects + * an irq that it's configured for (either edge for edge type or level + * for level type irq). The 'non-raw' status enable bit causes the + * hardware to assert the summary interrupt to the CPU if the latched + * status bit is set. There's a bug though, the edge detection logic + * seems to have a problem where toggling the RAW_STATUS_EN bit may + * cause the status bit to latch spuriously when there isn't any edge + * so we can't touch that bit for edge type irqs and we have to keep + * the bit set anyway so that edges are latched while the line is masked. + * + * To make matters more complicated, leaving the RAW_STATUS_EN bit + * enabled all the time causes level interrupts to re-latch into the + * status register because the level is still present on the line after + * we ack it. We clear the raw status enable bit during mask here and + * set the bit on unmask so the interrupt can't latch into the hardware + * while it's masked. 
+ */ + if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK) + val &= ~BIT(g->intr_raw_status_bit); + val &= ~BIT(g->intr_enable_bit); writel(val, pctrl->regs + g->intr_cfg_reg); @@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->intr_cfg_reg); + val |= BIT(g->intr_raw_status_bit); val |= BIT(g->intr_enable_bit); writel(val, pctrl->regs + g->intr_cfg_reg); -- GitLab From 6537886cdc9a637711fd6da980dbb87c2c87c9aa Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Mon, 13 Aug 2018 15:57:44 +0200 Subject: [PATCH 0651/1692] gpio: adp5588: Fix sleep-in-atomic-context bug This fixes: [BUG] gpio: gpio-adp5588: A possible sleep-in-atomic-context bug in adp5588_gpio_write() [BUG] gpio: gpio-adp5588: A possible sleep-in-atomic-context bug in adp5588_gpio_direction_input() Reported-by: Jia-Ju Bai Signed-off-by: Michael Hennerich Signed-off-by: Linus Walleij --- drivers/gpio/gpio-adp5588.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index 3530ccd17e04..da9781a2ef4a 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c @@ -41,6 +41,8 @@ struct adp5588_gpio { uint8_t int_en[3]; uint8_t irq_mask[3]; uint8_t irq_stat[3]; + uint8_t int_input_en[3]; + uint8_t int_lvl_cached[3]; }; static int adp5588_gpio_read(struct i2c_client *client, u8 reg) @@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d) struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); int i; - for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) + for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { + if (dev->int_input_en[i]) { + mutex_lock(&dev->lock); + dev->dir[i] &= ~dev->int_input_en[i]; + dev->int_input_en[i] = 0; + adp5588_gpio_write(dev->client, GPIO_DIR1 + i, + dev->dir[i]); + mutex_unlock(&dev->lock); + } + + if (dev->int_lvl_cached[i] != dev->int_lvl[i]) { + dev->int_lvl_cached[i] = dev->int_lvl[i]; + adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i, + dev->int_lvl[i]); + } + if (dev->int_en[i] ^ dev->irq_mask[i]) { dev->int_en[i] = dev->irq_mask[i]; adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, dev->int_en[i]); } + } mutex_unlock(&dev->irq_lock); } @@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type) else return -EINVAL; - adp5588_gpio_direction_input(&dev->gpio_chip, gpio); - adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank, - dev->int_lvl[bank]); + dev->int_input_en[bank] |= bit; return 0; } -- GitLab From 1f631c3201fe5491808df143d8fcba81b3197ffd Mon Sep 17 00:00:00 2001 From: Yuan-Chi Pang Date: Wed, 29 Aug 2018 09:30:08 +0800 Subject: [PATCH 0652/1692] mac80211: mesh: fix HWMP sequence numbering to follow standard IEEE 802.11-2016 14.10.8.3 HWMP sequence numbering says: If it is a target mesh STA, it shall update its own HWMP SN to maximum (current HWMP SN, target HWMP SN in the PREQ element) + 1 immediately before it generates a PREP element in response to a PREQ element. 
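Restated as a formula (this paraphrases the quoted standard text, not the specific code path that applies the increment): own_HWMP_SN := max(own_HWMP_SN, PREQ.target_HWMP_SN) + 1, evaluated immediately before the PREP element is generated.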
Signed-off-by: Yuan-Chi Pang Signed-off-by: Johannes Berg --- net/mac80211/mesh_hwmp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 35ad3983ae4b..daf9db3c8f24 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, forward = false; reply = true; target_metric = 0; + + if (SN_GT(target_sn, ifmsh->sn)) + ifmsh->sn = target_sn; + if (time_after(jiffies, ifmsh->last_sn_update + net_traversal_jiffies(sdata)) || time_before(jiffies, ifmsh->last_sn_update)) { -- GitLab From 166ac9d55b0ab70b644e429be1f217fe8393cbd7 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 29 Aug 2018 08:57:02 +0200 Subject: [PATCH 0653/1692] mac80211: avoid kernel panic when building AMSDU from non-linear SKB When building building AMSDU from non-linear SKB, we hit a kernel panic when trying to push the padding to the tail. Instead, put the padding at the head of the next subframe. This also fixes the A-MSDU subframes to not have the padding accounted in the length field and not have pad at all for the last subframe, both required by the spec. Fixes: 6e0456b54545 ("mac80211: add A-MSDU tx support") Signed-off-by: Sara Sharon Reviewed-by: Lorenzo Bianconi Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index fa1f1e63a264..667a73d6eb5c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3073,27 +3073,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta) } static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, - struct sk_buff *skb, int headroom, - int *subframe_len) + struct sk_buff *skb, int headroom) { - int amsdu_len = *subframe_len + sizeof(struct ethhdr); - int padding = (4 - amsdu_len) & 3; - - if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) { + if (skb_headroom(skb) < headroom) { I802_DEBUG_INC(local->tx_expand_skb_head); - if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) { + if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { wiphy_debug(local->hw.wiphy, "failed to reallocate TX buffer\n"); return false; } } - if (padding) { - *subframe_len += padding; - skb_put_zero(skb, padding); - } - return true; } @@ -3117,8 +3108,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) return true; - if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr), - &subframe_len)) + if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr))) return false; data = skb_push(skb, sizeof(*amsdu_hdr)); @@ -3184,7 +3174,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, void *data; bool ret = false; unsigned int orig_len; - int n = 1, nfrags; + int n = 1, nfrags, pad = 0; + u16 hdrlen; if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) return false; @@ -3235,8 +3226,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, if (max_frags && nfrags > max_frags) goto out; - if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2, - &subframe_len)) + /* + * Pad out the previous subframe to a multiple of 4 by adding the + * padding to the next one, that's being added. 
Note that head->len + * is the length of the full A-MSDU, but that works since each time + * we add a new subframe we pad out the previous one to a multiple + * of 4 and thus it no longer matters in the next round. + */ + hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header); + if ((head->len - hdrlen) & 3) + pad = 4 - ((head->len - hdrlen) & 3); + + if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + + 2 + pad)) goto out; ret = true; @@ -3248,6 +3250,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, memcpy(data, &len, 2); memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); + memset(skb_push(skb, pad), 0, pad); + head->len += skb->len; head->data_len += skb->len; *frag_tail = skb; -- GitLab From d8c5d29f21bf0bc690fd8c26c54197221e235bc9 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 28 Aug 2018 15:22:31 +0300 Subject: [PATCH 0654/1692] drm/i915: Don't check power domains state in intel_power_domains_init_hw() During power domains initialization we acquire power well references for power wells in the INIT power domain. The rest of power wells - which BIOS could have left enabled - we can only acquire references as needed during display HW readout and so must defer sanitization until then (also implying that we must always do HW readout to cleanup unused power wells). Thus during initialization these latter power wells can have a refcount of 0 while still being enabled. To avoid the false-positive state mismatch error this causes remove the check from intel_power_domains_init_hw() and rely on the state check in intel_power_domains_enable() which follows the HW readout. v2: - Add comment to log and code clarifying how unused power wells get disabled. (Chris) Fixes: 6dfc4a8f134f ("drm/i915: Verify power domains after enabling them") Cc: Chris Wilson References: https://bugs.freedesktop.org/show_bug.cgi?id=107411 Signed-off-by: Imre Deak Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180828122231.14336-1-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 2852395125cd..480dadb1047b 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -3724,9 +3724,10 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); * * This function initializes the hardware power domain state and enables all * power wells belonging to the INIT power domain. Power wells in other - * domains (and not in the INIT domain) are referenced or disabled during the - * modeset state HW readout. After that the reference count of each power well - * must match its HW enabled state, see intel_power_domains_verify_state(). + * domains (and not in the INIT domain) are referenced or disabled by + * intel_modeset_readout_hw_state(). After that the reference count of each + * power well must match its HW enabled state, see + * intel_power_domains_verify_state(). 
* * It will return with power domains disabled (to be enabled later by * intel_power_domains_enable()) and must be paired with @@ -3767,9 +3768,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) if (!i915_modparams.disable_power_well) intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_sync_hw(dev_priv); - power_domains->initializing = false; - intel_power_domains_verify_state(dev_priv); + power_domains->initializing = false; } /** -- GitLab From afce0cc9ad8aa510650e781a51e43c26e2a34cf6 Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Tue, 14 Aug 2018 11:33:32 +0100 Subject: [PATCH 0655/1692] Documentation/arm64/sve: Couple of improvements and typos - Fix mismatch between SVE registers (Z) and FPSIMD register (V) - Don't prefix the path for [3] with Linux to stay consistent with [1] and [2]. Signed-off-by: Julien Grall Signed-off-by: Will Deacon --- Documentation/arm64/sve.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt index f128f736b4a5..7169a0ec41d8 100644 --- a/Documentation/arm64/sve.txt +++ b/Documentation/arm64/sve.txt @@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg) thread. * Changing the vector length causes all of P0..P15, FFR and all bits of - Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become + Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC flag, does not constitute a change to the vector length for this purpose. @@ -500,7 +500,7 @@ References [2] arch/arm64/include/uapi/asm/ptrace.h AArch64 Linux ptrace ABI definitions -[3] linux/Documentation/arm64/cpu-feature-registers.txt +[3] Documentation/arm64/cpu-feature-registers.txt [4] ARM IHI0055C http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf -- GitLab From 1d8f574708a3fb6f18c85486d0c5217df893c0cf Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 24 Aug 2018 15:08:29 +0100 Subject: [PATCH 0656/1692] arm/arm64: smccc-1.1: Make return values unsigned long An unfortunate consequence of having a strong typing for the input values to the SMC call is that it also affects the type of the return values, limiting r0 to 32 bits and r{1,2,3} to whatever was passed as an input. Let's turn everything into "unsigned long", which satisfies the requirements of both architectures, and allows for the full range of return values. 
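A minimal illustration of the effect (hypothetical SMCCC 1.1 caller, not taken from this patch; the function ID is made up):

    #include <linux/arm-smccc.h>
    #include <linux/printk.h>

    /* Hypothetical caller; 0x8600ff01 is an invented function ID. */
    static void smccc_ret_example(void)
    {
            struct arm_smccc_res res;
            u32 arg = 16;

            arm_smccc_1_1_smc(0x8600ff01, arg, &res);
            /* Before this patch r1 was declared with typeof(arg), i.e. u32,
             * so a 64-bit value returned by the firmware in x1 was truncated
             * before being copied into res.a1. With the registers declared
             * as unsigned long the full return value survives on both arm
             * and arm64.
             */
            pr_info("res.a1 = %lx\n", res.a1);
    }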
Reported-by: Julien Grall Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- include/linux/arm-smccc.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index ca1d2cc2cdfa..5a91ff33720b 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -199,31 +199,31 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define __declare_arg_0(a0, res) \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ + register unsigned long r0 asm("r0") = (u32)a0; \ register unsigned long r1 asm("r1"); \ register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3") #define __declare_arg_1(a0, a1, res) \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = a1; \ register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3") #define __declare_arg_2(a0, a1, a2, res) \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ - register typeof(a2) r2 asm("r2") = a2; \ + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = a1; \ + register unsigned long r2 asm("r2") = a2; \ register unsigned long r3 asm("r3") #define __declare_arg_3(a0, a1, a2, a3, res) \ struct arm_smccc_res *___res = res; \ - register u32 r0 asm("r0") = a0; \ - register typeof(a1) r1 asm("r1") = a1; \ - register typeof(a2) r2 asm("r2") = a2; \ - register typeof(a3) r3 asm("r3") = a3 + register unsigned long r0 asm("r0") = (u32)a0; \ + register unsigned long r1 asm("r1") = a1; \ + register unsigned long r2 asm("r2") = a2; \ + register unsigned long r3 asm("r3") = a3 #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ __declare_arg_3(a0, a1, a2, a3, res); \ -- GitLab From ef39078d6342deaddacdd550c4197421bd83fb76 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 24 Aug 2018 08:43:35 +0200 Subject: [PATCH 0657/1692] netfilter: conntrack: place 'new' timeout in first location too tcp, sctp and dccp trackers re-use the userspace ctnetlink states to index their timeout arrays, which means timeout[0] is never used. Copy the 'new' state (syn-sent, dccp-request, ..) to 0 as well so external users can simply read it off timeouts[0] without need to differentiate dccp/sctp/tcp and udp/icmp/gre/generic. The alternative is to map all array accesses to 'i - 1', but that is a much more intrusive change. 
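For an external reader the result can be sketched like this (illustrative helper, assuming CONFIG_NF_CONNTRACK_TIMEOUT; it only uses nf_ct_timeout_find()/nf_ct_timeout_data() as in this series):

    #include <net/netfilter/nf_conntrack_timeout.h>

    /* Sketch: fetch the 'new' timeout for any tracked protocol without
     * caring whether ct is tcp, sctp, dccp, udp, icmp, gre or generic.
     */
    static unsigned int ct_new_timeout(const struct nf_conn *ct)
    {
            const struct nf_conn_timeout *ext = nf_ct_timeout_find(ct);
            const unsigned int *timeouts = ext ? nf_ct_timeout_data(ext) : NULL;

            return timeouts ? timeouts[0] : 0;
    }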
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_proto_dccp.c | 7 +++++++ net/netfilter/nf_conntrack_proto_sctp.c | 7 +++++++ net/netfilter/nf_conntrack_proto_tcp.c | 7 +++++++ 3 files changed, 21 insertions(+) diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 8c58f96b59e7..b81f70039828 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; } } + + timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST]; return 0; } @@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto) dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; + + /* timeouts[0] is unused, make it same as SYN_SENT so + * ->timeouts[0] contains 'new' timeout, like udp or icmp. + */ + dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST]; } return dccp_kmemdup_sysctl_table(net, pn, dn); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 8d1e085fc14a..5eddfd32b852 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; } } + + timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED]; return 0; } @@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto) for (i = 0; i < SCTP_CONNTRACK_MAX; i++) sn->timeouts[i] = sctp_timeouts[i]; + + /* timeouts[0] is unused, init it so ->timeouts[0] contains + * 'new' timeout, like udp or icmp. + */ + sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED]; } return sctp_kmemdup_sysctl_table(pn, sn); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index d80d322b9d8b..3e2dc56a96c3 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], timeouts[TCP_CONNTRACK_SYN_SENT] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ; } + if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) { timeouts[TCP_CONNTRACK_SYN_RECV] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ; @@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], timeouts[TCP_CONNTRACK_UNACK] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ; } + + timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT]; return 0; } @@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto) for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) tn->timeouts[i] = tcp_timeouts[i]; + /* timeouts[0] is unused, make it same as SYN_SENT so + * ->timeouts[0] contains 'new' timeout, like udp or icmp. + */ + tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT]; tn->tcp_loose = nf_ct_tcp_loose; tn->tcp_be_liberal = nf_ct_tcp_be_liberal; tn->tcp_max_retrans = nf_ct_tcp_max_retrans; -- GitLab From 0434ccdcf883e53ec7156a6843943e940dc1feb8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 24 Aug 2018 08:43:36 +0200 Subject: [PATCH 0658/1692] netfilter: nf_tables: rework ct timeout set support Using a private template is problematic: 1. 
We can't assign both a zone and a timeout policy (zone assigns a conntrack template, so we hit problem 1) 2. Using a template needs to take care of ct refcount, else we'll eventually free the private template due to ->use underflow. This patch reworks template policy to instead work with existing conntrack. As long as such conntrack has not yet been placed into the hash table (unconfirmed) we can still add the timeout extension. The only caveat is that we now need to update/correct ct->timeout to reflect the initial/new state, otherwise the conntrack entry retains the default 'new' timeout. Side effect of this change is that setting the policy must now occur from chains that are evaluated *after* the conntrack lookup has taken place. No released kernel contains the timeout policy feature yet, so this change should be ok. Changes since v2: - don't handle 'ct is confirmed case' - after previous patch, no need to special-case tcp/dccp/sctp timeout anymore Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_timeout.h | 2 +- net/netfilter/nft_ct.c | 59 ++++++++++---------- 2 files changed, 30 insertions(+), 31 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index d5f62cc6c2ae..3394d75e1c80 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -30,7 +30,7 @@ struct nf_conn_timeout { }; static inline unsigned int * -nf_ct_timeout_data(struct nf_conn_timeout *t) +nf_ct_timeout_data(const struct nf_conn_timeout *t) { struct nf_ct_timeout *timeout; diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 26a8baebd072..5dd87748afa8 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -799,7 +799,7 @@ nft_ct_timeout_parse_policy(void *timeouts, } struct nft_ct_timeout_obj { - struct nf_conn *tmpl; + struct nf_ct_timeout *timeout; u8 l4proto; }; @@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj, { const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); - struct sk_buff *skb = pkt->skb; + struct nf_conn_timeout *timeout; + const unsigned int *values; + + if (priv->l4proto != pkt->tprot) + return; - if (ct || - priv->l4proto != pkt->tprot) + if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct)) return; - nf_ct_set(skb, priv->tmpl, IP_CT_NEW); + timeout = nf_ct_timeout_find(ct); + if (!timeout) { + timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC); + if (!timeout) { + regs->verdict.code = NF_DROP; + return; + } + } + + rcu_assign_pointer(timeout->timeout, priv->timeout); + + /* adjust the timeout as per 'new' state. ct is unconfirmed, + * so the current timestamp must not be added. 
+ */ + values = nf_ct_timeout_data(timeout); + if (values) + nf_ct_refresh(ct, pkt->skb, values[0]); } static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_object *obj) { - const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; struct nft_ct_timeout_obj *priv = nft_obj_data(obj); const struct nf_conntrack_l4proto *l4proto; - struct nf_conn_timeout *timeout_ext; struct nf_ct_timeout *timeout; int l3num = ctx->family; - struct nf_conn *tmpl; __u8 l4num; int ret; @@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, timeout->l3num = l3num; timeout->l4proto = l4proto; - tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC); - if (!tmpl) { - ret = -ENOMEM; - goto err_free_timeout; - } - - timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC); - if (!timeout_ext) { - ret = -ENOMEM; - goto err_free_tmpl; - } ret = nf_ct_netns_get(ctx->net, ctx->family); if (ret < 0) - goto err_free_tmpl; - - priv->tmpl = tmpl; + goto err_free_timeout; + priv->timeout = timeout; return 0; -err_free_tmpl: - nf_ct_tmpl_free(tmpl); err_free_timeout: kfree(timeout); err_proto_put: @@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj) { struct nft_ct_timeout_obj *priv = nft_obj_data(obj); - struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); - struct nf_ct_timeout *timeout; + struct nf_ct_timeout *timeout = priv->timeout; - timeout = rcu_dereference_raw(t->timeout); nf_ct_untimeout(ctx->net, timeout); nf_ct_l4proto_put(timeout->l4proto); nf_ct_netns_put(ctx->net, ctx->family); - nf_ct_tmpl_free(priv->tmpl); + kfree(priv->timeout); } static int nft_ct_timeout_obj_dump(struct sk_buff *skb, struct nft_object *obj, bool reset) { const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); - const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); - const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout); + const struct nf_ct_timeout *timeout = priv->timeout; struct nlattr *nest_params; int ret; -- GitLab From 993b9bc5c47fda86f8ab4e53d68c6fea5ff2764a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 13 Aug 2018 19:00:27 +0300 Subject: [PATCH 0659/1692] gpiolib: acpi: Switch to cansleep version of GPIO library call The commit ca876c7483b6 ("gpiolib-acpi: make sure we trigger edge events at least once on boot") added a initial value check for pin which is about to be locked as IRQ. Unfortunately, not all GPIO drivers can do that atomically. Thus, switch to cansleep version of the call. Otherwise we have a warning: ... WARNING: CPU: 2 PID: 1408 at drivers/gpio/gpiolib.c:2883 gpiod_get_value+0x46/0x50 ... RIP: 0010:gpiod_get_value+0x46/0x50 ... The change tested on Intel Broxton with Whiskey Cove PMIC GPIO controller. 
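The rule being applied can be sketched as follows (generic illustration with a made-up helper, not gpiolib-acpi code):

    #include <linux/gpio/consumer.h>

    /* Hypothetical helper: in atomic context only the non-sleeping accessor
     * is legal (and it warns if the chip can sleep); in process context the
     * _cansleep variant handles both memory-mapped controllers and sleeping
     * ones such as I2C/SPI PMIC GPIO expanders.
     */
    static int read_gpio(struct gpio_desc *desc, bool atomic_ctx)
    {
            if (atomic_ctx)
                    return gpiod_get_value(desc);
            return gpiod_get_value_cansleep(desc);
    }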
Fixes: ca876c7483b6 ("gpiolib-acpi: make sure we trigger edge events at least once on boot") Signed-off-by: Andy Shevchenko Cc: Hans de Goede Cc: Benjamin Tissoires Acked-by: Mika Westerberg Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib-acpi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index c48ed9d89ff5..f9134f23c7e8 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -186,7 +186,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, gpiod_direction_input(desc); - value = gpiod_get_value(desc); + value = gpiod_get_value_cansleep(desc); ret = gpiochip_lock_as_irq(chip, pin); if (ret) { -- GitLab From 78d3a92edbfb02e8cb83173cad84c3f2d5e1f070 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 14 Aug 2018 16:07:03 +0200 Subject: [PATCH 0660/1692] gpiolib-acpi: Register GpioInt ACPI event handlers from a late_initcall GpioInt ACPI event handlers may see there IRQ triggered immediately after requesting the IRQ (esp. level triggered ones). This means that they may run before any other (builtin) drivers have had a chance to register their OpRegion handlers, leading to errors like this: [ 1.133274] ACPI Error: No handler for Region [PMOP] ((____ptrval____)) [UserDefinedRegion] (20180531/evregion-132) [ 1.133286] ACPI Error: Region UserDefinedRegion (ID=141) has no handler (20180531/exfldio-265) [ 1.133297] ACPI Error: Method parse/execution failed \_SB.GPO2._L01, AE_NOT_EXIST (20180531/psparse-516) We already defer the manual initial trigger of edge triggered interrupts by running it from a late_initcall handler, this commit replaces this with deferring the entire acpi_gpiochip_request_interrupts() call till then, fixing the problem of some OpRegions not being registered yet. Note that this removes the need to have a list of edge triggered handlers which need to run, since the entire acpi_gpiochip_request_interrupts() call is now delayed, acpi_gpiochip_request_interrupt() can call these directly now. Acked-by: Mika Westerberg Signed-off-by: Hans de Goede Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib-acpi.c | 84 +++++++++++++++++++++---------------- 1 file changed, 49 insertions(+), 35 deletions(-) diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index f9134f23c7e8..8b9d7e42c600 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -25,7 +25,6 @@ struct acpi_gpio_event { struct list_head node; - struct list_head initial_sync_list; acpi_handle handle; unsigned int pin; unsigned int irq; @@ -49,10 +48,19 @@ struct acpi_gpio_chip { struct mutex conn_lock; struct gpio_chip *chip; struct list_head events; + struct list_head deferred_req_irqs_list_entry; }; -static LIST_HEAD(acpi_gpio_initial_sync_list); -static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); +/* + * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init + * (so builtin drivers) we register the ACPI GpioInt event handlers from a + * late_initcall_sync handler, so that other builtin drivers can register their + * OpRegions before the event handlers can run. This list contains gpiochips + * for which the acpi_gpiochip_request_interrupts() has been deferred. 
+ */ +static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); +static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); +static bool acpi_gpio_deferred_req_irqs_done; static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) { @@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) return gpiochip_get_desc(chip, pin); } -static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event) -{ - mutex_lock(&acpi_gpio_initial_sync_list_lock); - list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list); - mutex_unlock(&acpi_gpio_initial_sync_list_lock); -} - -static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event) -{ - mutex_lock(&acpi_gpio_initial_sync_list_lock); - if (!list_empty(&event->initial_sync_list)) - list_del_init(&event->initial_sync_list); - mutex_unlock(&acpi_gpio_initial_sync_list_lock); -} - static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) { struct acpi_gpio_event *event = data; @@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, event->irq = irq; event->pin = pin; event->desc = desc; - INIT_LIST_HEAD(&event->initial_sync_list); ret = request_threaded_irq(event->irq, NULL, handler, irqflags, "ACPI:Event", event); @@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, * may refer to OperationRegions from other (builtin) drivers which * may be probed after us. */ - if (handler == acpi_gpio_irq_handler && - (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || - ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) - acpi_gpio_add_to_initial_sync_list(event); + if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || + ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)) + handler(event->irq, event); return AE_OK; @@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) struct acpi_gpio_chip *acpi_gpio; acpi_handle handle; acpi_status status; + bool defer; if (!chip->parent || !chip->to_irq) return; @@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) if (ACPI_FAILURE(status)) return; + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); + defer = !acpi_gpio_deferred_req_irqs_done; + if (defer) + list_add(&acpi_gpio->deferred_req_irqs_list_entry, + &acpi_gpio_deferred_req_irqs_list); + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); + + if (defer) + return; + acpi_walk_resources(handle, "_AEI", acpi_gpiochip_request_interrupt, acpi_gpio); } @@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) if (ACPI_FAILURE(status)) return; + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); + if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry)) + list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); + list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { struct gpio_desc *desc; - acpi_gpio_del_from_initial_sync_list(event); - if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) disable_irq_wake(event->irq); @@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) acpi_gpio->chip = chip; INIT_LIST_HEAD(&acpi_gpio->events); + INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry); status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); if (ACPI_FAILURE(status)) { @@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) return con_id == NULL; } -/* Sync the initial state of handlers after all builtin drivers have 
probed */ -static int acpi_gpio_initial_sync(void) +/* Run deferred acpi_gpiochip_request_interrupts() */ +static int acpi_gpio_handle_deferred_request_interrupts(void) { - struct acpi_gpio_event *event, *ep; + struct acpi_gpio_chip *acpi_gpio, *tmp; + + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); + list_for_each_entry_safe(acpi_gpio, tmp, + &acpi_gpio_deferred_req_irqs_list, + deferred_req_irqs_list_entry) { + acpi_handle handle; - mutex_lock(&acpi_gpio_initial_sync_list_lock); - list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, - initial_sync_list) { - acpi_evaluate_object(event->handle, NULL, NULL, NULL); - list_del_init(&event->initial_sync_list); + handle = ACPI_HANDLE(acpi_gpio->chip->parent); + acpi_walk_resources(handle, "_AEI", + acpi_gpiochip_request_interrupt, acpi_gpio); + + list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); } - mutex_unlock(&acpi_gpio_initial_sync_list_lock); + + acpi_gpio_deferred_req_irqs_done = true; + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); return 0; } /* We must use _sync so that this runs after the first deferred_probe run */ -late_initcall_sync(acpi_gpio_initial_sync); +late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts); -- GitLab From 823dd71f58eb2133c24af85fad056a8dbb1a76e9 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Sat, 25 Aug 2018 10:53:28 -0700 Subject: [PATCH 0661/1692] pinctrl: ingenic: Fix group & function error checking Commit a203728ac6bb ("pinctrl: core: Return selector to the pinctrl driver") and commit f913cfce4ee4 ("pinctrl: pinmux: Return selector to the pinctrl driver") modified the return values of pinctrl_generic_add_group() and pinmux_generic_add_function() respectively, but did so without updating their callers. This broke the pinctrl-ingenic driver, which treats non-zero return values from these functions as errors & fails to probe. For example on a MIPS Ci20: pinctrl-ingenic 10010000.pin-controller: Failed to register group uart0-hwflow pinctrl-ingenic: probe of 10010000.pin-controller failed with error 1 Without the pinctrl driver probed, other drivers go on to fail to probe too & the system is unusable. Fix this by modifying the error checks to treat only negative values as errors, matching the commits that introduced the breakage & similar changes made to other drivers. 
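The convention after those core commits, sketched here with the group registration changed below (function registration is analogous), is that a non-negative return value is the selector of the new entry and only negative values are errors:

    int sel;

    sel = pinctrl_generic_add_group(jzpc->pctl, group->name, group->pins,
                                    group->num_pins, group->data);
    if (sel < 0)
            return sel;     /* e.g. -ENOMEM */
    /* sel >= 0 is the selector of the newly registered group, not an error */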
Signed-off-by: Paul Burton Fixes: a203728ac6bb ("pinctrl: core: Return selector to the pinctrl driver") Fixes: f913cfce4ee4 ("pinctrl: pinmux: Return selector to the pinctrl driver") Cc: Linus Walleij Cc: Paul Cercueil Cc: Tony Lindgren Cc: linux-gpio@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Linus Walleij --- drivers/pinctrl/pinctrl-ingenic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 6a1b6058b991..628817c40e3b 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev) err = pinctrl_generic_add_group(jzpc->pctl, group->name, group->pins, group->num_pins, group->data); - if (err) { + if (err < 0) { dev_err(dev, "Failed to register group %s\n", group->name); return err; @@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev) err = pinmux_generic_add_function(jzpc->pctl, func->name, func->group_names, func->num_group_names, func->data); - if (err) { + if (err < 0) { dev_err(dev, "Failed to register function %s\n", func->name); return err; -- GitLab From 5bc5a671b1f4b3aa019264ce970d3683a9ffa761 Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Tue, 28 Aug 2018 09:45:37 +0100 Subject: [PATCH 0662/1692] pinctrl: madera: Fix possible NULL pointer with pdata config If we are being configured via pdata we don't necessarily have any gpio mappings being configured that way so pdata->gpio_config could be NULL. Signed-off-by: Richard Fitzgerald Signed-off-by: Linus Walleij --- drivers/pinctrl/cirrus/pinctrl-madera-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c index ece41fb2848f..c4f4d904e4a6 100644 --- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c +++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c @@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev) } /* if the configuration is provided through pdata, apply it */ - if (pdata) { + if (pdata && pdata->gpio_configs) { ret = pinctrl_register_mappings(pdata->gpio_configs, pdata->n_gpio_configs); if (ret) { -- GitLab From a618cf4800970d260871c159b7eec014a1da2e81 Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Tue, 28 Aug 2018 23:40:26 +0300 Subject: [PATCH 0663/1692] gpio: dwapb: Fix error handling in dwapb_gpio_probe() If dwapb_gpio_add_port() fails in dwapb_gpio_probe(), gpio->clk is left undisabled. Found by Linux Driver Verification project (linuxtesting.org). 
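The pattern being fixed is the usual probe-path rule, sketched generically below (dwapb_register_ports() is a made-up stand-in for the real port registration): every successful clk_prepare_enable() must be undone on every later error exit.

    /* Generic sketch only; dwapb_register_ports() is hypothetical. */
    static int example_probe(struct dwapb_gpio *gpio)
    {
            int err;

            err = clk_prepare_enable(gpio->clk);
            if (err)
                    return err;

            err = dwapb_register_ports(gpio);       /* may fail after clk is on */
            if (err)
                    goto out_unprepare;

            return 0;

    out_unprepare:
            clk_disable_unprepare(gpio->clk);
            return err;
    }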
Signed-off-by: Alexey Khoroshilov Signed-off-by: Linus Walleij --- drivers/gpio/gpio-dwapb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 28da700f5f52..044888fd96a1 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev) out_unregister: dwapb_gpio_unregister(gpio); dwapb_irq_teardown(gpio); + clk_disable_unprepare(gpio->clk); return err; } -- GitLab From 9e4fa01221b3230320135072ad31ea809ca31147 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 Aug 2018 16:27:02 +0100 Subject: [PATCH 0664/1692] drm/i915/execlists: Flush tasklet directly from reset-finish On finishing the reset, the intention is to restart the GPU before we relinquish the forcewake taken to handle the reset - the goal being the GPU reloads a context before it is allowed to sleep. For this purpose, we used tasklet_flush() which although it accomplished the goal of restarting the GPU, carried with it a sting in its tail: it cleared the TASKLET_STATE_SCHED bit. This meant that if another CPU queued a new request to this engine, we would clear the flag and later attempt to requeue the tasklet on the local CPU, breaking the per-cpu softirq lists. Remove the dangerous tasklet_kill() and just run the tasklet func directly as we know it is safe to do so (the tasklets are internally locked to allow mixed usage from direct submission). Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Mika Kuoppala Cc: Michel Thierry Cc: Joonas Lahtinen Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180828152702.27536-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.h | 6 ------ drivers/gpu/drm/i915/intel_lrc.c | 17 ++++++----------- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index e46592956872..599c4f6eb1ea 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -82,12 +82,6 @@ static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) tasklet_unlock_wait(t); } -static inline void __tasklet_enable_sync_once(struct tasklet_struct *t) -{ - if (atomic_dec_return(&t->count) == 0) - tasklet_kill(t); -} - static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) { return !atomic_read(&t->count); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 36050f085071..f8ceb9c99dd6 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1962,21 +1962,16 @@ static void execlists_reset_finish(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - /* After a GPU reset, we may have requests to replay */ - if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) - tasklet_schedule(&execlists->tasklet); - /* - * Flush the tasklet while we still have the forcewake to be sure - * that it is not allowed to sleep before we restart and reload a - * context. + * After a GPU reset, we may have requests to replay. Do so now while + * we still have the forcewake to be sure that the GPU is not allowed + * to sleep before we restart and reload a context. * - * As before (with execlists_reset_prepare) we rely on the caller - * serialising multiple attempts to reset so that we know that we - * are the only one manipulating tasklet state. 
*/ - __tasklet_enable_sync_once(&execlists->tasklet); + if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) + execlists->tasklet.func(execlists->tasklet.data); + tasklet_enable(&execlists->tasklet); GEM_TRACE("%s: depth->%d\n", engine->name, atomic_read(&execlists->tasklet.count)); } -- GitLab From 36156f9241cb0f9e37d998052873ca7501ad4b36 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 27 Aug 2018 10:21:45 +0200 Subject: [PATCH 0665/1692] of: add helper to lookup compatible child node Add of_get_compatible_child() helper that can be used to lookup compatible child nodes. Several drivers currently use of_find_compatible_node() to lookup child nodes while failing to notice that the of_find_ functions search the entire tree depth-first (from a given start node) and therefore can match unrelated nodes. The fact that these functions also drop a reference to the node they start searching from (e.g. the parent node) is typically also overlooked, something which can lead to use-after-free bugs. Signed-off-by: Johan Hovold Signed-off-by: Rob Herring --- drivers/of/base.c | 25 +++++++++++++++++++++++++ include/linux/of.h | 8 ++++++++ 2 files changed, 33 insertions(+) diff --git a/drivers/of/base.c b/drivers/of/base.c index 466e3c8582f0..bc420d2aa5f5 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -719,6 +719,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node, } EXPORT_SYMBOL(of_get_next_available_child); +/** + * of_get_compatible_child - Find compatible child node + * @parent: parent node + * @compatible: compatible string + * + * Lookup child node whose compatible property contains the given compatible + * string. + * + * Returns a node pointer with refcount incremented, use of_node_put() on it + * when done; or NULL if not found. + */ +struct device_node *of_get_compatible_child(const struct device_node *parent, + const char *compatible) +{ + struct device_node *child; + + for_each_child_of_node(parent, child) { + if (of_device_is_compatible(child, compatible)) + break; + } + + return child; +} +EXPORT_SYMBOL(of_get_compatible_child); + /** * of_get_child_by_name - Find the child node by name for a given parent * @node: parent node diff --git a/include/linux/of.h b/include/linux/of.h index 4d25e4f952d9..b99a1a8c2952 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -290,6 +290,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node, extern struct device_node *of_get_next_available_child( const struct device_node *node, struct device_node *prev); +extern struct device_node *of_get_compatible_child(const struct device_node *parent, + const char *compatible); extern struct device_node *of_get_child_by_name(const struct device_node *node, const char *name); @@ -632,6 +634,12 @@ static inline bool of_have_populated_dt(void) return false; } +static inline struct device_node *of_get_compatible_child(const struct device_node *parent, + const char *compatible) +{ + return NULL; +} + static inline struct device_node *of_get_child_by_name( const struct device_node *node, const char *name) -- GitLab From 8f3fafc9c2f0ece10832c25f7ffcb07c97a32ad4 Mon Sep 17 00:00:00 2001 From: Scott Bauer Date: Thu, 26 Apr 2018 11:51:08 -0600 Subject: [PATCH 0666/1692] cdrom: Fix info leak/OOB read in cdrom_ioctl_drive_status Like d88b6d04: "cdrom: information leak in cdrom_ioctl_media_changed()" There is another cast from unsigned long to int which causes a bounds check to fail with specially crafted input. 
The value is then used as an index in the slot array in cdrom_slot_status(). Signed-off-by: Scott Bauer Signed-off-by: Scott Bauer Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe --- drivers/cdrom/cdrom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 113fc6edb2b0..a5d5a96479bf 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, if (!CDROM_CAN(CDC_SELECT_DISC) || (arg == CDSL_CURRENT || arg == CDSL_NONE)) return cdi->ops->drive_status(cdi, CDSL_CURRENT); - if (((int)arg >= cdi->capacity)) + if (arg >= cdi->capacity) return -EINVAL; return cdrom_slot_status(cdi, arg); } -- GitLab From 1685b01a858872075bc258a350153de0c7e95404 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Wed, 29 Aug 2018 12:33:52 -0500 Subject: [PATCH 0667/1692] drm/amdgpu: Set pasid for compute vm (v2) To make a amdgpu vm to a compute vm, the old pasid will be freed and replaced with a pasid managed by kfd. Kfd can't reuse original pasid allocated by amdgpu because kfd uses different pasid policy with amdgpu. For example, all graphic devices share one same pasid in a process. v2: rebase (Alex) Signed-off-by: Oak Zeng Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 6 +-- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 10 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 38 +++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 +- .../gpu/drm/amd/include/kgd_kfd_interface.h | 5 ++- 6 files changed, 48 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 2f379c183ed2..ba0057e94d5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -162,11 +162,11 @@ uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); }) /* GPUVM API */ -int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm, - void **process_info, +int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid, + void **vm, void **process_info, struct dma_fence **ef); int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, - struct file *filp, + struct file *filp, unsigned int pasid, void **vm, void **process_info, struct dma_fence **ef); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 2ef6e8557b65..1cbdd9a7538e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1000,8 +1000,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, return ret; } -int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm, - void **process_info, +int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid, + void **vm, void **process_info, struct dma_fence **ef) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -1013,7 +1013,7 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm, return -ENOMEM; /* Initialize AMDGPU part of the VM */ - ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0); + ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid); if (ret) { pr_err("Failed init vm ret %d\n", ret); goto 
amdgpu_vm_init_fail; @@ -1036,7 +1036,7 @@ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm, } int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, - struct file *filp, + struct file *filp, unsigned int pasid, void **vm, void **process_info, struct dma_fence **ef) { @@ -1051,7 +1051,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, return -EINVAL; /* Convert VM into a compute VM */ - ret = amdgpu_vm_make_compute(adev, avm); + ret = amdgpu_vm_make_compute(adev, avm, pasid); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 60c0609b78a4..272b7902a25c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2740,7 +2740,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Returns: * 0 for success, -errno for errors. */ -int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) +int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid) { bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; @@ -2752,7 +2752,20 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) /* Sanity checks */ if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) { r = -EINVAL; - goto error; + goto unreserve_bo; + } + + if (pasid) { + unsigned long flags; + + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); + r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, + GFP_ATOMIC); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + + if (r == -ENOSPC) + goto unreserve_bo; + r = 0; } /* Check if PD needs to be reinitialized and do it before @@ -2763,7 +2776,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) adev->vm_manager.root_level, pte_support_ats); if (r) - goto error; + goto free_idr; } /* Update VM state */ @@ -2782,13 +2795,30 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + /* Free the original amdgpu allocated pasid + * Will be replaced with kfd allocated pasid + */ + amdgpu_pasid_free(vm->pasid); vm->pasid = 0; } /* Free the shadow bo for compute VM */ amdgpu_bo_unref(&vm->root.base.bo->shadow); -error: + if (pasid) + vm->pasid = pasid; + + goto unreserve_bo; + +free_idr: + if (pasid) { + unsigned long flags; + + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); + idr_remove(&adev->vm_manager.pasid_idr, pasid); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + } +unreserve_bo: amdgpu_bo_unreserve(vm->root.base.bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 94fe47890adf..24b02c91ce0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -297,7 +297,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int vm_context, unsigned int pasid); -int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev, unsigned int pasid); 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 4694386cc623..a246b8d76c86 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -687,11 +687,11 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, if (drm_file) ret = dev->kfd2kgd->acquire_process_vm( - dev->kgd, drm_file, + dev->kgd, drm_file, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef); else ret = dev->kfd2kgd->create_process_vm( - dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef); + dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef); if (ret) { pr_err("Failed to create process VM object\n"); return ret; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 14391b06080c..4b9981351bb3 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -372,10 +372,11 @@ struct kfd2kgd_calls { struct kfd_cu_info *cu_info); uint64_t (*get_vram_usage)(struct kgd_dev *kgd); - int (*create_process_vm)(struct kgd_dev *kgd, void **vm, + int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm, void **process_info, struct dma_fence **ef); int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp, - void **vm, void **process_info, struct dma_fence **ef); + unsigned int pasid, void **vm, void **process_info, + struct dma_fence **ef); void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm); uint32_t (*get_process_page_dir)(void *vm); void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, -- GitLab From bf47afbabf1cf149f9ebc8e1f7dab6913e360dc4 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Mon, 27 Aug 2018 15:18:36 -0400 Subject: [PATCH 0668/1692] drm/amdkfd: Release an acquired process vm For compute vm acquired from amdgpu, vm.pasid is managed by kfd. Decouple pasid from such vm on process destroy to avoid duplicate pasid release. 
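The resulting ownership rule in kfd_process_destroy_pdds() can be summarised as follows (sketch using the fields touched by the hunk below):

    if (pdd->drm_file) {
            /* VM was acquired from a DRM file: only decouple the
             * kfd-managed pasid; kfd releases the pasid itself later.
             */
            pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
            fput(pdd->drm_file);
    } else if (pdd->vm) {
            /* VM was created for kfd: tear the whole thing down. */
            pdd->dev->kfd2kgd->destroy_process_vm(pdd->dev->kgd, pdd->vm);
    }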
Signed-off-by: Oak Zeng Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 1 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 1 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 19 ++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 20 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 +++- .../gpu/drm/amd/include/kgd_kfd_interface.h | 1 + 9 files changed, 48 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index ba0057e94d5e..2a1da3fe2b06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -172,6 +172,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm); +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm); uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct kgd_dev *kgd, uint64_t va, uint64_t size, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index ea3f698aef5e..6f0b5267addb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -205,6 +205,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm, .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm, .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm, + .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm, .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir, .set_vm_context_page_table_base = set_vm_context_page_table_base, .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index f6e53e9352bd..ea7c18ce7754 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -164,6 +164,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm, .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm, .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm, + .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm, .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir, .set_vm_context_page_table_base = set_vm_context_page_table_base, .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 8efedfcb9dfc..3dc987cab0ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -201,6 +201,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm, .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm, .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm, + .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm, .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir, 
.set_vm_context_page_table_base = set_vm_context_page_table_base, .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1cbdd9a7538e..e7ceae05d517 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1114,6 +1114,25 @@ void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm) kfree(vm); } +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + + if (WARN_ON(!kgd || !vm)) + return; + + pr_debug("Releasing process vm %p\n", vm); + + /* The original pasid of amdgpu vm has already been + * released during making a amdgpu vm to a compute vm + * The current pasid is managed by kfd and will be + * released on kfd process destroy. Set amdgpu pasid + * to 0 to avoid duplicate release. + */ + amdgpu_vm_release_compute(adev, avm); +} + uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) { struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 272b7902a25c..23c78af850c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2823,6 +2823,26 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns return r; } +/** + * amdgpu_vm_release_compute - release a compute vm + * @adev: amdgpu_device pointer + * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute + * + * This is a correspondant of amdgpu_vm_make_compute. It decouples compute + * pasid from vm. Compute should stop use of vm after this call. 
+ */ +void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) +{ + if (vm->pasid) { + unsigned long flags; + + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); + idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + } + vm->pasid = 0; +} + /** * amdgpu_vm_free_levels - free PD/PT levels * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 24b02c91ce0c..62116fa44718 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -298,6 +298,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int vm_context, unsigned int pasid); int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid); +void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev, unsigned int pasid); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index a246b8d76c86..0039e451d9af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -322,8 +322,10 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n", pdd->dev->id, p->pasid); - if (pdd->drm_file) + if (pdd->drm_file) { + pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm); fput(pdd->drm_file); + } else if (pdd->vm) pdd->dev->kfd2kgd->destroy_process_vm( pdd->dev->kgd, pdd->vm); diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 4b9981351bb3..814576f6ca1c 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -378,6 +378,7 @@ struct kfd2kgd_calls { unsigned int pasid, void **vm, void **process_info, struct dma_fence **ef); void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm); + void (*release_process_vm)(struct kgd_dev *kgd, void *vm); uint32_t (*get_process_page_dir)(void *vm); void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, uint32_t vmid, uint32_t page_table_base); -- GitLab From efa9a5ef10fbbf500e6b479359dde72947a9799e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 28 Aug 2018 13:44:32 +0200 Subject: [PATCH 0669/1692] drm/amdgpu: remove amdgpu_bo_gpu_accessible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not used any more. Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 18945dd6982d..907fdf46d895 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -193,19 +193,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) return drm_vma_node_offset_addr(&bo->tbo.vma_node); } -/** - * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that - * is accessible to the GPU. 
- */ -static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo) -{ - switch (bo->tbo.mem.mem_type) { - case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem); - case TTM_PL_VRAM: return true; - default: return false; - } -} - /** * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM */ -- GitLab From 961c75cf203179d0c546722290bf4b1147e5feb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 23 Aug 2018 15:20:43 +0200 Subject: [PATCH 0670/1692] drm/amdgpu: move amdgpu_device_(vram|gtt)_location MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move that into amdgpu_gmc.c since we are really deadling with GMC address space here. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 -- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 65 ---------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 64 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 4 ++ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 +- 8 files changed, 76 insertions(+), 77 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 340e40d03d54..09bdedfc91c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1166,10 +1166,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev); void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, u64 num_vis_bytes); -void amdgpu_device_vram_location(struct amdgpu_device *adev, - struct amdgpu_gmc *mc, u64 base); -void amdgpu_device_gart_location(struct amdgpu_device *adev, - struct amdgpu_gmc *mc); int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev); void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 *registers, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a20c13c6f6f1..93476b8c2e72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -651,71 +651,6 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) __clear_bit(wb, adev->wb.used); } -/** - * amdgpu_device_vram_location - try to find VRAM location - * - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations - * @base: base address at which to put VRAM - * - * Function will try to place VRAM at base address provided - * as parameter. - */ -void amdgpu_device_vram_location(struct amdgpu_device *adev, - struct amdgpu_gmc *mc, u64 base) -{ - uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; - - mc->vram_start = base; - mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - if (limit && limit < mc->real_vram_size) - mc->real_vram_size = limit; - dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", - mc->mc_vram_size >> 20, mc->vram_start, - mc->vram_end, mc->real_vram_size >> 20); -} - -/** - * amdgpu_device_gart_location - try to find GART location - * - * @adev: amdgpu device structure holding all necessary informations - * @mc: memory controller structure holding memory informations - * - * Function will place try to place GART before or after VRAM. 
- * - * If GART size is bigger than space left then we ajust GART size. - * Thus function will never fails. - */ -void amdgpu_device_gart_location(struct amdgpu_device *adev, - struct amdgpu_gmc *mc) -{ - u64 size_af, size_bf; - - mc->gart_size += adev->pm.smu_prv_buffer_size; - - size_af = adev->gmc.mc_mask - mc->vram_end; - size_bf = mc->vram_start; - if (size_bf > size_af) { - if (mc->gart_size > size_bf) { - dev_warn(adev->dev, "limiting GART\n"); - mc->gart_size = size_bf; - } - mc->gart_start = 0; - } else { - if (mc->gart_size > size_af) { - dev_warn(adev->dev, "limiting GART\n"); - mc->gart_size = size_af; - } - /* VCE doesn't like it when BOs cross a 4GB segment, so align - * the GART base on a 4GB boundary as well. - */ - mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL); - } - mc->gart_end = mc->gart_start + mc->gart_size - 1; - dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", - mc->gart_size >> 20, mc->gart_start, mc->gart_end); -} - /** * amdgpu_device_resize_fb_bar - try to resize FB BAR * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index a249931ef512..72dffa3fd194 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -78,3 +78,67 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo) } return pd_addr; } + +/** + * amdgpu_gmc_vram_location - try to find VRAM location + * + * @adev: amdgpu device structure holding all necessary informations + * @mc: memory controller structure holding memory informations + * @base: base address at which to put VRAM + * + * Function will try to place VRAM at base address provided + * as parameter. + */ +void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, + u64 base) +{ + uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; + + mc->vram_start = base; + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + if (limit && limit < mc->real_vram_size) + mc->real_vram_size = limit; + dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", + mc->mc_vram_size >> 20, mc->vram_start, + mc->vram_end, mc->real_vram_size >> 20); +} + +/** + * amdgpu_gmc_gart_location - try to find GART location + * + * @adev: amdgpu device structure holding all necessary informations + * @mc: memory controller structure holding memory informations + * + * Function will place try to place GART before or after VRAM. + * + * If GART size is bigger than space left then we ajust GART size. + * Thus function will never fails. + */ +void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) +{ + u64 size_af, size_bf; + + mc->gart_size += adev->pm.smu_prv_buffer_size; + + size_af = adev->gmc.mc_mask - mc->vram_end; + size_bf = mc->vram_start; + if (size_bf > size_af) { + if (mc->gart_size > size_bf) { + dev_warn(adev->dev, "limiting GART\n"); + mc->gart_size = size_bf; + } + mc->gart_start = 0; + } else { + if (mc->gart_size > size_af) { + dev_warn(adev->dev, "limiting GART\n"); + mc->gart_size = size_af; + } + /* VCE doesn't like it when BOs cross a 4GB segment, so align + * the GART base on a 4GB boundary as well. 
+ */ + mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL); + } + mc->gart_end = mc->gart_start + mc->gart_size - 1; + dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", + mc->gart_size >> 20, mc->gart_start, mc->gart_end); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 85030c04c443..588a62f7aebc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -136,5 +136,9 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc) void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags); uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo); +void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, + u64 base); +void amdgpu_gmc_gart_location(struct amdgpu_device *adev, + struct amdgpu_gmc *mc); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 9c45ea318bd6..4411463ca719 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -224,8 +224,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - amdgpu_device_vram_location(adev, &adev->gmc, base); - amdgpu_device_gart_location(adev, mc); + amdgpu_gmc_vram_location(adev, &adev->gmc, base); + amdgpu_gmc_gart_location(adev, mc); } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index d3400064e9db..ae776ce9a415 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -242,8 +242,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - amdgpu_device_vram_location(adev, &adev->gmc, base); - amdgpu_device_gart_location(adev, mc); + amdgpu_gmc_vram_location(adev, &adev->gmc, base); + amdgpu_gmc_gart_location(adev, mc); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index fb0d57655f78..53ae49b8bde8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -411,8 +411,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - amdgpu_device_vram_location(adev, &adev->gmc, base); - amdgpu_device_gart_location(adev, mc); + amdgpu_gmc_vram_location(adev, &adev->gmc, base); + amdgpu_gmc_gart_location(adev, mc); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 9e976c2be955..04d50893a6f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -749,8 +749,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = 0; if (!amdgpu_sriov_vf(adev)) base = mmhub_v1_0_get_fb_location(adev); - amdgpu_device_vram_location(adev, &adev->gmc, base); - amdgpu_device_gart_location(adev, mc); + amdgpu_gmc_vram_location(adev, &adev->gmc, base); + amdgpu_gmc_gart_location(adev, mc); /* base offset of vram pages */ adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); } -- GitLab From 0be655d1c6c6a98811067544f6a84ebd42ba46b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 23 Aug 2018 20:38:52 +0200 Subject: [PATCH 0671/1692] drm/amdgpu: fix amdgpu_gmc_gart_location a little bit 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Improve the VCE limitation handling. Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 28 ++++++++++++------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 72dffa3fd194..8269197df8e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -120,24 +120,22 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) mc->gart_size += adev->pm.smu_prv_buffer_size; - size_af = adev->gmc.mc_mask - mc->vram_end; + /* VCE doesn't like it when BOs cross a 4GB segment, so align + * the GART base on a 4GB boundary as well. + */ size_bf = mc->vram_start; - if (size_bf > size_af) { - if (mc->gart_size > size_bf) { - dev_warn(adev->dev, "limiting GART\n"); - mc->gart_size = size_bf; - } + size_af = adev->gmc.mc_mask + 1 - + ALIGN(mc->vram_end + 1, 0x100000000ULL); + + if (mc->gart_size > max(size_bf, size_af)) { + dev_warn(adev->dev, "limiting GART\n"); + mc->gart_size = max(size_bf, size_af); + } + + if (size_bf > size_af) mc->gart_start = 0; - } else { - if (mc->gart_size > size_af) { - dev_warn(adev->dev, "limiting GART\n"); - mc->gart_size = size_af; - } - /* VCE doesn't like it when BOs cross a 4GB segment, so align - * the GART base on a 4GB boundary as well. - */ + else mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL); - } mc->gart_end = mc->gart_start + mc->gart_size - 1; dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", mc->gart_size >> 20, mc->gart_start, mc->gart_end); -- GitLab From 22d8bfafcc12dfa17b91d2e8ae4e1898e782003a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 27 Aug 2018 13:12:19 +0200 Subject: [PATCH 0672/1692] drm/amdgpu: stop using gart_start as offset for the GTT domain MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Further separate GART and GTT domain. 
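As an illustration only (a simplified standalone sketch, not the driver code; the numeric values are made up), the GTT manager now hands out GMC addresses that already include the GART base, while the GART bind path converts back to a GART-relative offset before programming the page tables:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint64_t gart_start = 0x100000000ULL; /* assumed GART base in the MC address space */
            uint64_t node_start = 42;             /* allocated page index inside the GART window */

            /* what the GTT manager now stores in mem->start (units: pages) */
            uint64_t mem_start = node_start + (gart_start >> PAGE_SHIFT);

            /* what the bind path derives as the GART byte offset */
            uint64_t gtt_offset = (mem_start << PAGE_SHIFT) - gart_start;

            /* round-trips back to the allocated page index (prints 42) */
            printf("%llu\n", (unsigned long long)(gtt_offset >> PAGE_SHIFT));
            return 0;
    }

In this sketch mem_start is a GMC page address while gtt_offset stays relative to the GART aperture, which is the separation the patch is after.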
Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index da7b1b92d9cf..c2539f6821c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -143,7 +143,8 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, spin_unlock(&mgr->lock); if (!r) - mem->start = node->node.start; + mem->start = node->node.start + + (adev->gmc.gart_start >> PAGE_SHIFT); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2f304f9dd543..5cadf4f1ee2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -188,7 +188,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, case TTM_PL_TT: /* GTT memory */ man->func = &amdgpu_gtt_mgr_func; - man->gpu_offset = adev->gmc.gart_start; + man->gpu_offset = 0; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; @@ -1062,7 +1062,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); /* bind pages into GART page tables */ - gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; + gtt->offset = ((u64)bo_mem->start << PAGE_SHIFT) - adev->gmc.gart_start; r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); @@ -1110,7 +1110,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); /* Bind pages */ - gtt->offset = (u64)tmp.start << PAGE_SHIFT; + gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - adev->gmc.gart_start; r = amdgpu_ttm_gart_bind(adev, bo, flags); if (unlikely(r)) { ttm_bo_mem_put(bo, &tmp); -- GitLab From 0e33495d4907f0834321c678f86c912d64f0cd6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 27 Aug 2018 13:51:27 +0200 Subject: [PATCH 0673/1692] drm/amdgpu: distinct between allocated GART space and GMC addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Most of the time we only need to know if the BO has a valid GMC addr. 
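A rough standalone illustration of the simplification (hypothetical types and constant, not the TTM structures): rather than asking which placement a buffer sits in and whether that placement has assigned a GART address, callers can test a single sentinel value:

    #include <stdbool.h>
    #include <stdint.h>

    /* stand-in for AMDGPU_BO_INVALID_OFFSET */
    #define INVALID_OFFSET (~0ULL)

    struct placement {
            uint64_t start; /* GPU address in pages, or INVALID_OFFSET */
    };

    static bool has_gmc_addr(const struct placement *mem)
    {
            return mem->start != INVALID_OFFSET;
    }

Whether the pages currently live in VRAM, in bound GART space or nowhere at all is then irrelevant to most callers; they only branch on has_gmc_addr().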
Signed-off-by: Christian König Reviewed-by: Junwei Zhang Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 13 +++++-------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 5ddd4e87480b..b5f20b42439e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1362,8 +1362,6 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) { WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT && - !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem)); WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && !bo->pin_count); WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5cadf4f1ee2c..d9f3201c9e5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -345,7 +345,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, { uint64_t addr = 0; - if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) { + if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) { addr = mm_node->start << PAGE_SHIFT; addr += bo->bdev->man[mem->mem_type].gpu_offset; } @@ -433,8 +433,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, /* Map only what needs to be accessed. Map src to window 0 and * dst to window 1 */ - if (src->mem->mem_type == TTM_PL_TT && - !amdgpu_gtt_mgr_has_gart_addr(src->mem)) { + if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) { r = amdgpu_map_buffer(src->bo, src->mem, PFN_UP(cur_size + src_page_offset), src_node_start, 0, ring, @@ -447,8 +446,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, from += src_page_offset; } - if (dst->mem->mem_type == TTM_PL_TT && - !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) { + if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) { r = amdgpu_map_buffer(dst->bo, dst->mem, PFN_UP(cur_size + dst_page_offset), dst_node_start, 1, ring, @@ -1086,11 +1084,10 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) uint64_t flags; int r; - if (bo->mem.mem_type != TTM_PL_TT || - amdgpu_gtt_mgr_has_gart_addr(&bo->mem)) + if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) return 0; - /* allocate GTT space */ + /* allocate GART space */ tmp = bo->mem; tmp.mm_node = NULL; placement.num_placement = 1; -- GitLab From d78c1fa0c9f815fe951fd57001acca3d35262a17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Wed, 29 Aug 2018 11:59:38 +0200 Subject: [PATCH 0674/1692] Revert "drm/amdgpu: move PD/PT bos on LRU again" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 31625ccae4464b61ec8cdb9740df848bbc857a5b. It triggered various badness on my development machine when running the piglit gpu profile with radeonsi on Bonaire, looks like memory corruption due to insufficiently protected list manipulations. 
Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 23c78af850c6..521ddb358ec1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1127,7 +1127,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm_bo_base, vm_status); bo_base->moved = false; - list_move(&bo_base->vm_status, &vm->idle); + list_del_init(&bo_base->vm_status); bo = bo_base->bo->parent; if (!bo) -- GitLab From 5f232bd79b2417450064b78a9b3d398f9cd498ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 24 Aug 2018 09:40:10 +0200 Subject: [PATCH 0675/1692] drm/amdgpu: use the smaller hole for GART MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of the larger one use the smaller hole in the MC address space for the GART mappings. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 8269197df8e0..265ec6807130 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -132,7 +132,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) mc->gart_size = max(size_bf, size_af); } - if (size_bf > size_af) + if ((size_bf >= mc->gart_size && size_bf < size_af) || + (size_af < mc->gart_size)) mc->gart_start = 0; else mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL); -- GitLab From 25eef4214a43513c9166d8a99470b3a4d2220976 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 28 Aug 2018 18:56:29 +0200 Subject: [PATCH 0676/1692] drm/ttm: Initialize local lists in ttm_bo_bulk_move_helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The first parameter of list_cut_position() must point to an initialized list. Noticed thanks to KASAN pointing out something's fishy here. Fixes: "drm/ttm: add bulk move function on LRU" Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 39d9d559b279..35d53d81f486 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -250,7 +250,8 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); static void ttm_bo_bulk_move_helper(struct ttm_lru_bulk_move_pos *pos, struct list_head *lru, bool is_swap) { - struct list_head entries, before; + LIST_HEAD(entries); + LIST_HEAD(before); struct list_head *list1, *list2; list1 = is_swap ? &pos->last->swap : &pos->last->lru; -- GitLab From dcaaff4eed13c4dcc15525ff87269b3f4544345a Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Wed, 29 Aug 2018 10:53:23 -0400 Subject: [PATCH 0677/1692] drm/amdgpu: remove redundant memset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kvmalloc_array uses __GFP_ZERO flag ensures that the returned address is zeroed already, memset it to zero again afterwards is unnecessary, and in this case buggy because we only clear the first entry. 
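A minimal userspace model of the pattern being removed (generic names, not the amdgpu structures): with a zeroed allocation the follow-up clear is redundant, and sizing it with sizeof(*e) instead of the whole array would only touch the first element anyway, which is exactly the bug described above:

    #include <stdlib.h>
    #include <string.h>

    struct entry {
            void *base;
            unsigned long addr;
    };

    static struct entry *alloc_entries(size_t num)
    {
            /* calloc() returns zeroed memory, like kvmalloc_array()
             * with __GFP_ZERO in the kernel, so no memset is needed.
             */
            struct entry *e = calloc(num, sizeof(*e));

            if (!e)
                    return NULL;

            /* The removed line was equivalent to:
             *     memset(e, 0, sizeof(*e));
             * which clears only e[0], not all num entries.
             */
            return e;
    }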
Signed-off-by: Philip Yang Reviewed-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 521ddb358ec1..f50697df9799 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -541,7 +541,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, GFP_KERNEL | __GFP_ZERO); if (!parent->entries) return -ENOMEM; - memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt)); } from = saddr >> shift; -- GitLab From 2690262ec9fea3aa364ca9cd31981d7fe3888a5b Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Wed, 29 Aug 2018 12:39:16 -0500 Subject: [PATCH 0678/1692] drm/amdgpu: Relocate some definitions v2 Move some KFD-related (but used in amdgpu_drv.c) definitions from kfd_priv.h to kgd_kfd_interface.h so we don't need to include kfd_priv.h in amdgpu_drv.c. This fixes a build failure when AMDGPU is enabled but MMU_NOTIFIER is not. This patch also disables KFD-related module options when HSA_AMD is not enabled. v2: rebase (Alex) Signed-off-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 22 +++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 28 ------------------- .../gpu/drm/amd/include/kgd_kfd_interface.h | 28 +++++++++++++++++++ 3 files changed, 39 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 82e6b6746511..d7d9a9d32381 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -39,7 +39,6 @@ #include "amdgpu_gem.h" #include "amdgpu_amdkfd.h" -#include "kfd_priv.h" /* * KMS wrapper. @@ -128,16 +127,6 @@ int amdgpu_compute_multipipe = -1; int amdgpu_gpu_recovery = -1; /* auto */ int amdgpu_emu_mode = 0; uint amdgpu_smu_memory_pool_size = 0; -/* KFD parameters */ -int sched_policy = KFD_SCHED_POLICY_HWS; -int hws_max_conc_proc = 8; -int cwsr_enable = 1; -int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; -int send_sigterm; -int debug_largebar; -int ignore_crat; -int noretry; -int halt_if_hws_hang; /** * DOC: vramlimit (int) @@ -543,12 +532,14 @@ MODULE_PARM_DESC(smu_memory_pool_size, "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte"); module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444); +#ifdef CONFIG_HSA_AMD /** * DOC: sched_policy (int) * Set scheduling policy. Default is HWS(hardware scheduling) with over-subscription. * Setting 1 disables over-subscription. Setting 2 disables HWS and statically * assigns queues to HQDs. */ +int sched_policy = KFD_SCHED_POLICY_HWS; module_param(sched_policy, int, 0444); MODULE_PARM_DESC(sched_policy, "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)"); @@ -558,6 +549,7 @@ MODULE_PARM_DESC(sched_policy, * Maximum number of processes that HWS can schedule concurrently. The maximum is the * number of VMIDs assigned to the HWS, which is also the default. */ +int hws_max_conc_proc = 8; module_param(hws_max_conc_proc, int, 0444); MODULE_PARM_DESC(hws_max_conc_proc, "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); @@ -568,6 +560,7 @@ MODULE_PARM_DESC(hws_max_conc_proc, * the middle of a compute wave. 
Default is 1 to enable this feature. Setting 0 * disables it. */ +int cwsr_enable = 1; module_param(cwsr_enable, int, 0444); MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))"); @@ -576,6 +569,7 @@ MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))"); * Maximum number of queues per device. Valid setting is between 1 and 4096. Default * is 4096. */ +int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; module_param(max_num_of_queues_per_device, int, 0444); MODULE_PARM_DESC(max_num_of_queues_per_device, "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); @@ -585,6 +579,7 @@ MODULE_PARM_DESC(max_num_of_queues_per_device, * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm * but just print errors on dmesg. Setting 1 enables sending sigterm. */ +int send_sigterm; module_param(send_sigterm, int, 0444); MODULE_PARM_DESC(send_sigterm, "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)"); @@ -596,6 +591,7 @@ MODULE_PARM_DESC(send_sigterm, * size, usually 256MB. * Default value is 0, diabled. */ +int debug_largebar; module_param(debug_largebar, int, 0444); MODULE_PARM_DESC(debug_largebar, "Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)"); @@ -606,6 +602,7 @@ MODULE_PARM_DESC(debug_largebar, * table to get information about AMD APUs. This option can serve as a workaround on * systems with a broken CRAT table. */ +int ignore_crat; module_param(ignore_crat, int, 0444); MODULE_PARM_DESC(ignore_crat, "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)"); @@ -616,6 +613,7 @@ MODULE_PARM_DESC(ignore_crat, * Setting 1 disables retry. * Retry is needed for recoverable page faults. */ +int noretry; module_param(noretry, int, 0644); MODULE_PARM_DESC(noretry, "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)"); @@ -625,8 +623,10 @@ MODULE_PARM_DESC(noretry, * Halt if HWS hang is detected. Default value, 0, disables the halt on hang. * Setting 1 enables halt on hang. */ +int halt_if_hws_hang; module_param(halt_if_hws_hang, int, 0644); MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)"); +#endif static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f971710f1c91..355f79da8a63 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -103,7 +103,6 @@ */ extern int max_num_of_queues_per_device; -#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ (KFD_MAX_NUM_OF_PROCESSES * \ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) @@ -149,33 +148,6 @@ extern int noretry; */ extern int halt_if_hws_hang; -/** - * enum kfd_sched_policy - * - * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp) - * scheduling. In this scheduling mode we're using the firmware code to - * schedule the user mode queues and kernel queues such as HIQ and DIQ. - * the HIQ queue is used as a special queue that dispatches the configuration - * to the cp and the user mode queues list that are currently running. - * the DIQ queue is a debugging queue that dispatches debugging commands to the - * firmware. - * in this scheduling mode user mode queues over subscription feature is - * enabled. 
- * - * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but the over - * subscription feature disabled. - * - * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly - * set the command processor registers and sets the queues "manually". This - * mode is used *ONLY* for debugging proposes. - * - */ -enum kfd_sched_policy { - KFD_SCHED_POLICY_HWS = 0, - KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION, - KFD_SCHED_POLICY_NO_HWS -}; - enum cache_policy { cache_policy_coherent, cache_policy_noncoherent diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 814576f6ca1c..31c52c116e20 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -98,6 +98,33 @@ enum kgd_engine_type { KGD_ENGINE_MAX }; +/** + * enum kfd_sched_policy + * + * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp) + * scheduling. In this scheduling mode we're using the firmware code to + * schedule the user mode queues and kernel queues such as HIQ and DIQ. + * the HIQ queue is used as a special queue that dispatches the configuration + * to the cp and the user mode queues list that are currently running. + * the DIQ queue is a debugging queue that dispatches debugging commands to the + * firmware. + * in this scheduling mode user mode queues over subscription feature is + * enabled. + * + * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but the over + * subscription feature disabled. + * + * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly + * set the command processor registers and sets the queues "manually". This + * mode is used *ONLY* for debugging proposes. + * + */ +enum kfd_sched_policy { + KFD_SCHED_POLICY_HWS = 0, + KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION, + KFD_SCHED_POLICY_NO_HWS +}; + struct kgd2kfd_shared_resources { /* Bit n == 1 means VMID n is available for KFD. */ unsigned int compute_vmid_bitmap; @@ -153,6 +180,7 @@ struct tile_config { uint32_t num_ranks; }; +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 /* * Allocation flag domains -- GitLab From d9997b64c52b70bd98c48f443f068253621d1ffc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 28 Aug 2018 14:16:23 -0500 Subject: [PATCH 0679/1692] drm/amdgpu: add missing CHIP_HAINAN in amdgpu_ucode_get_load_type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This caused a confusing error message, but there is functionally no problem since the default method is DIRECT. 
Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index b419d6e33b3a..a942fd28dae8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -277,6 +277,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: + case CHIP_HAINAN: return AMDGPU_FW_LOAD_DIRECT; #endif #ifdef CONFIG_DRM_AMDGPU_CIK -- GitLab From 0e06b227c5221dd51b5569de93f3b9f532be4a32 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 29 Aug 2018 16:50:34 +0200 Subject: [PATCH 0680/1692] bpf: fix msg->data/data_end after sg shift repair in bpf_msg_pull_data In the current code, msg->data is set as sg_virt(&sg[i]) + start - offset and msg->data_end relative to it as msg->data + bytes. Using iterator i to point to the updated starting scatterlist element holds true for some cases, however not for all where we'd end up pointing out of bounds. It is /correct/ for these ones: 1) When first finding the starting scatterlist element (sge) where we find that the page is already privately owned by the msg and where the requested bytes and headroom fit into the sge's length. However, it's /incorrect/ for the following ones: 2) After we made the requested area private and updated the newly allocated page into first_sg slot of the scatterlist ring; when we find that no shift repair of the ring is needed where we bail out updating msg->data and msg->data_end. At that point i will point to last_sg, which in this case is the next elem of first_sg in the ring. The sge at that point might as well be invalid (e.g. i == msg->sg_end), which we use for setting the range of sg_virt(&sg[i]). The correct one would have been first_sg. 3) Similar as in 2) but when we find that a shift repair of the ring is needed. In this case we fix up all sges and stop once we've reached the end. In this case i will point to will point to the new msg->sg_end, and the sge at that point will be invalid. Again here the requested range sits in first_sg. Fixes: 015632bb30da ("bpf: sk_msg program helper bpf_sk_msg_pull_data") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: Alexei Starovoitov --- net/core/filter.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index ec4d67c0cf0c..b9225c55926a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2310,6 +2310,7 @@ BPF_CALL_4(bpf_msg_pull_data, if (unlikely(start >= offset + len)) return -EINVAL; + first_sg = i; /* The start may point into the sg element so we need to also * account for the headroom. */ @@ -2317,8 +2318,6 @@ BPF_CALL_4(bpf_msg_pull_data, if (!msg->sg_copy[i] && bytes_sg_total <= len) goto out; - first_sg = i; - /* At this point we need to linearize multiple scatterlist * elements or a single shared page. Either way we need to * copy into a linear buffer exclusively owned by BPF. 
Then @@ -2400,7 +2399,7 @@ BPF_CALL_4(bpf_msg_pull_data, if (msg->sg_end < 0) msg->sg_end += MAX_SKB_FRAGS; out: - msg->data = sg_virt(&sg[i]) + start - offset; + msg->data = sg_virt(&sg[first_sg]) + start - offset; msg->data_end = msg->data + bytes; return 0; -- GitLab From 2e43f95dd8ee62bc8bf57f2afac37fbd70c8d565 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 29 Aug 2018 16:50:35 +0200 Subject: [PATCH 0681/1692] bpf: fix shift upon scatterlist ring wrap-around in bpf_msg_pull_data If first_sg and last_sg wraps around in the scatterlist ring, then we need to account for that in the shift as well. E.g. crafting such msgs where this is the case leads to a hang as shift becomes negative. E.g. consider the following scenario: first_sg := 14 |=> shift := -12 msg->sg_start := 10 last_sg := 3 | msg->sg_end := 5 round 1: i := 15, move_from := 3, sg[15] := sg[ 3] round 2: i := 0, move_from := -12, sg[ 0] := sg[-12] round 3: i := 1, move_from := -11, sg[ 1] := sg[-11] round 4: i := 2, move_from := -10, sg[ 2] := sg[-10] [...] round 13: i := 11, move_from := -1, sg[ 2] := sg[ -1] round 14: i := 12, move_from := 0, sg[ 2] := sg[ 0] round 15: i := 13, move_from := 1, sg[ 2] := sg[ 1] round 16: i := 14, move_from := 2, sg[ 2] := sg[ 2] round 17: i := 15, move_from := 3, sg[ 2] := sg[ 3] [...] This means we will loop forever and never hit the msg->sg_end condition to break out of the loop. When we see that the ring wraps around, then the shift should be MAX_SKB_FRAGS - first_sg + last_sg - 1. Meaning, the remainder slots from the tail of the ring and the head until last_sg combined. Fixes: 015632bb30da ("bpf: sk_msg program helper bpf_sk_msg_pull_data") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: Alexei Starovoitov --- net/core/filter.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/core/filter.c b/net/core/filter.c index b9225c55926a..43ba5f8c38ca 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2370,7 +2370,10 @@ BPF_CALL_4(bpf_msg_pull_data, * had a single entry though we can just replace it and * be done. Otherwise walk the ring and shift the entries. */ - shift = last_sg - first_sg - 1; + WARN_ON_ONCE(last_sg == first_sg); + shift = last_sg > first_sg ? + last_sg - first_sg - 1 : + MAX_SKB_FRAGS - first_sg + last_sg - 1; if (!shift) goto out; -- GitLab From a8cf76a9023bc6709b1361d06bb2fae5227b9d68 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 29 Aug 2018 16:50:36 +0200 Subject: [PATCH 0682/1692] bpf: fix sg shift repair start offset in bpf_msg_pull_data When we perform the sg shift repair for the scatterlist ring, we currently start out at i = first_sg + 1. However, this is not correct since the first_sg could point to the sge sitting at slot MAX_SKB_FRAGS - 1, and a subsequent i = MAX_SKB_FRAGS will access the scatterlist ring (sg) out of bounds. Add the sk_msg_iter_var() helper for iterating through the ring, and apply the same rule for advancing to the next ring element as we do elsewhere. Later work will use this helper also in other places. 
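For illustration, the wrap-around rule the new helper encodes can be modelled in plain C (RING_SIZE here is only a stand-in for MAX_SKB_FRAGS; this is a sketch, not the BPF code itself):

    #define RING_SIZE 17 /* illustrative ring size */

    /* advance a scatterlist ring index by one slot, wrapping at the end */
    static unsigned int ring_next(unsigned int i)
    {
            return (i + 1 == RING_SIZE) ? 0 : i + 1;
    }

With first_sg sitting in the last slot (RING_SIZE - 1), the old starting point "first_sg + 1" evaluates to RING_SIZE, one past the end of the ring, whereas ring_next(first_sg) correctly wraps back to slot 0.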
Fixes: 015632bb30da ("bpf: sk_msg program helper bpf_sk_msg_pull_data") Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: Alexei Starovoitov --- net/core/filter.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 43ba5f8c38ca..2c7801f6737a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2282,6 +2282,13 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { .arg2_type = ARG_ANYTHING, }; +#define sk_msg_iter_var(var) \ + do { \ + var++; \ + if (var == MAX_SKB_FRAGS) \ + var = 0; \ + } while (0) + BPF_CALL_4(bpf_msg_pull_data, struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) { @@ -2302,9 +2309,7 @@ BPF_CALL_4(bpf_msg_pull_data, if (start < offset + len) break; offset += len; - i++; - if (i == MAX_SKB_FRAGS) - i = 0; + sk_msg_iter_var(i); } while (i != msg->sg_end); if (unlikely(start >= offset + len)) @@ -2330,9 +2335,7 @@ BPF_CALL_4(bpf_msg_pull_data, */ do { copy += sg[i].length; - i++; - if (i == MAX_SKB_FRAGS) - i = 0; + sk_msg_iter_var(i); if (bytes_sg_total <= copy) break; } while (i != msg->sg_end); @@ -2358,9 +2361,7 @@ BPF_CALL_4(bpf_msg_pull_data, sg[i].length = 0; put_page(sg_page(&sg[i])); - i++; - if (i == MAX_SKB_FRAGS) - i = 0; + sk_msg_iter_var(i); } while (i != last_sg); sg[first_sg].length = copy; @@ -2377,7 +2378,8 @@ BPF_CALL_4(bpf_msg_pull_data, if (!shift) goto out; - i = first_sg + 1; + i = first_sg; + sk_msg_iter_var(i); do { int move_from; @@ -2394,9 +2396,7 @@ BPF_CALL_4(bpf_msg_pull_data, sg[move_from].page_link = 0; sg[move_from].offset = 0; - i++; - if (i == MAX_SKB_FRAGS) - i = 0; + sk_msg_iter_var(i); } while (1); msg->sg_end -= shift; if (msg->sg_end < 0) -- GitLab From 74081c9f16a213f8f2681c175dc6ad7d17ad16ba Mon Sep 17 00:00:00 2001 From: Fabrizio Castro Date: Tue, 14 Aug 2018 13:33:48 +0100 Subject: [PATCH 0683/1692] dt-bindings: watchdog: renesas-wdt: Document r8a774a1 support RZ/G2M (R8A774A1) watchdog implementation is compatible with R-Car Gen3, therefore add relevant documentation. Signed-off-by: Fabrizio Castro Reviewed-by: Biju Das Reviewed-by: Rob Herring Reviewed-by: Simon Horman Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- Documentation/devicetree/bindings/watchdog/renesas-wdt.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt index 5d47a262474c..9407212a85a8 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt @@ -7,6 +7,7 @@ Required properties: Examples with soctypes are: - "renesas,r8a7743-wdt" (RZ/G1M) - "renesas,r8a7745-wdt" (RZ/G1E) + - "renesas,r8a774a1-wdt" (RZ/G2M) - "renesas,r8a7790-wdt" (R-Car H2) - "renesas,r8a7791-wdt" (R-Car M2-W) - "renesas,r8a7792-wdt" (R-Car V2H) @@ -21,8 +22,8 @@ Required properties: - "renesas,r7s72100-wdt" (RZ/A1) The generic compatible string must be: - "renesas,rza-wdt" for RZ/A - - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G - - "renesas,rcar-gen3-wdt" for R-Car Gen3 + - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1 + - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2 - reg : Should contain WDT registers location and length - clocks : the clock feeding the watchdog timer. 
-- GitLab From 1dbd150d04f11a6e8d03c80132167e8d3391f8ce Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 27 Aug 2018 15:27:08 +0200 Subject: [PATCH 0684/1692] ARM: defconfig: Update the ARM Versatile defconfig This updates the ARM Versatile defconfig to the latest Kconfig structural changes and adds the DUMB VGA bridge driver so that VGA works out of the box, e.g. with QEMU. Signed-off-by: Linus Walleij Signed-off-by: Olof Johansson --- arch/arm/configs/versatile_defconfig | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index df68dc4056e5..5282324c7cef 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig @@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_PARTITION_ADVANCED=y # CONFIG_ARCH_MULTI_V7 is not set CONFIG_ARCH_VERSATILE=y CONFIG_AEABI=y CONFIG_OABI_COMPAT=y -CONFIG_CMA=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CMDLINE="root=1f03 mem=32M" CONFIG_FPE_NWFPE=y CONFIG_VFP=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y CONFIG_DRM=y CONFIG_DRM_PANEL_ARM_VERSATILE=y CONFIG_DRM_PANEL_SIMPLE=y +CONFIG_DRM_DUMB_VGA_DAC=y CONFIG_DRM_PL111=y CONFIG_FB_MODE_HELPERS=y CONFIG_BACKLIGHT_LCD_SUPPORT=y @@ -89,9 +90,10 @@ CONFIG_NFSD=y CONFIG_NFSD_V3=y CONFIG_NLS_CODEPAGE_850=m CONFIG_NLS_ISO8859_1=m +CONFIG_FONTS=y +CONFIG_FONT_ACORN_8x8=y +CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_USER=y CONFIG_DEBUG_LL=y -CONFIG_FONTS=y -CONFIG_FONT_ACORN_8x8=y -- GitLab From 67e6ddb5be440a92b82e87ca0ab8f973ae31b12c Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Mon, 27 Aug 2018 19:46:50 -0500 Subject: [PATCH 0685/1692] arm64: defconfig: Enable TI's AM6 SoC platform Enable K3 SoC platform for TI's AM6 SoC. Signed-off-by: Nishanth Menon Acked-by: Tony Lindgren Signed-off-by: Olof Johansson --- arch/arm64/configs/defconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f67e8d5e93ad..db8d364f8476 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y CONFIG_ARCH_BERLIN=y CONFIG_ARCH_BRCMSTB=y CONFIG_ARCH_EXYNOS=y +CONFIG_ARCH_K3=y CONFIG_ARCH_LAYERSCAPE=y CONFIG_ARCH_LG1K=y CONFIG_ARCH_HISI=y @@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y CONFIG_ARCH_TEGRA_210_SOC=y CONFIG_ARCH_TEGRA_186_SOC=y CONFIG_ARCH_TEGRA_194_SOC=y +CONFIG_ARCH_K3_AM6_SOC=y +CONFIG_SOC_TI=y CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_EXTCON_USB_GPIO=y CONFIG_EXTCON_USBC_CROS_EC=y -- GitLab From f013027e266553effa3e9d9d62236ae5ee3b25e7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 16 Aug 2018 08:34:46 +0100 Subject: [PATCH 0686/1692] drm/i915: Stop holding a ref to the ppgtt from each vma The context owns both the ppgtt and the vma within it, and our activity tracking on the context ensures that we do not release active ppgtt. As the context fulfils our obligations for active memory tracking, we can relinquish the reference from the vma. This fixes a silly transient refleak from closed vma being kept alive until the entire system was idle, keeping all vm alive as well. 
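The ownership rule can be sketched with hypothetical types (a simplified model, not the i915 structures): the context owns both the address space and the VMAs created inside it, so a VMA only needs a plain backpointer, and it is the context's activity tracking that keeps the address space alive:

    /* simplified model with made-up types */
    struct address_space_obj {
            int refcount; /* held by the owning context, not by each vma */
    };

    struct vma_obj {
            struct address_space_obj *vm; /* weak backpointer, no reference taken */
    };

Dropping the per-VMA reference removes the transient leak described above: a closed VMA no longer pins its whole address space until the entire system goes idle.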
Reported-by: Paulo Zanoni Testcase: igt/gem_ctx_create/files Fixes: 3365e2268b6b ("drm/i915: Lazily unbind vma on close") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Paulo Zanoni Reviewed-by: Mika Kuoppala Tested-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180816073448.19396-1-chris@chris-wilson.co.uk (cherry picked from commit a4417b7b419a68540ad7945ac4efbb39d19afa63) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/i915_vma.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..98358b4b36de 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, vma->flags |= I915_VMA_GGTT; list_add(&vma->obj_link, &obj->vma_list); } else { - i915_ppgtt_get(i915_vm_to_ppgtt(vm)); list_add_tail(&vma->obj_link, &obj->vma_list); } @@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) if (vma->obj) rb_erase(&vma->obj_node, &vma->obj->vma_tree); - if (!i915_vma_is_ggtt(vma)) - i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); - rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { GEM_BUG_ON(i915_gem_active_isset(&iter->base)); kfree(iter); -- GitLab From 299c2a904b1e8d5096d4813df6371357d97a6cd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fredrik=20Sch=C3=B6n?= Date: Fri, 17 Aug 2018 22:07:28 +0200 Subject: [PATCH 0687/1692] drm/i915: Increase LSPCON timeout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 100 ms is not enough time for the LSPCON adapter on Intel NUC devices to settle. This causes dropped display modes at boot or screen reconfiguration. Empirical testing can reproduce the error up to a timeout of 190 ms. Basic boot and stress testing at 200 ms has not (yet) failed. Increase timeout to 400 ms to get some margin of error. Changes from v1: The initial suggestion of 1000 ms was lowered due to concerns about delaying valid timeout cases. Update patch metadata. 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107503 Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1570392 Fixes: 357c0ae9198a ("drm/i915/lspcon: Wait for expected LSPCON mode to settle") Cc: Shashank Sharma Cc: Imre Deak Cc: Jani Nikula Cc: # v4.11+ Reviewed-by: Rodrigo Vivi Reviewed-by: Shashank Sharma Signed-off-by: Fredrik Schön Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180817200728.8154-1-fredrik.schon@gmail.com (cherry picked from commit 59f1c8ab30d6f9042562949f42cbd3f3cf69de94) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_lspcon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", lspcon_mode_name(mode)); - wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); + wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); if (current_mode != mode) DRM_ERROR("LSPCON mode hasn't settled\n"); -- GitLab From 80ab316901bc4ae6dd0b5903dbe22766307eac9b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Aug 2018 11:02:41 +0100 Subject: [PATCH 0688/1692] drm/i915/audio: Hook up component bindings even if displays are disabled If the display has been disabled by modparam, we still want to connect together the HW bits and bobs with the associated drivers so that we can continue to manage their runtime power gating. Fixes: 108109444ff6 ("drm/i915: Check num_pipes before initializing audio component") Signed-off-by: Chris Wilson Cc: Imre Deak Cc: Takashi Iwai Cc: Jani Nikula Cc: Elaine Wang Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20180817100241.4628-1-chris@chris-wilson.co.uk (cherry picked from commit 35a5fd9ebfa93758ca579e30f337b6c9126d995b) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_audio.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b725835b47ef..769f3f586661 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) { int ret; - if (INTEL_INFO(dev_priv)->num_pipes == 0) - return; - ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); if (ret < 0) { DRM_ERROR("failed to add audio component (%d)\n", ret); -- GitLab From ff69279a44e9ba876466b7d3ab84d6dbd31cac92 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 29 Aug 2018 08:47:53 +0200 Subject: [PATCH 0689/1692] powerpc: disable support for relative ksymtab references The newly added code that emits ksymtab entries as pairs of 32-bit relative references interacts poorly with the way powerpc lays out its address space: when a module exports a per-CPU variable, the primary module region covering the ksymtab entry -and thus the 32-bit relative reference- is too far away from the actual per-CPU variable's base address (to which the per-CPU offsets are applied to obtain the respective address of each CPU's copy), resulting in corruption when the module loader attempts to resolve symbol references of modules that are loaded on top and link to the exported per-CPU symbol. So let's disable this feature on powerpc. 
Even though it implements CONFIG_RELOCATABLE, it does not implement CONFIG_RANDOMIZE_BASE and so KASLR kernels (which are the main target of the feature) do not exist on powerpc anyway. Reported-by: Andreas Schwab Suggested-by: Nicholas Piggin Signed-off-by: Ard Biesheuvel Signed-off-by: Linus Torvalds --- arch/powerpc/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index db0b6eebbfa5..a80669209155 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -177,7 +177,6 @@ config PPC select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT - select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_CBPF_JIT if !PPC64 -- GitLab From 9f2895461439fda2801a7906fb4c5fb3dbb37a0a Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Thu, 23 Aug 2018 19:49:54 +0300 Subject: [PATCH 0690/1692] vti6: remove !skb->ignore_df check from vti6_xmit() Before the commit d6990976af7c ("vti6: fix PMTU caching and reporting on xmit") '!skb->ignore_df' check was always true because the function skb_scrub_packet() was called before it, resetting ignore_df to zero. In the commit, skb_scrub_packet() was moved below, and now this check can be false for the packet, e.g. when sending it in the two fragments, this prevents successful PMTU updates in such case. The next attempts to send the packet lead to the same tx error. Moreover, vti6 initial MTU value relies on PMTU adjustments. This issue can be reproduced with the following LTP test script: udp_ipsec_vti.sh -6 -p ah -m tunnel -s 2000 Fixes: ccd740cbc6e0 ("vti6: Add pmtu handling to vti6_xmit.") Signed-off-by: Alexey Kodanev Acked-by: Steffen Klassert Signed-off-by: David S. Miller --- net/ipv6/ip6_vti.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 5095367c7204..eeaf7455d51e 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) } mtu = dst_mtu(dst); - if (!skb->ignore_df && skb->len > mtu) { + if (skb->len > mtu) { skb_dst_update_pmtu(skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { -- GitLab From bd583fe30427500a2d0abe25724025b1cb5e2636 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 23 Aug 2018 16:19:44 -0700 Subject: [PATCH 0691/1692] tipc: fix a missing rhashtable_walk_exit() rhashtable_walk_exit() must be paired with rhashtable_walk_enter(). Fixes: 40f9f4397060 ("tipc: Fix tipc_sk_reinit race conditions") Cc: Herbert Xu Cc: Ying Xue Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/tipc/socket.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index c1e93c9515bc..c9a50b62c738 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2672,6 +2672,8 @@ void tipc_sk_reinit(struct net *net) rhashtable_walk_stop(&iter); } while (tsk == ERR_PTR(-EAGAIN)); + + rhashtable_walk_exit(&iter); } static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) -- GitLab From e5133f2f1261f8ab412e7fc5e3694c9f84328f89 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 24 Aug 2018 11:04:40 +0200 Subject: [PATCH 0692/1692] Revert "net: stmmac: Do not keep rearming the coalesce timer in stmmac_xmit" This reverts commit 4ae0169fd1b3c792b66be58995b7e6b629919ecf. This change in the handling of the coalesce timer is causing regression on (at least) amlogic platforms. 
Network will break down very quickly (a few seconds) after starting a download. This can easily be reproduced using iperf3 for example. The problem has been reported on the S805, S905, S912 and A113 SoCs (Realtek and Micrel PHYs) and it is likely impacting all Amlogics platforms using Gbit ethernet No problem was seen with the platform using 10/100 only PHYs (GXL internal) Reverting change brings things back to normal and allows to use network again until we better understand the problem with the coalesce timer. Cc: Jose Abreu Cc: Joao Pinto Cc: Vitor Soares Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Cc: Corentin Labbe Signed-off-by: Jerome Brunet Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 - drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 76649adf8fb0..c0a855b7ab3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -112,7 +112,6 @@ struct stmmac_priv { u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; - bool tx_timer_armed; int tx_coalesce; int hwts_tx_en; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ff1ffb46198a..9f458bb16f2a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) * element in case of no SG. */ priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames) && - !priv->tx_timer_armed) { + if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); - priv->tx_timer_armed = true; } else { priv->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; - priv->tx_timer_armed = false; } skb_tx_timestamp(skb); -- GitLab From 9a07efa9aea2f4a59f35da0785a4e6a6b5a96192 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 24 Aug 2018 12:28:06 -0700 Subject: [PATCH 0693/1692] tipc: switch to rhashtable iterator syzbot reported a use-after-free in tipc_group_fill_sock_diag(), where tipc_group_fill_sock_diag() still reads tsk->group meanwhile tipc_group_delete() just deletes it in tipc_release(). tipc_nl_sk_walk() aims to lock this sock when walking each sock in the hash table to close race conditions with sock changes like this one, by acquiring tsk->sk.sk_lock.slock spinlock, unfortunately this doesn't work at all. All non-BH call path should take lock_sock() instead to make it work. tipc_nl_sk_walk() brutally iterates with raw rht_for_each_entry_rcu() where RCU read lock is required, this is the reason why lock_sock() can't be taken on this path. This could be resolved by switching to rhashtable iterator API's, where taking a sleepable lock is possible. Also, the iterator API's are friendly for restartable calls like diag dump, the last position is remembered behind the scence, all we need to do here is saving the iterator into cb->args[]. I tested this with parallel tipc diag dump and thousands of tipc socket creation and release, no crash or memory leak. Reported-by: syzbot+b9c8f3ab2994b7cd1625@syzkaller.appspotmail.com Cc: Jon Maloy Cc: Ying Xue Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- net/tipc/diag.c | 2 ++ net/tipc/netlink.c | 2 ++ net/tipc/socket.c | 76 ++++++++++++++++++++++++++++++---------------- net/tipc/socket.h | 2 ++ 4 files changed, 56 insertions(+), 26 deletions(-) diff --git a/net/tipc/diag.c b/net/tipc/diag.c index aaabb0b776dd..73137f4aeb68 100644 --- a/net/tipc/diag.c +++ b/net/tipc/diag.c @@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb, if (h->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { + .start = tipc_dump_start, .dump = tipc_diag_dump, + .done = tipc_dump_done, }; netlink_dump_start(net->diag_nlsk, skb, h, &c); return 0; diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index 6ff2254088f6..99ee419210ba 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = { }, { .cmd = TIPC_NL_SOCK_GET, + .start = tipc_dump_start, .dumpit = tipc_nl_sk_dump, + .done = tipc_dump_done, .policy = tipc_nl_policy, }, { diff --git a/net/tipc/socket.c b/net/tipc/socket.c index c9a50b62c738..ab7a2a7178f7 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3229,45 +3229,69 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, struct netlink_callback *cb, struct tipc_sock *tsk)) { - struct net *net = sock_net(skb->sk); - struct tipc_net *tn = tipc_net(net); - const struct bucket_table *tbl; - u32 prev_portid = cb->args[1]; - u32 tbl_id = cb->args[0]; - struct rhash_head *pos; + struct rhashtable_iter *iter = (void *)cb->args[0]; struct tipc_sock *tsk; int err; - rcu_read_lock(); - tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); - for (; tbl_id < tbl->size; tbl_id++) { - rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { - spin_lock_bh(&tsk->sk.sk_lock.slock); - if (prev_portid && prev_portid != tsk->portid) { - spin_unlock_bh(&tsk->sk.sk_lock.slock); + rhashtable_walk_start(iter); + while ((tsk = rhashtable_walk_next(iter)) != NULL) { + if (IS_ERR(tsk)) { + err = PTR_ERR(tsk); + if (err == -EAGAIN) { + err = 0; continue; } + break; + } - err = skb_handler(skb, cb, tsk); - if (err) { - prev_portid = tsk->portid; - spin_unlock_bh(&tsk->sk.sk_lock.slock); - goto out; - } - - prev_portid = 0; - spin_unlock_bh(&tsk->sk.sk_lock.slock); + sock_hold(&tsk->sk); + rhashtable_walk_stop(iter); + lock_sock(&tsk->sk); + err = skb_handler(skb, cb, tsk); + if (err) { + release_sock(&tsk->sk); + sock_put(&tsk->sk); + goto out; } + release_sock(&tsk->sk); + rhashtable_walk_start(iter); + sock_put(&tsk->sk); } + rhashtable_walk_stop(iter); out: - rcu_read_unlock(); - cb->args[0] = tbl_id; - cb->args[1] = prev_portid; - return skb->len; } EXPORT_SYMBOL(tipc_nl_sk_walk); +int tipc_dump_start(struct netlink_callback *cb) +{ + struct rhashtable_iter *iter = (void *)cb->args[0]; + struct net *net = sock_net(cb->skb->sk); + struct tipc_net *tn = tipc_net(net); + + if (!iter) { + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; + + cb->args[0] = (long)iter; + } + + rhashtable_walk_enter(&tn->sk_rht, iter); + return 0; +} +EXPORT_SYMBOL(tipc_dump_start); + +int tipc_dump_done(struct netlink_callback *cb) +{ + struct rhashtable_iter *hti = (void *)cb->args[0]; + + rhashtable_walk_exit(hti); + kfree(hti); + return 0; +} +EXPORT_SYMBOL(tipc_dump_done); + int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, struct tipc_sock *tsk, u32 sk_filter_state, u64 (*tipc_diag_gen_cookie)(struct sock *sk)) diff --git a/net/tipc/socket.h b/net/tipc/socket.h index aff9b2ae5a1f..d43032e26532 100644 --- 
a/net/tipc/socket.h +++ b/net/tipc/socket.h @@ -68,4 +68,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, int (*skb_handler)(struct sk_buff *skb, struct netlink_callback *cb, struct tipc_sock *tsk)); +int tipc_dump_start(struct netlink_callback *cb); +int tipc_dump_done(struct netlink_callback *cb); #endif -- GitLab From 05212ba8132b42047ab5d63d759c6f9c28e7eab5 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sun, 26 Aug 2018 17:03:09 +0300 Subject: [PATCH 0694/1692] r8169: set RxConfig after tx/rx is enabled for RTL8169sb/8110sb devices I have two Ethernet adapters: r8169 0000:03:01.0 eth0: RTL8169sb/8110sb, 00:14:d1:14:2d:49, XID 10000000, IRQ 18 r8169 0000:01:00.0 eth0: RTL8168e/8111e, 64:66:b3:11:14:5d, XID 2c200000, IRQ 30 And after upgrading from linux 4.15 [1] to linux 4.18+ [2] the RTL8169sb failed to receive any packets. tcpdump shows a lot of checksum mismatches. [1]: a0f79386a4968b4925da6db2d1daffd0605a4402 [2]: 0519359784328bfa92bf0931bf0cff3b58c16932 (4.19 merge window opened) I started bisecting and found that [3] breaks it. According to [4]: "For 8110S, 8110SB, and 8110SC series, the initial value of RxConfig needs to be set after the tx/rx is enabled." So I moved rtl_init_rxcfg() after enabling tx/rx and now my adapter works (RTL8168e works too). [3]: 3559d81e76bfe3803e89f2e04cf6ef7ab4f3aace [4]: e542a2269f232d61270ceddd42b73a4348dee2bb ("r8169: adjust the RxConfig settings.") Also drop "rx" from rtl_set_rx_tx_config_registers(), since it does nothing with rx already. Fixes: 3559d81e76bfe3803e89f2e04cf6ef7ab4f3aace ("r8169: simplify rtl_hw_start_8169") Cc: Heiner Kallweit Cc: David S. Miller Cc: netdev@vger.kernel.org Cc: Realtek linux nic maintainers Signed-off-by: Azat Khuzhin Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0efa977c422d..ac306797590e 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4522,7 +4522,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) rtl_hw_reset(tp); } -static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) +static void rtl_set_tx_config_registers(struct rtl8169_private *tp) { /* Set DMA burst size and Interframe Gap Time */ RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | @@ -4633,12 +4633,14 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_set_rx_max_size(tp); rtl_set_rx_tx_desc_registers(tp); - rtl_set_rx_tx_config_registers(tp); + rtl_set_tx_config_registers(tp); RTL_W8(tp, Cfg9346, Cfg9346_Lock); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ RTL_R8(tp, IntrMask); RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_init_rxcfg(tp); + rtl_set_rx_mode(tp->dev); /* no early-rx interrupts */ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); -- GitLab From 31fabbee8f5c658c3fa1603c66e9e4f51ea8c2c6 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Mon, 27 Aug 2018 09:59:29 +0800 Subject: [PATCH 0695/1692] net: hns: add the code for cleaning pkt in chip If there are packets in hardware when changing the speed or duplex, it may cause the hardware to hang. This patch adds code to wait for the chip to clean all packets (TX & RX) in the chip when the driver uses the "adjust link" function. This patch cleans the packets as follows: 1) close rx of chip, close tx of protocol stack. 2) wait rcb, ppe, mac to clean.
3) adjust link 4) open rx of chip, open tx of protocol stack. Signed-off-by: Peng Li Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hnae.h | 2 + .../net/ethernet/hisilicon/hns/hns_ae_adapt.c | 67 ++++++++++++++++++- .../ethernet/hisilicon/hns/hns_dsaf_gmac.c | 36 ++++++++++ .../net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 44 ++++++++++++ .../net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 8 +++ .../ethernet/hisilicon/hns/hns_dsaf_main.c | 29 ++++++++ .../ethernet/hisilicon/hns/hns_dsaf_main.h | 3 + .../net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 23 +++++++ .../net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 1 + .../net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 23 +++++++ .../net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 1 + .../net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 1 + drivers/net/ethernet/hisilicon/hns/hns_enet.c | 21 +++++- 13 files changed, 255 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index cad52bd331f7..08a750fb60c4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -486,6 +486,8 @@ struct hnae_ae_ops { u8 *auto_neg, u16 *speed, u8 *duplex); void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); + bool (*need_adjust_link)(struct hnae_handle *handle, + int speed, int duplex); int (*set_loopback)(struct hnae_handle *handle, enum hnae_loop loop_mode, int en); void (*get_ring_bdnum_limit)(struct hnae_queue *queue, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index e6aad30e7e69..b52029e26d15 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle) hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; } +static int hns_ae_wait_flow_down(struct hnae_handle *handle) +{ + struct dsaf_device *dsaf_dev; + struct hns_ppe_cb *ppe_cb; + struct hnae_vf_cb *vf_cb; + int ret; + int i; + + for (i = 0; i < handle->q_num; i++) { + ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]); + if (ret) + return ret; + } + + ppe_cb = hns_get_ppe_cb(handle); + ret = hns_ppe_wait_tx_fifo_clean(ppe_cb); + if (ret) + return ret; + + dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); + if (!dsaf_dev) + return -EINVAL; + ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id); + if (ret) + return ret; + + vf_cb = hns_ae_get_vf_cb(handle); + ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb); + if (ret) + return ret; + + mdelay(10); + return 0; +} + static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) { int q_num = handle->q_num; @@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle, return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); } +static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed, + int duplex) +{ + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + + return hns_mac_need_adjust_link(mac_cb, speed, duplex); +} + static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, int duplex) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); - hns_mac_adjust_link(mac_cb, speed, duplex); + switch (mac_cb->dsaf_dev->dsaf_ver) { + case AE_VERSION_1: + hns_mac_adjust_link(mac_cb, speed, duplex); + break; + + case AE_VERSION_2: + /* chip need to clear all pkt inside */ + hns_mac_disable(mac_cb, MAC_COMM_MODE_RX); 
+ if (hns_ae_wait_flow_down(handle)) { + hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); + break; + } + + hns_mac_adjust_link(mac_cb, speed, duplex); + hns_mac_enable(mac_cb, MAC_COMM_MODE_RX); + break; + + default: + break; + } + + return; } static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, @@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { .get_status = hns_ae_get_link_status, .get_info = hns_ae_get_mac_info, .adjust_link = hns_ae_adjust_link, + .need_adjust_link = hns_ae_need_adjust_link, .set_loopback = hns_ae_config_loopback, .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, .get_pauseparam = hns_ae_get_pauseparam, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 5488c6e89f21..09e4061d1fa6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en, *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); } +static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed, + int duplex) +{ + struct mac_driver *drv = (struct mac_driver *)mac_drv; + struct hns_mac_cb *mac_cb = drv->mac_cb; + + return (mac_cb->speed != speed) || + (mac_cb->half_duplex == duplex); +} + static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, u32 full_duplex) { @@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) hns_gmac_set_uc_match(mac_drv, en); } +int hns_gmac_wait_fifo_clean(void *mac_drv) +{ + struct mac_driver *drv = (struct mac_driver *)mac_drv; + int wait_cnt; + u32 val; + + wait_cnt = 0; + while (wait_cnt++ < HNS_MAX_WAIT_CNT) { + val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG); + /* bit5~bit0 is not send complete pkts */ + if ((val & 0x3f) == 0) + break; + usleep_range(100, 200); + } + + if (wait_cnt >= HNS_MAX_WAIT_CNT) { + dev_err(drv->dev, + "hns ge %d fifo was not idle.\n", drv->mac_id); + return -EBUSY; + } + + return 0; +} + static void hns_gmac_init(void *mac_drv) { u32 port; @@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->mac_disable = hns_gmac_disable; mac_drv->mac_free = hns_gmac_free; mac_drv->adjust_link = hns_gmac_adjust_link; + mac_drv->need_adjust_link = hns_gmac_need_adjust_link; mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; @@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) mac_drv->get_strings = hns_gmac_get_strings; mac_drv->update_stats = hns_gmac_update_stats; mac_drv->set_promiscuous = hns_gmac_set_promisc; + mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean; return (void *)mac_drv; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 1c2326bd76e2..6ed6f142427e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, return 0; } +/** + *hns_mac_is_adjust_link - check is need change mac speed and duplex register + *@mac_cb: mac device + *@speed: phy device speed + *@duplex:phy device duplex + * + */ +bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) +{ + struct mac_driver *mac_ctrl_drv; + + 
mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac); + + if (mac_ctrl_drv->need_adjust_link) + return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv, + (enum mac_speed)speed, duplex); + else + return true; +} + void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) { int ret; @@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) return 0; } +int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb) +{ + struct mac_driver *drv = hns_mac_get_drv(mac_cb); + + if (drv->wait_fifo_clean) + return drv->wait_fifo_clean(drv); + + return 0; +} + void hns_mac_reset(struct hns_mac_cb *mac_cb) { struct mac_driver *drv = hns_mac_get_drv(mac_cb); @@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev) return DSAF_MAX_PORT_NUM; } +void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) +{ + struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); + + mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode); +} + +void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode) +{ + struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); + + mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode); +} + /** * hns_mac_init - init mac * @dsaf_dev: dsa fabric device struct pointer diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index bbc0a98e7ca3..fbc75341bef7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -356,6 +356,9 @@ struct mac_driver { /*adjust mac mode of port,include speed and duplex*/ int (*adjust_link)(void *mac_drv, enum mac_speed speed, u32 full_duplex); + /* need adjust link */ + bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed, + int duplex); /* config autoegotaite mode of port*/ void (*set_an_mode)(void *mac_drv, u8 enable); /* config loopbank mode */ @@ -394,6 +397,7 @@ struct mac_driver { void (*get_info)(void *mac_drv, struct mac_info *mac_info); void (*update_stats)(void *mac_drv); + int (*wait_fifo_clean)(void *mac_drv); enum mac_mode mac_mode; u8 mac_id; @@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb, int hns_mac_init(struct dsaf_device *dsaf_dev); void mac_adjust_link(struct net_device *net_dev); +bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex); void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); int hns_mac_set_multi(struct hns_mac_cb *mac_cb, @@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, const unsigned char *addr); int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); +void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); +void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode); +int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb); #endif /* _HNS_DSAF_MAC_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ca50c2553a9c..e557a4ef5996 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, soft_mac_entry->index = enable ? 
entry_index : DSAF_INVALID_ENTRY_IDX; } +int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port) +{ + u32 val, val_tmp; + int wait_cnt; + + if (port >= DSAF_SERVICE_NW_NUM) + return 0; + + wait_cnt = 0; + while (wait_cnt++ < HNS_MAX_WAIT_CNT) { + val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG + + (port + DSAF_XGE_NUM) * 0x40); + val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG + + (port + DSAF_XGE_NUM) * 0x40); + if (val == val_tmp) + break; + + usleep_range(100, 200); + } + + if (wait_cnt >= HNS_MAX_WAIT_CNT) { + dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n", + val, val_tmp); + return -EBUSY; + } + + return 0; +} + /** * dsaf_probe - probo dsaf dev * @pdev: dasf platform device diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 4507e8222683..0e1cd99831a6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -44,6 +44,8 @@ struct hns_mac_cb; #define DSAF_ROCE_CREDIT_CHN 8 #define DSAF_ROCE_CHAN_MODE 3 +#define HNS_MAX_WAIT_CNT 10000 + enum dsaf_roce_port_mode { DSAF_ROCE_6PORT_MODE, DSAF_ROCE_4PORT_MODE, @@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr( int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id, u8 port_num); +int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index d160d8c9e45b..0942e4916d9d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); } +int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb) +{ + int wait_cnt; + u32 val; + + wait_cnt = 0; + while (wait_cnt++ < HNS_MAX_WAIT_CNT) { + val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU; + if (!val) + break; + + usleep_range(100, 200); + } + + if (wait_cnt >= HNS_MAX_WAIT_CNT) { + dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n", + val); + return -EBUSY; + } + + return 0; +} + /** * ppe_init_hw - init ppe * @ppe_cb: ppe device diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 9d8e643e8aa6..f670e63a5a01 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -100,6 +100,7 @@ struct ppe_common_cb { }; +int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb); int hns_ppe_init(struct dsaf_device *dsaf_dev); void hns_ppe_uninit(struct dsaf_device *dsaf_dev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 9d76e2e54f9d..5d64519b9b1d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag) "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); } +int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs) +{ + u32 head, tail; + int wait_cnt; + + tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL); + wait_cnt = 0; + while (wait_cnt++ < HNS_MAX_WAIT_CNT) { + head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD); + if (tail == head) + break; + + usleep_range(100, 200); + } + 
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) { + dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n"); + return -EBUSY; + } + + return 0; +} + /** *hns_rcb_reset_ring_hw - ring reset *@q: ring struct pointer diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 602816498c8d..2319b772a271 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); void hns_rcb_init_hw(struct ring_pair_cb *ring); void hns_rcb_reset_ring_hw(struct hnae_queue *q); void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); +int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs); u32 hns_rcb_get_rx_coalesced_frames( struct rcb_common_cb *rcb_common, u32 port_idx); u32 hns_rcb_get_tx_coalesced_frames( diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 886cbbf25761..74d935d82cbc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -464,6 +464,7 @@ #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 +#define GMAC_FIFO_STATE_REG 0x0000UL #define GMAC_DUPLEX_TYPE_REG 0x0008UL #define GMAC_FD_FC_TYPE_REG 0x000CUL #define GMAC_TX_WATER_LINE_REG 0x0010UL diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 02a0ba20fad5..f56855e63c96 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1112,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev) struct hnae_handle *h = priv->ae_handle; int state = 1; + /* If there is no phy, do not need adjust link */ if (ndev->phydev) { - h->dev->ops->adjust_link(h, ndev->phydev->speed, - ndev->phydev->duplex); - state = ndev->phydev->link; + /* When phy link down, do nothing */ + if (ndev->phydev->link == 0) + return; + + if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed, + ndev->phydev->duplex)) { + /* because Hi161X chip don't support to change gmac + * speed and duplex with traffic. Delay 200ms to + * make sure there is no more data in chip FIFO. + */ + netif_carrier_off(ndev); + msleep(200); + h->dev->ops->adjust_link(h, ndev->phydev->speed, + ndev->phydev->duplex); + netif_carrier_on(ndev); + } } + state = state && h->dev->ops->get_status(h); if (state != priv->link) { -- GitLab From 455c4401fe7a538facaffb35b906ce19f1ece474 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Mon, 27 Aug 2018 09:59:30 +0800 Subject: [PATCH 0696/1692] net: hns: add netif_carrier_off before change speed and duplex If there are packets in hardware when changing the speed or duplex, it may cause hardware hang up. This patch adds netif_carrier_off before change speed and duplex in ethtool_ops.set_link_ksettings, and adds netif_carrier_on after complete the change. Signed-off-by: Peng Li Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 08f3c4743f74..774beda040a1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev, } if (h->dev->ops->adjust_link) { + netif_carrier_off(net_dev); h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); + netif_carrier_on(net_dev); return 0; } -- GitLab From 6e0bb04d0e4f597d8d8f4f21401a9636f2809fd1 Mon Sep 17 00:00:00 2001 From: Chris Brandt Date: Mon, 27 Aug 2018 12:42:02 -0500 Subject: [PATCH 0697/1692] sh_eth: Add R7S9210 support Add support for the R7S9210 which is part of the RZ/A2 series. Signed-off-by: Chris Brandt Acked-by: Rob Herring Signed-off-by: David S. Miller --- .../devicetree/bindings/net/sh_eth.txt | 1 + drivers/net/ethernet/renesas/sh_eth.c | 36 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt index 76db9f13ad96..abc36274227c 100644 --- a/Documentation/devicetree/bindings/net/sh_eth.txt +++ b/Documentation/devicetree/bindings/net/sh_eth.txt @@ -16,6 +16,7 @@ Required properties: "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC. "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC. "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC. + "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC. "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device. "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1 device. 
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index ad4433d59237..f27a0dc8c563 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -798,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = { .magic = 1, .cexcr = 1, }; + +/* R7S9210 */ +static struct sh_eth_cpu_data r7s9210_data = { + .soft_reset = sh_eth_soft_reset, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_rcar, + + .register_type = SH_ETH_REG_FAST_SH4, + + .edtrr_trns = EDTRR_TRNS_ETHER, + .ecsr_value = ECSR_ICD, + .ecsipr_value = ECSIPR_ICDIP, + .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP | + EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP | + EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP | + EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP | + EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP | + EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP | + EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, + + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .no_ade = 1, + .xdfar_rw = 1, +}; #endif /* CONFIG_OF */ static void sh_eth_set_rate_sh7724(struct net_device *ndev) @@ -3121,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = { { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, + { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data }, { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, { } -- GitLab From 85eb9af182243ce9a8b72410d5321c440ac5f8d7 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 27 Aug 2018 22:56:22 +0200 Subject: [PATCH 0698/1692] net/sched: act_pedit: fix dump of extended layered op in the (rare) case of failure in nla_nest_start(), missing NULL checks in tcf_pedit_key_ex_dump() can make the following command # tc action add action pedit ex munge ip ttl set 64 dereference a NULL pointer: BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 PGD 800000007d1cd067 P4D 800000007d1cd067 PUD 7acd3067 PMD 0 Oops: 0002 [#1] SMP PTI CPU: 0 PID: 3336 Comm: tc Tainted: G E 4.18.0.pedit+ #425 Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011 RIP: 0010:tcf_pedit_dump+0x19d/0x358 [act_pedit] Code: be 02 00 00 00 48 89 df 66 89 44 24 20 e8 9b b1 fd e0 85 c0 75 46 8b 83 c8 00 00 00 49 83 c5 08 48 03 83 d0 00 00 00 4d 39 f5 <66> 89 04 25 00 00 00 00 0f 84 81 01 00 00 41 8b 45 00 48 8d 4c 24 RSP: 0018:ffffb5d4004478a8 EFLAGS: 00010246 RAX: ffff8880fcda2070 RBX: ffff8880fadd2900 RCX: 0000000000000000 RDX: 0000000000000002 RSI: ffffb5d4004478ca RDI: ffff8880fcda206e RBP: ffff8880fb9cb900 R08: 0000000000000008 R09: ffff8880fcda206e R10: ffff8880fadd2900 R11: 0000000000000000 R12: ffff8880fd26cf40 R13: ffff8880fc957430 R14: ffff8880fc957430 R15: ffff8880fb9cb988 FS: 00007f75a537a740(0000) GS:ffff8880fda00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 000000007a2fa005 CR4: 00000000001606f0 Call Trace: ? 
__nla_reserve+0x38/0x50 tcf_action_dump_1+0xd2/0x130 tcf_action_dump+0x6a/0xf0 tca_get_fill.constprop.31+0xa3/0x120 tcf_action_add+0xd1/0x170 tc_ctl_action+0x137/0x150 rtnetlink_rcv_msg+0x263/0x2d0 ? _cond_resched+0x15/0x40 ? rtnl_calcit.isra.30+0x110/0x110 netlink_rcv_skb+0x4d/0x130 netlink_unicast+0x1a3/0x250 netlink_sendmsg+0x2ae/0x3a0 sock_sendmsg+0x36/0x40 ___sys_sendmsg+0x26f/0x2d0 ? do_wp_page+0x8e/0x5f0 ? handle_pte_fault+0x6c3/0xf50 ? __handle_mm_fault+0x38e/0x520 ? __sys_sendmsg+0x5e/0xa0 __sys_sendmsg+0x5e/0xa0 do_syscall_64+0x5b/0x180 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7f75a4583ba0 Code: c3 48 8b 05 f2 62 2c 00 f7 db 64 89 18 48 83 cb ff eb dd 0f 1f 80 00 00 00 00 83 3d fd c3 2c 00 00 75 10 b8 2e 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 ae cc 00 00 48 89 04 24 RSP: 002b:00007fff60ee7418 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 00007fff60ee7540 RCX: 00007f75a4583ba0 RDX: 0000000000000000 RSI: 00007fff60ee7490 RDI: 0000000000000003 RBP: 000000005b842d3e R08: 0000000000000002 R09: 0000000000000000 R10: 00007fff60ee6ea0 R11: 0000000000000246 R12: 0000000000000000 R13: 00007fff60ee7554 R14: 0000000000000001 R15: 000000000066c100 Modules linked in: act_pedit(E) ip6table_filter ip6_tables iptable_filter binfmt_misc crct10dif_pclmul ext4 crc32_pclmul mbcache ghash_clmulni_intel jbd2 pcbc snd_hda_codec_generic snd_hda_intel snd_hda_codec snd_hda_core snd_hwdep snd_seq snd_seq_device snd_pcm aesni_intel crypto_simd snd_timer cryptd glue_helper snd joydev pcspkr soundcore virtio_balloon i2c_piix4 nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables xfs libcrc32c ata_generic pata_acpi virtio_net net_failover virtio_blk virtio_console failover qxl crc32c_intel drm_kms_helper syscopyarea serio_raw sysfillrect sysimgblt fb_sys_fops ttm drm ata_piix virtio_pci libata virtio_ring i2c_core virtio floppy dm_mirror dm_region_hash dm_log dm_mod [last unloaded: act_pedit] CR2: 0000000000000000 Like it's done for other TC actions, give up dumping pedit rules and return an error if nla_nest_start() returns NULL. Fixes: 71d0ed7079df ("net/act_pedit: Support using offset relative to the conventional network headers") Signed-off-by: Davide Caratti Acked-by: Cong Wang Signed-off-by: David S. 
Miller --- net/sched/act_pedit.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 107034070019..ad99a99f11f6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, { struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); + if (!keys_start) + goto nla_failure; for (; n > 0; n--) { struct nlattr *key_start; key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); + if (!key_start) + goto nla_failure; if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || - nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) { - nlmsg_trim(skb, keys_start); - return -EINVAL; - } + nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) + goto nla_failure; nla_nest_end(skb, key_start); @@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb, nla_nest_end(skb, keys_start); return 0; +nla_failure: + nla_nest_cancel(skb, keys_start); + return -EINVAL; } static int tcf_pedit_init(struct net *net, struct nlattr *nla, @@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; if (p->tcfp_keys_ex) { - tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); + if (tcf_pedit_key_ex_dump(skb, + p->tcfp_keys_ex, + p->tcfp_nkeys)) + goto nla_put_failure; if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) goto nla_put_failure; -- GitLab From afe49de44c27a89e8e9631c44b5ffadf6ace65e2 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Tue, 28 Aug 2018 13:40:51 +0200 Subject: [PATCH 0699/1692] ipv6: fix cleanup ordering for ip6_mr failure Commit 15e668070a64 ("ipv6: reorder icmpv6_init() and ip6_mr_init()") moved the cleanup label for ipmr_fail, but should have changed the contents of the cleanup labels as well. Now we can end up cleaning up icmpv6 even though it hasn't been initialized (jump to icmp_fail or ipmr_fail). Simply undo things in the reverse order of their initialization. Example of panic (triggered by faking a failure of icmpv6_init): kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] PREEMPT SMP KASAN PTI [...] RIP: 0010:__list_del_entry_valid+0x79/0x160 [...] Call Trace: ? lock_release+0x8a0/0x8a0 unregister_pernet_operations+0xd4/0x560 ? ops_free_list+0x480/0x480 ? down_write+0x91/0x130 ? unregister_pernet_subsys+0x15/0x30 ? down_read+0x1b0/0x1b0 ? up_read+0x110/0x110 ? kmem_cache_create_usercopy+0x1b4/0x240 unregister_pernet_subsys+0x1d/0x30 icmpv6_cleanup+0x1d/0x30 inet6_init+0x1b5/0x23f Fixes: 15e668070a64 ("ipv6: reorder icmpv6_init() and ip6_mr_init()") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. 
Miller --- net/ipv6/af_inet6.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 673bba31eb18..e5da133c6932 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -1113,11 +1113,11 @@ static int __init inet6_init(void) igmp_fail: ndisc_cleanup(); ndisc_fail: - ip6_mr_cleanup(); + icmpv6_cleanup(); icmp_fail: - unregister_pernet_subsys(&inet6_net_ops); + ip6_mr_cleanup(); ipmr_fail: - icmpv6_cleanup(); + unregister_pernet_subsys(&inet6_net_ops); register_pernet_fail: sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); -- GitLab From a03dc36bdca6b614651fedfcd8559cf914d2d21d Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Tue, 28 Aug 2018 13:40:52 +0200 Subject: [PATCH 0700/1692] ipv6: fix cleanup ordering for pingv6 registration Commit 6d0bfe226116 ("net: ipv6: Add IPv6 support to the ping socket.") contains an error in the cleanup path of inet6_init(): when proto_register(&pingv6_prot, 1) fails, we try to unregister &pingv6_prot. When rawv6_init() fails, we skip unregistering &pingv6_prot. Example of panic (triggered by faking a failure of proto_register(&pingv6_prot, 1)): general protection fault: 0000 [#1] PREEMPT SMP KASAN PTI [...] RIP: 0010:__list_del_entry_valid+0x79/0x160 [...] Call Trace: proto_unregister+0xbb/0x550 ? trace_preempt_on+0x6f0/0x6f0 ? sock_no_shutdown+0x10/0x10 inet6_init+0x153/0x1b8 Fixes: 6d0bfe226116 ("net: ipv6: Add IPv6 support to the ping socket.") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- net/ipv6/af_inet6.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e5da133c6932..9a4261e50272 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -938,14 +938,14 @@ static int __init inet6_init(void) err = proto_register(&pingv6_prot, 1); if (err) - goto out_unregister_ping_proto; + goto out_unregister_raw_proto; /* We MUST register RAW sockets before we create the ICMP6, * IGMP6, or NDISC control sockets. */ err = rawv6_init(); if (err) - goto out_unregister_raw_proto; + goto out_unregister_ping_proto; /* Register the family here so that the init calls below will * be able to create sockets. (?? is this dangerous ??) -- GitLab From f707ef61e17261f2bb18c3e4871c6f135ab3aba9 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Tue, 28 Aug 2018 13:40:53 +0200 Subject: [PATCH 0701/1692] net: rtnl: return early from rtnl_unregister_all when protocol isn't registered rtnl_unregister_all(PF_INET6) gets called from inet6_init in cases when no handler has been registered for PF_INET6 yet, for example if ip6_mr_init() fails. Abort and avoid a NULL pointer deref in that case. Example of panic (triggered by faking a failure of register_pernet_subsys): general protection fault: 0000 [#1] PREEMPT SMP KASAN PTI [...] RIP: 0010:rtnl_unregister_all+0x17e/0x2a0 [...] Call Trace: ? rtnetlink_net_init+0x250/0x250 ? sock_unregister+0x103/0x160 ? kernel_getsockopt+0x200/0x200 inet6_init+0x197/0x20d Fixes: e2fddf5e96df ("[IPV6]: Make af_inet6 to check ip6_route_init return value.") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 24431e578310..60c928894a78 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol) rtnl_lock(); tab = rtnl_msg_handlers[protocol]; + if (!tab) { + rtnl_unlock(); + return; + } RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { link = tab[msgindex]; -- GitLab From c305660b325463cdf3d944492f96155dc931b448 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 28 Aug 2018 10:19:20 -0500 Subject: [PATCH 0702/1692] net: stmmac: build the dwmac-socfpga platform driver for Stratix10 The Stratix10 SoC is an AARCH64 based platform that shares the same ethernet controller that is on other SoCFPGA platforms. Build the platform driver. Signed-off-by: Dinh Nguyen Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index bf4acebb6bcd..324049eebb9b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" - default ARCH_SOCFPGA + default (ARCH_SOCFPGA || ARCH_STRATIX10) depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) select MFD_SYSCON help -- GitLab From c3c397c1f16c51601a3fac4fe0c63ad8aa85a904 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Tue, 28 Aug 2018 12:33:15 -0700 Subject: [PATCH 0703/1692] net: bcmgenet: use MAC link status for fixed phy When using the fixed PHY with GENET (e.g. MOCA) the PHY link status can be determined from the internal link status captured by the MAC. This allows the PHY state machine to use the correct link state with the fixed PHY even if MAC link event interrupts are missed when the net device is opened. Fixes: 8d88c6ebb34c ("net: bcmgenet: enable MoCA link state change detection") Signed-off-by: Doug Berger Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3 +++ drivers/net/ethernet/broadcom/genet/bcmmii.c | 10 ++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b773bc07edf7..14b49612aa86 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -186,6 +186,9 @@ struct bcmgenet_mib_counters { #define UMAC_MAC1 0x010 #define UMAC_MAX_FRAME_LEN 0x014 +#define UMAC_MODE 0x44 +#define MODE_LINK_STATUS (1 << 5) + #define UMAC_EEE_CTRL 0x064 #define EN_LPI_RX_PAUSE (1 << 0) #define EN_LPI_TX_PFC (1 << 1) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 5333274a283c..4241ae928d4a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev) static int bcmgenet_fixed_phy_link_update(struct net_device *dev, struct fixed_phy_status *status) { - if (dev && dev->phydev && status) - status->link = dev->phydev->link; + struct bcmgenet_priv *priv; + u32 reg; + + if (dev && dev->phydev && status) { + priv = netdev_priv(dev); + reg = bcmgenet_umac_readl(priv, UMAC_MODE); + status->link = !!(reg & MODE_LINK_STATUS); + } return 0; } -- GitLab From 9174c1d6196d612799808009ec2796df021ab625 Mon Sep 17 00:00:00 2001 From: Xiaolin Zhang Date: Tue, 7 Aug 2018 20:39:16 +0800 Subject: [PATCH 0704/1692] drm/i915/gvt: emulate gen9 dbuf ctl register access there is below call track at boot time when booting guest with kabylake vgpu with specifal configuration and this try to fix it. [drm:gen9_dbuf_enable [i915]] *ERROR* DBuf power enable timeout ------------[ cut here ]------------ WARNING: gen9_dc_off_power_well_enable+0x224/0x230 [i915] Unexpected DBuf power power state (0x8000000a) Hardware name: Red Hat KVM, BIOS 1.11.0-2.el7 04/01/2014 Call Trace: [] dump_stack+0x19/0x1b [] __warn+0xd8/0x100 [] warn_slowpath_fmt+0x5f/0x80 [] gen9_dc_off_power_well_enable+0x224/0x230 [i915] [] intel_power_well_enable+0x42/0x50 [i915] [] __intel_display_power_get_domain+0x8a/0xb0 [i915] [] intel_display_power_get+0x33/0x50 [i915] [] intel_display_set_init_power+0x45/0x50 [i915] [] intel_power_domains_init_hw+0x63/0x8a0 [i915] [] i915_driver_load+0xae3/0x1760 [i915] [] ? nvmem_register+0x500/0x500 [] i915_pci_probe+0x2c/0x50 [i915] [] local_pci_probe+0x4a/0xb0 [] pci_device_probe+0x109/0x160 [] driver_probe_device+0xc5/0x3e0 [] __driver_attach+0x93/0xa0 [] ? __device_attach+0x50/0x50 [] bus_for_each_dev+0x75/0xc0 [] driver_attach+0x1e/0x20 [] bus_add_driver+0x200/0x2d0 [] driver_register+0x64/0xf0 [] __pci_register_driver+0xa5/0xc0 [] ? 0xffffffffc0928fff [] i915_init+0x59/0x5c [i915] [] do_one_initcall+0xba/0x240 [] load_module+0x272c/0x2bc0 [] ? 
ddebug_proc_write+0xf0/0xf0 [] SyS_init_module+0xc5/0x110 [] system_call_fastpath+0x1c/0x21 Signed-off-by: Xiaolin Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 7a58ca555197..450e730743a1 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, return 0; } +static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + write_vreg(vgpu, offset, p_data, bytes); + + if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST) + vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE; + else + vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE; + + return 0; +} + static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -2812,6 +2825,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, skl_power_well_ctl_write); + MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); + MMIO_D(_MMIO(0xa210), D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); @@ -2987,8 +3002,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) NULL, gen9_trtte_write); MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); - MMIO_D(_MMIO(0x45008), D_SKL_PLUS); - MMIO_D(_MMIO(0x46430), D_SKL_PLUS); MMIO_D(_MMIO(0x46520), D_SKL_PLUS); -- GitLab From c8ab5ac30ccc20a31672ab0f8938a6271dfe4122 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 20 Aug 2018 16:46:34 +0800 Subject: [PATCH 0705/1692] drm/i915/gvt: Make correct handling to vreg BXT_PHY_CTL_FAMILY Guest kernel will write to BXT_PHY_CTL_FAMILY to reset DDI PHY and pull BXT_PHY_CTL to check PHY status. Previous handling will set/reset BXT_PHY_CTL of all PHYs at same time on receiving vreg write to some BXT_PHY_CTL_FAMILY. If some BXT_PHY_CTL is already enabled, following reset to another BXT_PHY_CTL_FAMILY will clear the enabled BXT_PHY_CTL, which result in guest kernel print: ----------------------------------- [drm:intel_ddi_get_hw_state [i915]] *ERROR* Port B enabled but PHY powered down? (PHY_CTL 00000000) ----------------------------------- The correct handling should operate BXT_PHY_CTL_FAMILY and BXT_PHY_CTL on the same DDI. v2: Use correct reg define. The naming looks confusing, however current i915_reg.h bind DPIO_PHY0 to _PHY_CTL_FAMILY_DDI and bind DPIO_PHY1 to _PHY_CTL_FAMILY_EDP, pairing to _BXT_PHY_CTL_DDI_A and _BXT_PHY_CTL_DDI_B respectively. v3: v2 incorrectly map _PHY_CTL_FAMILY_EDP to _BXT_PHY_CTL_DDI_A. BXT_PHY_CTL() looks up DDI using PORTx but not PHYx. Based on DPIO_PHY to DDI mapping, make correct vreg handle to BXT_PHY_CTL on receiving vreg write to BXT_PHY_CTL_FAMILY. 
(He, Min) Current mapping according to bxt_power_wells: dpio-common-a: >>> DPIO_PHY1 >>> BXT_DPIO_CMN_A_POWER_DOMAINS >>> POWER_DOMAIN_PORT_DDI_A_LANES >>> PORT_A dpio-common-bc: >>> DPIO_PHY0 >>> BXT_DPIO_CMN_BC_POWER_DOMAINS >>> POWER_DOMAIN_PORT_DDI_B_LANES | POWER_DOMAIN_PORT_DDI_C_LANES >>> PORT_B or PORT_C Signed-off-by: Colin Xu Reviewed-by: He, Min Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 450e730743a1..d0db55a79627 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1538,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, u32 v = *(u32 *)p_data; u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; - vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; - vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; - vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; + switch (offset) { + case _PHY_CTL_FAMILY_EDP: + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; + break; + case _PHY_CTL_FAMILY_DDI: + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; + break; + } vgpu_vreg(vgpu, offset) = v; -- GitLab From b9b824a55876275f8506c1c187558ab22d879f73 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Fri, 17 Aug 2018 16:42:24 +0800 Subject: [PATCH 0706/1692] drm/i915/gvt: Handle GEN9_WM_CHICKEN3 with F_CMD_ACCESS. A recent patch introduced a strict check on command scanning: Commit 8d458ea0ec33 ("drm/i915/gvt: return error on cmd access") Before 8d458ea0ec33, if cmd_reg_handler() found a command accessing an mmio that was not marked as F_CMD_ACCESS, it simply returned 0 and logged an error. Now it returns -EBADRQC, which causes the workload to fail to submit. On BXT, i915 applies WaClearHIZ_WM_CHICKEN3, which programs GEN9_WM_CHICKEN3 by LRI when initializing the wa ctx. If it has no F_CMD_ACCESS flag, the vgpu will fail to start. Also add F_MODE_MASK since it's a mode mask register. v2: Refresh commit message to elaborate the issue symptom in detail. v3: Make SKL_PLUS share the same handling since GEN9_WM_CHICKEN3 should be F_CMD_ACCESS from the HW aspect. (yan, zhenyu) Signed-off-by: Colin Xu Acked-by: Zhao Yan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index d0db55a79627..72afa518edd9 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3044,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x44500), D_SKL_PLUS); MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, - NULL, NULL); + NULL, NULL); + MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); MMIO_D(_MMIO(0x4ab8), D_KBL); MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); -- GitLab From b2b599fb54f90ae395ddc51f0d49e4f28244a8f8 Mon Sep 17 00:00:00 2001 From: Hang Yuan Date: Wed, 29 Aug 2018 17:15:56 +0800 Subject: [PATCH 0707/1692] drm/i915/gvt: move intel_runtime_pm_get out of spin_lock in stop_schedule pm_runtime_get_sync() in intel_runtime_pm_get() might sleep if the i915 device is not active. When stopping the vgpu schedule, the device may be inactive. So we need to move runtime_pm_get out of the spin_lock/unlock.
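The rule being applied is the usual locking one: anything that may sleep must run outside the spinlock-protected region. As a rough sketch of the resulting ordering in intel_vgpu_stop_schedule() (illustrative only, not part of the patch; it assumes the 4.19-era intel_runtime_pm_get()/intel_runtime_pm_put() helpers taking a struct drm_i915_private pointer, as used in the diff below):

	intel_runtime_pm_get(dev_priv);	/* may call pm_runtime_get_sync(), which can sleep */
	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			/* hand the MMIO context back to the host */
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	intel_runtime_pm_put(dev_priv);	/* drop the reference only after the lock is released */
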
Fixes: b24881e0b0b6("drm/i915/gvt: Add runtime_pm_get/put into gvt_switch_mmio Cc: Signed-off-by: Hang Yuan Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/mmio_context.c | 2 -- drivers/gpu/drm/i915/gvt/sched_policy.c | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 42e1e6bdcc2c..e872f4847fbe 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, * performace for batch mmio read/write, so we need * handle forcewake mannually. */ - intel_runtime_pm_get(dev_priv); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); switch_mmio(pre, next, ring_id); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 09d7bb72b4ff..985fe81794dd 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -426,6 +426,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) &vgpu->gvt->scheduler; int ring_id; struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; if (!vgpu_data->active) return; @@ -444,6 +445,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) scheduler->current_vgpu = NULL; } + intel_runtime_pm_get(dev_priv); spin_lock_bh(&scheduler->mmio_context_lock); for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { if (scheduler->engine_owner[ring_id] == vgpu) { @@ -452,5 +454,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); + intel_runtime_pm_put(dev_priv); mutex_unlock(&vgpu->gvt->sched_lock); } -- GitLab From b244ffa15c8b1aabdc117c0b6008086df7b668b7 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Thu, 30 Aug 2018 10:50:36 +0800 Subject: [PATCH 0708/1692] drm/i915/gvt: Fix drm_format_mod value for vGPU plane Physical plane's tiling mode value is given directly as drm_format_mod for plane query, which is not correct fourcc code. Fix it by using correct intel tiling fourcc mod definition. Current qemu seems also doesn't correctly utilize drm_format_mod for plane object setting. Anyway this is required to fix the usage. 
v3: use DRM_FORMAT_MOD_LINEAR, fix comment v2: Fix missed old 'tiled' use for stride calculation Fixes: e546e281d33d ("drm/i915/gvt: Dmabuf support for GVT-g") Cc: Tina Zhang Cc: Gerd Hoffmann Cc: Colin Xu Reviewed-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 33 +++++++++++++++++++++------ drivers/gpu/drm/i915/gvt/fb_decoder.c | 5 ++-- drivers/gpu/drm/i915/gvt/fb_decoder.h | 2 +- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 6e3f56684f4e..51ed99a37803 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, unsigned int tiling_mode = 0; unsigned int stride = 0; - switch (info->drm_format_mod << 10) { - case PLANE_CTL_TILED_LINEAR: + switch (info->drm_format_mod) { + case DRM_FORMAT_MOD_LINEAR: tiling_mode = I915_TILING_NONE; break; - case PLANE_CTL_TILED_X: + case I915_FORMAT_MOD_X_TILED: tiling_mode = I915_TILING_X; stride = info->stride; break; - case PLANE_CTL_TILED_Y: + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: tiling_mode = I915_TILING_Y; stride = info->stride; break; default: - gvt_dbg_core("not supported tiling mode\n"); + gvt_dbg_core("invalid drm_format_mod %llx for tiling\n", + info->drm_format_mod); } obj->tiling_and_stride = tiling_mode | stride; } else { @@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev, info->height = p.height; info->stride = p.stride; info->drm_format = p.drm_format; - info->drm_format_mod = p.tiled; + + switch (p.tiled) { + case PLANE_CTL_TILED_LINEAR: + info->drm_format_mod = DRM_FORMAT_MOD_LINEAR; + break; + case PLANE_CTL_TILED_X: + info->drm_format_mod = I915_FORMAT_MOD_X_TILED; + break; + case PLANE_CTL_TILED_Y: + info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; + break; + case PLANE_CTL_TILED_YF: + info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; + break; + default: + gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); + } + info->size = (((p.stride * p.height * p.bpp) / 8) + - (PAGE_SIZE - 1)) >> PAGE_SHIFT; + (PAGE_SIZE - 1)) >> PAGE_SHIFT; } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { ret = intel_vgpu_decode_cursor_plane(vgpu, &c); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index face664be3e8..481896fb712a 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) { - plane->tiled = (val & PLANE_CTL_TILED_MASK) >> - _PLANE_CTL_TILED_SHIFT; + plane->tiled = val & PLANE_CTL_TILED_MASK; fmt = skl_format_to_drm( val & PLANE_CTL_FORMAT_MASK, val & PLANE_CTL_ORDER_RGBX, @@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, return -EINVAL; } - plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), + plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled, (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) ? 
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index cb055f3c81a2..60c155085029 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h @@ -101,7 +101,7 @@ struct intel_gvt; /* color space conversion and gamma correction are not included */ struct intel_vgpu_primary_plane_format { u8 enabled; /* plane is enabled */ - u8 tiled; /* X-tiled */ + u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */ u8 bpp; /* bits per pixel */ u32 hw_format; /* format field in the PRI_CTL register */ u32 drm_format; /* format in DRM definition */ -- GitLab From c4053ef322081554765e1b708d6cdd8855e1d72d Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Wed, 29 Aug 2018 09:44:39 +0300 Subject: [PATCH 0709/1692] net: mvpp2: initialize port of_node pointer Without a valid of_node in struct device we can't find the mvpp2 port device by its DT node. Specifically, this breaks of_find_net_device_by_node(). For example, the Armada 8040 based Clearfog GT-8K uses Marvell 88E6141 switch connected to the &cp1_eth2 port: &cp1_mdio { ... switch0: switch0@4 { compatible = "marvell,mv88e6085"; ... ports { ... port@5 { reg = <5>; label = "cpu"; ethernet = <&cp1_eth2>; }; }; }; }; Without this patch, dsa_register_switch() returns -EPROBE_DEFER because of_find_net_device_by_node() can't find the device_node of the &cp1_eth2 device. Reviewed-by: Andrew Lunn Signed-off-by: Baruch Siach Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 32d785b616e1..28500417843e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4803,6 +4803,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->min_mtu = ETH_MIN_MTU; /* 9704 == 9728 - 20 and rounding to 8 */ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; + dev->dev.of_node = port_node; /* Phylink isn't used w/ ACPI as of now */ if (port_node) { -- GitLab From 97763dc0f4010bc20e2969a6bf9a40a2551c4f79 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 29 Aug 2018 10:22:33 +0200 Subject: [PATCH 0710/1692] net_sched: reject unknown tcfa_action values After the commit 802bfb19152c ("net/sched: user-space can't set unknown tcfa_action values"), unknown tcfa_action values are converted to TC_ACT_UNSPEC, but the common agreement is instead rejecting such configurations. This change also introduces a helper to simplify the destruction of a single action, avoiding code duplication. v1 -> v2: - helper is now static and renamed according to act_* convention - updated extack message, according to the new behavior Fixes: 802bfb19152c ("net/sched: user-space can't set unknown tcfa_action values") Signed-off-by: Paolo Abeni Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/act_api.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index db83dac1e7f4..316c98bb87e4 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -662,6 +662,13 @@ int tcf_action_destroy(struct tc_action *actions[], int bind) return ret; } +static int tcf_action_destroy_1(struct tc_action *a, int bind) +{ + struct tc_action *actions[] = { a, NULL }; + + return tcf_action_destroy(actions, bind); +} + static int tcf_action_put(struct tc_action *p) { return __tcf_action_put(p, false); @@ -881,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { err = tcf_action_goto_chain_init(a, tp); if (err) { - struct tc_action *actions[] = { a, NULL }; - - tcf_action_destroy(actions, bind); + tcf_action_destroy_1(a, bind); NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); return ERR_PTR(err); } } if (!tcf_action_valid(a->tcfa_action)) { - NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead"); - a->tcfa_action = TC_ACT_UNSPEC; + tcf_action_destroy_1(a, bind); + NL_SET_ERR_MSG(extack, "Invalid control action value"); + return ERR_PTR(-EINVAL); } return a; -- GitLab From 25a8238f4cc8425d4aade4f9041be468d0e8aa2e Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 29 Aug 2018 10:22:34 +0200 Subject: [PATCH 0711/1692] tc-testing: add test-cases for numeric and invalid control action Only the police action allows us to specify an arbitrary numeric value for the control action. This change introduces an explicit test case for the above feature and then leverage it for testing the kernel behavior for invalid control actions (reject). Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- .../tc-testing/tc-tests/actions/police.json | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json index f03763d81617..30f9b54bd666 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json @@ -312,6 +312,54 @@ "$TC actions flush action police" ] }, + { + "id": "6aaf", + "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]", + "category": [ + "actions", + "police" + ], + "setup": [ + [ + "$TC actions flush action police", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1", + "expExitCode": "0", + "verifyCmd": "$TC actions get action police index 1", + "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe", + "matchCount": "1", + "teardown": [ + "$TC actions flush action police" + ] + }, + { + "id": "29b1", + "name": "Add police actions with conform-exceed control /drop", + "category": [ + "actions", + "police" + ], + "setup": [ + [ + "$TC actions flush action police", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1", + "expExitCode": "255", + "verifyCmd": "$TC actions ls action police", + "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ", + "matchCount": "0", + "teardown": [ + "$TC actions flush action police" + ] + }, { "id": "c26f", "name": "Add police action with invalid peakrate value", -- GitLab From 4f0223bfe9c3e62d8f45a85f1ef1b18a8a263ef9 Mon Sep 17 00:00:00 2001 From: Arunk Khandavalli Date: Thu, 30 Aug 2018 00:40:16 +0300 Subject: [PATCH 0712/1692] cfg80211: nl80211_update_ft_ies() to validate NL80211_ATTR_IE nl80211_update_ft_ies() tried to validate NL80211_ATTR_IE with is_valid_ie_attr() before dereferencing it, but that helper function returns true in case of NULL pointer (i.e., attribute not included). This can result to dereferencing a NULL pointer. Fix that by explicitly checking that NL80211_ATTR_IE is included. Fixes: 355199e02b83 ("cfg80211: Extend support for IEEE 802.11r Fast BSS Transition") Signed-off-by: Arunk Khandavalli Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ce0149a86c13..733ccf867972 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -12099,6 +12099,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MDID] || + !info->attrs[NL80211_ATTR_IE] || !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) return -EINVAL; -- GitLab From 1eb507903665442360a959136dfa3234c43db085 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 29 Aug 2018 21:03:25 +0200 Subject: [PATCH 0713/1692] mac80211: do not convert to A-MSDU if frag/subframe limited Do not start to aggregate packets in a A-MSDU frame (converting the first subframe to A-MSDU, adding the header) if max_tx_fragments or max_amsdu_subframes limits are already exceeded by it. In particular, this happens when drivers set the limit to 1 to avoid A-MSDUs at all. 
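Concretely, the reordering amounts to counting fragments first and only then converting the head frame. A minimal sketch (not part of the patch; names follow the 4.19-era ieee80211_amsdu_aggregate(), see the diff below):

	nfrags = 1 + skb_shinfo(skb)->nr_frags;
	nfrags += 1 + skb_shinfo(head)->nr_frags;
	/* ... also count fragments and subframes already queued on head's frag_list ... */

	if (max_frags && nfrags > max_frags)
		goto out;	/* over the budget: leave head untouched */

	/* only now convert the first subframe to A-MSDU format */
	if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
		goto out;
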
Signed-off-by: Lorenzo Bianconi [reword commit message to be more precise] Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 667a73d6eb5c..1aac5e3c7eee 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3208,9 +3208,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, if (skb->len + head->len > max_amsdu_len) goto out; - if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) - goto out; - nfrags = 1 + skb_shinfo(skb)->nr_frags; nfrags += 1 + skb_shinfo(head)->nr_frags; frag_tail = &skb_shinfo(head)->frag_list; @@ -3226,6 +3223,9 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, if (max_frags && nfrags > max_frags) goto out; + if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) + goto out; + /* * Pad out the previous subframe to a multiple of 4 by adding the * padding to the next one, that's being added. Note that head->len -- GitLab From 43822c98f2ebb2cbd5e467ab72bbcdae7f0caa22 Mon Sep 17 00:00:00 2001 From: Harry Mallon Date: Tue, 28 Aug 2018 22:51:29 +0100 Subject: [PATCH 0714/1692] HID: hid-saitek: Add device ID for RAT 7 Contagion Signed-off-by: Harry Mallon Signed-off-by: Jiri Kosina --- drivers/hid/hid-ids.h | 1 + drivers/hid/hid-saitek.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index cb2d3170d9dc..19a66ceca217 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -953,6 +953,7 @@ #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 #define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb +#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index 39e642686ff0..683861f324e3 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = { .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), .driver_data = SAITEK_RELEASE_MODE_RAT7 }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION), + .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9), .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), -- GitLab From aa58acf325b4aadeecae2bfc90658273b47dbace Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 30 Aug 2018 10:55:49 +0200 Subject: [PATCH 0715/1692] mac80211: always account for A-MSDU header changes In the error path of changing the SKB headroom of the second A-MSDU subframe, we would not account for the already-changed length of the first frame that just got converted to be in A-MSDU format and thus is a bit longer now. Fix this by doing the necessary accounting. It would be possible to reorder the operations, but that would make the code more complex (to calculate the necessary pad), and the headroom expansion should not fail frequently enough to make that worthwhile. 
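The accounting pattern can also be seen in isolation (hypothetical names, not mac80211 code): once the head may already have grown, every exit path, success or failure, folds the length delta back into the backlog counter, which is why the update sits under a label that both paths reach:

    #include <stdbool.h>

    struct buf { long len; };

    /* hypothetical step that grows the buffer and then fails anyway */
    static int grow_header(struct buf *b)
    {
            b->len += 8;            /* header bytes already added ... */
            return -1;              /* ... but a later allocation fails */
    }

    static bool append_subframe(struct buf *head, const struct buf *frag,
                                long *backlog)
    {
            long orig_len = head->len;
            bool ok = false;

            if (grow_header(head) < 0)
                    goto out_recalc;

            head->len += frag->len;
            ok = true;

    out_recalc:
            /* success or failure, account for whatever head gained */
            if (head->len != orig_len)
                    *backlog += head->len - orig_len;
            return ok;
    }

    int main(void)
    {
            struct buf head = { .len = 100 }, frag = { .len = 60 };
            long backlog = head.len;

            append_subframe(&head, &frag, &backlog);
            return backlog == head.len ? 0 : 1;     /* invariant holds either way */
    }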
Fixes: 6e0456b54545 ("mac80211: add A-MSDU tx support") Signed-off-by: Johannes Berg Acked-by: Lorenzo Bianconi Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1aac5e3c7eee..6ca0865de945 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3239,7 +3239,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2 + pad)) - goto out; + goto out_recalc; ret = true; data = skb_push(skb, ETH_ALEN + 2); @@ -3256,11 +3256,13 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, head->data_len += skb->len; *frag_tail = skb; - flow->backlog += head->len - orig_len; - tin->backlog_bytes += head->len - orig_len; - - fq_recalc_backlog(fq, tin, flow); +out_recalc: + if (head->len != orig_len) { + flow->backlog += head->len - orig_len; + tin->backlog_bytes += head->len - orig_len; + fq_recalc_backlog(fq, tin, flow); + } out: spin_unlock_bh(&fq->lock); -- GitLab From 36bf9da2913054c218337d8cd7cb11bddc1fafb0 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Aug 2018 14:45:14 +0900 Subject: [PATCH 0716/1692] x86/build: Remove jump label quirk for GCC older than 4.5.2 Commit cafa0010cd51 ("Raise the minimum required gcc version to 4.6") bumped the minimum GCC version to 4.6 for all architectures. Remove the workaround code. It was the only user of cc-if-fullversion. Remove the macro as well. Signed-off-by: Masahiro Yamada Signed-off-by: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Michal Marek Cc: linux-kbuild@vger.kernel.org Link: https://lkml.kernel.org/r/1535348714-25457-1-git-send-email-yamada.masahiro@socionext.com --- arch/x86/Makefile | 12 ------------ scripts/Kbuild.include | 4 ---- 2 files changed, 16 deletions(-) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94859241bc3e..8fc8f94ef5f5 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -179,18 +179,6 @@ ifndef CC_HAVE_ASM_GOTO $(error Compiler lacks asm-goto support.) endif -# -# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a -# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way -# to test for this bug at compile-time because the test case needs to execute, -# which is a no-go for cross compilers. So check the GCC version instead. 
-# -ifdef CONFIG_JUMP_LABEL - ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1) - ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1) - endif -endif - ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) # This compiler flag is not supported by Clang: KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c75413d05a63..ce53639a864a 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \ # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) -# cc-if-fullversion -# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1) -cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4)) - # cc-ldoption # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) cc-ldoption = $(call try-run,\ -- GitLab From 1f59a4581b5ecfe9b4f049a7a2cf904d8352842d Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 27 Aug 2018 14:40:09 -0700 Subject: [PATCH 0717/1692] x86/irqflags: Mark native_restore_fl extern inline This should have been marked extern inline in order to pick up the out of line definition in arch/x86/kernel/irqflags.S. Fixes: 208cbb325589 ("x86/irqflags: Provide a declaration for native_save_fl") Reported-by: Ben Hutchings Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Reviewed-by: Juergen Gross Cc: "H. Peter Anvin" Cc: Boris Ostrovsky Cc: Greg Kroah-Hartman Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180827214011.55428-1-ndesaulniers@google.com --- arch/x86/include/asm/irqflags.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c14f2a74b2be..15450a675031 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void) return flags; } -static inline void native_restore_fl(unsigned long flags) +extern inline void native_restore_fl(unsigned long flags); +extern inline void native_restore_fl(unsigned long flags) { asm volatile("push %0 ; popf" : /* no output */ -- GitLab From f12d11c5c184626b4befdee3d573ec8237405a33 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 28 Aug 2018 20:40:33 +0200 Subject: [PATCH 0718/1692] x86/entry/64: Wipe KASAN stack shadow before rewind_stack_do_exit() Reset the KASAN shadow state of the task stack before rewinding RSP. Without this, a kernel oops will leave parts of the stack poisoned, and code running under do_exit() can trip over such poisoned regions and cause nonsensical false-positive KASAN reports about stack-out-of-bounds bugs. This does not wipe the exception stacks; if an oops happens on an exception stack, it might result in random KASAN false-positives from other tasks afterwards. This is probably relatively uninteresting, since if the kernel oopses on an exception stack, there are most likely bigger things to worry about. It'd be more interesting if vmapped stacks and KASAN were compatible, since then handle_stack_overflow() would oops from exception stack context. 
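The same idea has a user-space analogue with ASan's public interface (illustration only; the kernel hunk below uses kasan_unpoison_task_stack()): memory that still carries poison from an abandoned user must be unpoisoned before reuse, otherwise later accesses raise false positives. Assumes a compiler shipping sanitizer/asan_interface.h; build with -fsanitize=address.

    #include <sanitizer/asan_interface.h>
    #include <string.h>

    static char scratch[256];

    static void abandon_halfway(void)
    {
            /* pretend an error path bailed out with poison still applied */
            __asan_poison_memory_region(scratch + 128, 128);
    }

    int main(void)
    {
            abandon_halfway();

            /* analogous to unpoisoning the task stack before the rewind */
            __asan_unpoison_memory_region(scratch, sizeof(scratch));

            memset(scratch, 0, sizeof(scratch));    /* no false positive now */
            return 0;
    }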
Fixes: 2deb4be28077 ("x86/dumpstack: When OOPSing, rewind the stack before do_exit()") Signed-off-by: Jann Horn Signed-off-by: Thomas Gleixner Acked-by: Andrey Ryabinin Cc: Andy Lutomirski Cc: Dmitry Vyukov Cc: Alexander Potapenko Cc: Kees Cook Cc: kasan-dev@googlegroups.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180828184033.93712-1-jannh@google.com --- arch/x86/kernel/dumpstack.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 9c8652974f8e..1596e6bfea6f 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -346,7 +347,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) * We're not going to return, but we might be on an IST stack or * have very little stack space left. Rewind the stack and kill * the task. + * Before we rewind the stack, we have to tell KASAN that we're going to + * reuse the task stack and that existing poisons are invalid. */ + kasan_unpoison_task_stack(current); rewind_stack_do_exit(signr); } NOKPROBE_SYMBOL(oops_end); -- GitLab From cb9d7fd51d9fbb329d182423bd7b92d0f8cb0e01 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Tue, 21 Aug 2018 17:25:07 +0200 Subject: [PATCH 0719/1692] watchdog: Mark watchdog touch functions as notrace Some architectures need to use stop_machine() to patch functions for ftrace, and the assumption is that the stopped CPUs do not make function calls to traceable functions when they are in the stopped state. Commit ce4f06dcbb5d ("stop_machine: Touch_nmi_watchdog() after MULTI_STOP_PREPARE") added calls to the watchdog touch functions from the stopped CPUs and those functions lack notrace annotations. This leads to crashes when enabling/disabling ftrace on ARM kernels built with the Thumb-2 instruction set. Fix it by adding the necessary notrace annotations. Fixes: ce4f06dcbb5d ("stop_machine: Touch_nmi_watchdog() after MULTI_STOP_PREPARE") Signed-off-by: Vincent Whitchurch Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: oleg@redhat.com Cc: tj@kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180821152507.18313-1-vincent.whitchurch@axis.com --- kernel/watchdog.c | 4 ++-- kernel/watchdog_hld.c | 2 +- kernel/workqueue.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5470dce212c0..977918d5d350 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -261,7 +261,7 @@ static void __touch_watchdog(void) * entering idle state. This should only be used for scheduler events. * Use touch_softlockup_watchdog() for everything else. */ -void touch_softlockup_watchdog_sched(void) +notrace void touch_softlockup_watchdog_sched(void) { /* * Preemption can be enabled. 
It doesn't matter which CPU's timestamp @@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void) raw_cpu_write(watchdog_touch_ts, 0); } -void touch_softlockup_watchdog(void) +notrace void touch_softlockup_watchdog(void) { touch_softlockup_watchdog_sched(); wq_watchdog_touch(raw_smp_processor_id()); diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 1f7020d65d0a..71381168dede 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -29,7 +29,7 @@ static struct cpumask dead_events_mask; static unsigned long hardlockup_allcpu_dumped; static atomic_t watchdog_cpus = ATOMIC_INIT(0); -void arch_touch_nmi_watchdog(void) +notrace void arch_touch_nmi_watchdog(void) { /* * Using __raw here because some code paths have diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 60e80198c3df..0280deac392e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) mod_timer(&wq_watchdog_timer, jiffies + thresh); } -void wq_watchdog_touch(int cpu) +notrace void wq_watchdog_touch(int cpu) { if (cpu >= 0) per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; -- GitLab From 13ba17bee18e321b073b49a88dcab10881f757da Mon Sep 17 00:00:00 2001 From: Mukesh Ojha Date: Fri, 24 Aug 2018 18:03:53 +0530 Subject: [PATCH 0720/1692] notifier: Remove notifier header file wherever not used The conversion of the hotplug notifiers to a state machine left the notifier.h includes around in some places. Remove them. Signed-off-by: Mukesh Ojha Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/1535114033-4605-1-git-send-email-mojha@codeaurora.org --- fs/buffer.c | 1 - kernel/printk/printk.c | 1 - lib/percpu_counter.c | 1 - mm/page-writeback.c | 1 - mm/page_alloc.c | 1 - mm/slub.c | 1 - net/core/dev.c | 1 - 7 files changed, 7 deletions(-) diff --git a/fs/buffer.c b/fs/buffer.c index 4cc679d5bf58..6f1ae3ac9789 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 924e37fb1620..fd6f8ed28e01 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index c72577e472f2..a66595ba5543 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -4,7 +4,6 @@ */ #include -#include #include #include #include diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6551d3b0dc30..84ae9bf5858a 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e75865d58ba7..05e983f42316 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/slub.c b/mm/slub.c index ce2b9e5cea77..8da34a8af53d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -19,7 +19,6 @@ #include #include "slab.h" #include -#include #include #include #include diff --git a/net/core/dev.c b/net/core/dev.c index 325fc5088370..82114e1111e6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -93,7 +93,6 @@ #include #include #include -#include #include #include #include -- GitLab From 113fc08357ad4e8b84caa75402430875d9ac4c1a Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Aug 2018 12:39:43 +0900 Subject: [PATCH 0721/1692] objtool: Remove workaround for 
unreachable warnings from old GCC Commit cafa0010cd51 ("Raise the minimum required gcc version to 4.6") bumped the minimum GCC version to 4.6 for all architectures. This effectively reverts commit da541b20021c ("objtool: Skip unreachable warnings for GCC 4.4 and older"), which was a workaround for GCC 4.4 or older. Signed-off-by: Masahiro Yamada Signed-off-by: Thomas Gleixner Acked-by: Josh Poimboeuf Cc: Peter Zijlstra Cc: Michal Marek Cc: linux-kbuild@vger.kernel.org Link: https://lkml.kernel.org/r/1535341183-19994-1-git-send-email-yamada.masahiro@socionext.com --- scripts/Makefile.build | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 1c48572223d1..5a2d1c9578a0 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -246,8 +246,6 @@ objtool_args += --no-fp endif ifdef CONFIG_GCOV_KERNEL objtool_args += --no-unreachable -else -objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable) endif ifdef CONFIG_RETPOLINE ifneq ($(RETPOLINE_CFLAGS),) -- GitLab From 9222f606506c5f8ca2c8b8c939d59ed3e6ac4148 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 28 Aug 2018 08:55:14 +0200 Subject: [PATCH 0722/1692] x86/alternatives: Lockdep-enforce text_mutex in text_poke*() text_poke() and text_poke_bp() must be called with text_mutex held. Put proper lockdep anotation in place instead of just mentioning the requirement in a comment. Reported-by: Peter Zijlstra Signed-off-by: Jiri Kosina Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Acked-by: Masami Hiramatsu Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1808280853520.25787@cbobk.fhfr.pm --- arch/x86/kernel/alternative.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 014f214da581..b9d5e7c9ef43 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, * It means the size must be writable atomically and the address must be aligned * in a way that permits an atomic write. It also makes sure we fit on a single * page. - * - * Note: Must be called under text_mutex. */ void *text_poke(void *addr, const void *opcode, size_t len) { @@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len) */ BUG_ON(!after_bootmem); + lockdep_assert_held(&text_mutex); + if (!core_kernel_text((unsigned long)addr)) { pages[0] = vmalloc_to_page(addr); pages[1] = vmalloc_to_page(addr + PAGE_SIZE); @@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs) * - replace the first byte (int3) by the first byte of * replacing opcode * - sync cores - * - * Note: must be called under text_mutex. */ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) { @@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) bp_int3_handler = handler; bp_int3_addr = (u8 *)addr + sizeof(int3); bp_patching_in_progress = true; + + lockdep_assert_held(&text_mutex); + /* * Corresponding read barrier in int3 notifier for making sure the * in_progress and handler are correctly ordered wrt. patching. -- GitLab From 26e609eccd37967d3681662433086894830c5d62 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 14 Aug 2018 18:59:51 +0200 Subject: [PATCH 0723/1692] x86/asm: Use CC_SET()/CC_OUT() in __gen_sigismember() Replace open-coded set instructions with CC_SET()/CC_OUT(). 
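For readers unfamiliar with the idiom, the two forms compare as follows in stand-alone user-space GCC inline asm (x86, GCC 6 or newer for the flag-output constraint; an illustration, not kernel code). CC_SET()/CC_OUT() are, roughly, wrappers that emit the flag-output form when the compiler supports it and fall back to an explicit set instruction otherwise.

    #include <stdbool.h>
    #include <stdio.h>

    static bool test_bit_setcc(const unsigned long *word, int nr)
    {
            unsigned char ret;

            /* old pattern: materialize CF with an explicit setc */
            asm("btl %2, %1\n\tsetc %0"
                : "=qm" (ret) : "m" (*word), "Ir" (nr) : "cc");
            return ret;
    }

    static bool test_bit_flagout(const unsigned long *word, int nr)
    {
            bool ret;

            /* new pattern: let the compiler consume the carry flag directly */
            asm("btl %2, %1"
                : "=@ccc" (ret) : "m" (*word), "Ir" (nr));
            return ret;
    }

    int main(void)
    {
            unsigned long w = 1UL << 5;

            printf("%d %d\n", test_bit_setcc(&w, 5), test_bit_flagout(&w, 5));
            return 0;
    }

The second form typically saves the setc plus a later test, since the compiler can branch on the flag itself.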
Signed-off-by: Uros Bizjak Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20180814165951.13538-1-ubizjak@gmail.com --- arch/x86/include/asm/signal.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 5f9012ff52ed..33d3c88a7225 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs); #define __ARCH_HAS_SA_RESTORER +#include #include #ifdef __i386__ @@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig) static inline int __gen_sigismember(sigset_t *set, int _sig) { - unsigned char ret; - asm("btl %2,%1\n\tsetc %0" - : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); + bool ret; + asm("btl %2,%1" CC_SET(c) + : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1)); return ret; } -- GitLab From 7915919bb94e12460c58e27c708472e6f85f6699 Mon Sep 17 00:00:00 2001 From: Vincent Pelletier Date: Mon, 27 Aug 2018 14:45:15 -0500 Subject: [PATCH 0724/1692] scsi: iscsi: target: Set conn->sess to NULL when iscsi_login_set_conn_values fails Fixes a use-after-free reported by KASAN when later iscsi_target_login_sess_out gets called and it tries to access conn->sess->se_sess: Disabling lock debugging due to kernel taint iSCSI Login timeout on Network Portal [::]:3260 iSCSI Login negotiation failed. ================================================================== BUG: KASAN: use-after-free in iscsi_target_login_sess_out.cold.12+0x58/0xff [iscsi_target_mod] Read of size 8 at addr ffff880109d070c8 by task iscsi_np/980 CPU: 1 PID: 980 Comm: iscsi_np Tainted: G O 4.17.8kasan.sess.connops+ #4 Hardware name: To be filled by O.E.M. To be filled by O.E.M./Aptio CRB, BIOS 5.6.5 05/19/2014 Call Trace: dump_stack+0x71/0xac print_address_description+0x65/0x22e ? iscsi_target_login_sess_out.cold.12+0x58/0xff [iscsi_target_mod] kasan_report.cold.6+0x241/0x2fd iscsi_target_login_sess_out.cold.12+0x58/0xff [iscsi_target_mod] iscsi_target_login_thread+0x1086/0x1710 [iscsi_target_mod] ? __sched_text_start+0x8/0x8 ? iscsi_target_login_sess_out+0x250/0x250 [iscsi_target_mod] ? __kthread_parkme+0xcc/0x100 ? parse_args.cold.14+0xd3/0xd3 ? iscsi_target_login_sess_out+0x250/0x250 [iscsi_target_mod] kthread+0x1a0/0x1c0 ? 
kthread_bind+0x30/0x30 ret_from_fork+0x35/0x40 Allocated by task 980: kasan_kmalloc+0xbf/0xe0 kmem_cache_alloc_trace+0x112/0x210 iscsi_target_login_thread+0x816/0x1710 [iscsi_target_mod] kthread+0x1a0/0x1c0 ret_from_fork+0x35/0x40 Freed by task 980: __kasan_slab_free+0x125/0x170 kfree+0x90/0x1d0 iscsi_target_login_thread+0x1577/0x1710 [iscsi_target_mod] kthread+0x1a0/0x1c0 ret_from_fork+0x35/0x40 The buggy address belongs to the object at ffff880109d06f00 which belongs to the cache kmalloc-512 of size 512 The buggy address is located 456 bytes inside of 512-byte region [ffff880109d06f00, ffff880109d07100) The buggy address belongs to the page: page:ffffea0004274180 count:1 mapcount:0 mapping:0000000000000000 index:0x0 compound_mapcount: 0 flags: 0x17fffc000008100(slab|head) raw: 017fffc000008100 0000000000000000 0000000000000000 00000001000c000c raw: dead000000000100 dead000000000200 ffff88011b002e00 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff880109d06f80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff880109d07000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff880109d07080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff880109d07100: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffff880109d07180: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ================================================================== Signed-off-by: Vincent Pelletier [rebased against idr/ida changes and to handle ret review comments from Matthew] Signed-off-by: Mike Christie Cc: Matthew Wilcox Reviewed-by: Matthew Wilcox Signed-off-by: Martin K. Petersen --- drivers/target/iscsi/iscsi_target_login.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 9e74f8bc2963..f58b9c1d6fd4 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -310,11 +310,9 @@ static int iscsi_login_zero_tsih_s1( return -ENOMEM; } - ret = iscsi_login_set_conn_values(sess, conn, pdu->cid); - if (unlikely(ret)) { - kfree(sess); - return ret; - } + if (iscsi_login_set_conn_values(sess, conn, pdu->cid)) + goto free_sess; + sess->init_task_tag = pdu->itt; memcpy(&sess->isid, pdu->isid, 6); sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); -- GitLab From 05a86e78ea9823ec25b3515db078dd8a76fc263c Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Mon, 27 Aug 2018 14:45:16 -0500 Subject: [PATCH 0725/1692] scsi: iscsi: target: Fix conn_ops double free If iscsi_login_init_conn fails it can free conn_ops. __iscsi_target_login_thread will then call iscsi_target_login_sess_out which will also free it. This fixes the problem by organizing conn allocation/setup into parts that are needed through the life of the conn and parts that are only needed for the login. The free functions then release what was allocated in the alloc functions. With this patch we have: iscsit_alloc_conn/iscsit_free_conn - allocs/frees the conn we need for the entire life of the conn. iscsi_login_init_conn/iscsi_target_nego_release - allocs/frees the parts of the conn that are only needed during login. Signed-off-by: Mike Christie Signed-off-by: Martin K. 
Petersen --- drivers/target/iscsi/iscsi_target.c | 9 +- drivers/target/iscsi/iscsi_target_login.c | 141 ++++++++++++---------- drivers/target/iscsi/iscsi_target_login.h | 2 +- 3 files changed, 77 insertions(+), 75 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 94bad43c41ff..9cdfccbdd06f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4208,22 +4208,15 @@ int iscsit_close_connection( crypto_free_ahash(tfm); } - free_cpumask_var(conn->conn_cpumask); - - kfree(conn->conn_ops); - conn->conn_ops = NULL; - if (conn->sock) sock_release(conn->sock); if (conn->conn_transport->iscsit_free_conn) conn->conn_transport->iscsit_free_conn(conn); - iscsit_put_transport(conn->conn_transport); - pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); conn->conn_state = TARG_CONN_STATE_FREE; - kfree(conn); + iscsit_free_conn(conn); spin_lock_bh(&sess->conn_lock); atomic_dec(&sess->nconn); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index f58b9c1d6fd4..bb90c80ff388 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -67,45 +67,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn) goto out_req_buf; } - conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); - if (!conn->conn_ops) { - pr_err("Unable to allocate memory for" - " struct iscsi_conn_ops.\n"); - goto out_rsp_buf; - } - - init_waitqueue_head(&conn->queues_wq); - INIT_LIST_HEAD(&conn->conn_list); - INIT_LIST_HEAD(&conn->conn_cmd_list); - INIT_LIST_HEAD(&conn->immed_queue_list); - INIT_LIST_HEAD(&conn->response_queue_list); - init_completion(&conn->conn_post_wait_comp); - init_completion(&conn->conn_wait_comp); - init_completion(&conn->conn_wait_rcfr_comp); - init_completion(&conn->conn_waiting_on_uc_comp); - init_completion(&conn->conn_logout_comp); - init_completion(&conn->rx_half_close_comp); - init_completion(&conn->tx_half_close_comp); - init_completion(&conn->rx_login_comp); - spin_lock_init(&conn->cmd_lock); - spin_lock_init(&conn->conn_usage_lock); - spin_lock_init(&conn->immed_queue_lock); - spin_lock_init(&conn->nopin_timer_lock); - spin_lock_init(&conn->response_queue_lock); - spin_lock_init(&conn->state_lock); - - if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { - pr_err("Unable to allocate conn->conn_cpumask\n"); - goto out_conn_ops; - } conn->conn_login = login; return login; -out_conn_ops: - kfree(conn->conn_ops); -out_rsp_buf: - kfree(login->rsp_buf); out_req_buf: kfree(login->req_buf); out_login: @@ -1147,6 +1112,75 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t) return 0; } +static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np) +{ + struct iscsi_conn *conn; + + conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); + if (!conn) { + pr_err("Could not allocate memory for new connection\n"); + return NULL; + } + pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); + conn->conn_state = TARG_CONN_STATE_FREE; + + init_waitqueue_head(&conn->queues_wq); + INIT_LIST_HEAD(&conn->conn_list); + INIT_LIST_HEAD(&conn->conn_cmd_list); + INIT_LIST_HEAD(&conn->immed_queue_list); + INIT_LIST_HEAD(&conn->response_queue_list); + init_completion(&conn->conn_post_wait_comp); + init_completion(&conn->conn_wait_comp); + init_completion(&conn->conn_wait_rcfr_comp); + init_completion(&conn->conn_waiting_on_uc_comp); + init_completion(&conn->conn_logout_comp); + 
init_completion(&conn->rx_half_close_comp); + init_completion(&conn->tx_half_close_comp); + init_completion(&conn->rx_login_comp); + spin_lock_init(&conn->cmd_lock); + spin_lock_init(&conn->conn_usage_lock); + spin_lock_init(&conn->immed_queue_lock); + spin_lock_init(&conn->nopin_timer_lock); + spin_lock_init(&conn->response_queue_lock); + spin_lock_init(&conn->state_lock); + + timer_setup(&conn->nopin_response_timer, + iscsit_handle_nopin_response_timeout, 0); + timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); + + if (iscsit_conn_set_transport(conn, np->np_transport) < 0) + goto free_conn; + + conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); + if (!conn->conn_ops) { + pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n"); + goto put_transport; + } + + if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { + pr_err("Unable to allocate conn->conn_cpumask\n"); + goto free_mask; + } + + return conn; + +free_mask: + free_cpumask_var(conn->conn_cpumask); +put_transport: + iscsit_put_transport(conn->conn_transport); +free_conn: + kfree(conn); + return NULL; +} + +void iscsit_free_conn(struct iscsi_conn *conn) +{ + free_cpumask_var(conn->conn_cpumask); + kfree(conn->conn_ops); + iscsit_put_transport(conn->conn_transport); + kfree(conn); +} + void iscsi_target_login_sess_out(struct iscsi_conn *conn, struct iscsi_np *np, bool zero_tsih, bool new_sess) { @@ -1196,10 +1230,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, crypto_free_ahash(tfm); } - free_cpumask_var(conn->conn_cpumask); - - kfree(conn->conn_ops); - if (conn->param_list) { iscsi_release_param_list(conn->param_list); conn->param_list = NULL; @@ -1217,8 +1247,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, if (conn->conn_transport->iscsit_free_conn) conn->conn_transport->iscsit_free_conn(conn); - iscsit_put_transport(conn->conn_transport); - kfree(conn); + iscsit_free_conn(conn); } static int __iscsi_target_login_thread(struct iscsi_np *np) @@ -1248,31 +1277,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) } spin_unlock_bh(&np->np_thread_lock); - conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); + conn = iscsit_alloc_conn(np); if (!conn) { - pr_err("Could not allocate memory for" - " new connection\n"); /* Get another socket */ return 1; } - pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); - conn->conn_state = TARG_CONN_STATE_FREE; - - timer_setup(&conn->nopin_response_timer, - iscsit_handle_nopin_response_timeout, 0); - timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); - - if (iscsit_conn_set_transport(conn, np->np_transport) < 0) { - kfree(conn); - return 1; - } rc = np->np_transport->iscsit_accept_np(np, conn); if (rc == -ENOSYS) { complete(&np->np_restart_comp); - iscsit_put_transport(conn->conn_transport); - kfree(conn); - conn = NULL; + iscsit_free_conn(conn); goto exit; } else if (rc < 0) { spin_lock_bh(&np->np_thread_lock); @@ -1280,17 +1294,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; spin_unlock_bh(&np->np_thread_lock); complete(&np->np_restart_comp); - iscsit_put_transport(conn->conn_transport); - kfree(conn); - conn = NULL; + iscsit_free_conn(conn); /* Get another socket */ return 1; } spin_unlock_bh(&np->np_thread_lock); - iscsit_put_transport(conn->conn_transport); - kfree(conn); - conn = NULL; - goto out; + iscsit_free_conn(conn); + return 1; } /* * Perform the remaining iSCSI connection initialization items.. 
@@ -1440,7 +1450,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) tpg_np = NULL; } -out: return 1; exit: diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 74ac3abc44a0..3b8e3639ff5d 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -19,7 +19,7 @@ extern int iscsi_target_setup_login_socket(struct iscsi_np *, extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); -extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); +extern void iscsit_free_conn(struct iscsi_conn *); extern int iscsit_start_kthreads(struct iscsi_conn *); extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, -- GitLab From c77a2fa3ff8f73d1a485e67e6f81c64823739d59 Mon Sep 17 00:00:00 2001 From: Nilesh Javali Date: Wed, 29 Aug 2018 23:55:53 -0700 Subject: [PATCH 0726/1692] scsi: qedi: Add the CRC size within iSCSI NVM image The QED driver commit, 1ac4329a1cff ("qed: Add configuration information to register dump and debug data"), removes the CRC length validation causing nvm_get_image failure while loading qedi driver: [qed_mcp_get_nvm_image:2700(host_10-0)]Image [0] is too big - 00006008 bytes where only 00006004 are available [qedi_get_boot_info:2253]:10: Could not get NVM image. ret = -12 Hence add and adjust the CRC size to iSCSI NVM image to read boot info at qedi load time. Signed-off-by: Nilesh Javali Signed-off-by: Martin K. Petersen --- drivers/scsi/qedi/qedi.h | 7 ++++++- drivers/scsi/qedi/qedi_main.c | 28 +++++++++++++++------------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h index fc3babc15fa3..a6f96b35e971 100644 --- a/drivers/scsi/qedi/qedi.h +++ b/drivers/scsi/qedi/qedi.h @@ -77,6 +77,11 @@ enum qedi_nvm_tgts { QEDI_NVM_TGT_SEC, }; +struct qedi_nvm_iscsi_image { + struct nvm_iscsi_cfg iscsi_cfg; + u32 crc; +}; + struct qedi_uio_ctrl { /* meta data */ u32 uio_hsi_version; @@ -294,7 +299,7 @@ struct qedi_ctx { void *bdq_pbl_list; dma_addr_t bdq_pbl_list_dma; u8 bdq_pbl_list_num_entries; - struct nvm_iscsi_cfg *iscsi_cfg; + struct qedi_nvm_iscsi_image *iscsi_image; dma_addr_t nvm_buf_dma; void __iomem *bdq_primary_prod; void __iomem *bdq_secondary_prod; diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index aa96bccb5a96..cc8e64dc65ad 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1346,23 +1346,26 @@ static int qedi_setup_int(struct qedi_ctx *qedi) static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) { - if (qedi->iscsi_cfg) + if (qedi->iscsi_image) dma_free_coherent(&qedi->pdev->dev, - sizeof(struct nvm_iscsi_cfg), - qedi->iscsi_cfg, qedi->nvm_buf_dma); + sizeof(struct qedi_nvm_iscsi_image), + qedi->iscsi_image, qedi->nvm_buf_dma); } static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) { - qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev, - sizeof(struct nvm_iscsi_cfg), - &qedi->nvm_buf_dma, GFP_KERNEL); - if (!qedi->iscsi_cfg) { + struct qedi_nvm_iscsi_image nvm_image; + + qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, + sizeof(nvm_image), + &qedi->nvm_buf_dma, + GFP_KERNEL); + if (!qedi->iscsi_image) { QEDI_ERR(&qedi->dbg_ctx, "Could not 
allocate NVM BUF.\n"); return -ENOMEM; } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, - "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg, + "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image, qedi->nvm_buf_dma); return 0; @@ -1905,7 +1908,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi) struct nvm_iscsi_block *block; pf = qedi->dev_info.common.abs_pf_id; - block = &qedi->iscsi_cfg->block[0]; + block = &qedi->iscsi_image->iscsi_cfg.block[0]; for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; @@ -2194,15 +2197,14 @@ static void qedi_boot_release(void *data) static int qedi_get_boot_info(struct qedi_ctx *qedi) { int ret = 1; - u16 len; - - len = sizeof(struct nvm_iscsi_cfg); + struct qedi_nvm_iscsi_image nvm_image; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Get NVM iSCSI CFG image\n"); ret = qedi_ops->common->nvm_get_image(qedi->cdev, QED_NVM_IMAGE_ISCSI_CFG, - (char *)qedi->iscsi_cfg, len); + (char *)qedi->iscsi_image, + sizeof(nvm_image)); if (ret) QEDI_ERR(&qedi->dbg_ctx, "Could not get NVM image. ret = %d\n", ret); -- GitLab From 6e4adef7e4b0f5000a914482c028ed6284577997 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Thu, 30 Aug 2018 14:29:53 +0300 Subject: [PATCH 0727/1692] drm/i915: Update DRIVER_DATE to 20180830 Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e5b9d3c77139..dc38e99bdaf1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -86,8 +86,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20180719" -#define DRIVER_TIMESTAMP 1532015279 +#define DRIVER_DATE "20180830" +#define DRIVER_TIMESTAMP 1535628593 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions -- GitLab From 755a8bf5579d22eb5636685c516d8dede799e27b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 24 Aug 2018 15:08:30 +0100 Subject: [PATCH 0728/1692] arm/arm64: smccc-1.1: Handle function result as parameters If someone has the silly idea to write something along those lines: extern u64 foo(void); void bar(struct arm_smccc_res *res) { arm_smccc_1_1_smc(0xbad, foo(), res); } they are in for a surprise, as this gets compiled as: 0000000000000588 : 588: a9be7bfd stp x29, x30, [sp, #-32]! 58c: 910003fd mov x29, sp 590: f9000bf3 str x19, [sp, #16] 594: aa0003f3 mov x19, x0 598: aa1e03e0 mov x0, x30 59c: 94000000 bl 0 <_mcount> 5a0: 94000000 bl 0 5a4: aa0003e1 mov x1, x0 5a8: d4000003 smc #0x0 5ac: b4000073 cbz x19, 5b8 5b0: a9000660 stp x0, x1, [x19] 5b4: a9010e62 stp x2, x3, [x19, #16] 5b8: f9400bf3 ldr x19, [sp, #16] 5bc: a8c27bfd ldp x29, x30, [sp], #32 5c0: d65f03c0 ret 5c4: d503201f nop The call to foo "overwrites" the x0 register for the return value, and we end up calling the wrong secure service. A solution is to evaluate all the parameters before assigning anything to specific registers, leading to the expected result: 0000000000000588 : 588: a9be7bfd stp x29, x30, [sp, #-32]! 
58c: 910003fd mov x29, sp 590: f9000bf3 str x19, [sp, #16] 594: aa0003f3 mov x19, x0 598: aa1e03e0 mov x0, x30 59c: 94000000 bl 0 <_mcount> 5a0: 94000000 bl 0 5a4: aa0003e1 mov x1, x0 5a8: d28175a0 mov x0, #0xbad 5ac: d4000003 smc #0x0 5b0: b4000073 cbz x19, 5bc 5b4: a9000660 stp x0, x1, [x19] 5b8: a9010e62 stp x2, x3, [x19, #16] 5bc: f9400bf3 ldr x19, [sp, #16] 5c0: a8c27bfd ldp x29, x30, [sp], #32 5c4: d65f03c0 ret Reported-by: Julien Grall Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon --- include/linux/arm-smccc.h | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 5a91ff33720b..18863d56273c 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -205,41 +205,51 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, register unsigned long r3 asm("r3") #define __declare_arg_1(a0, a1, res) \ + typeof(a1) __a1 = a1; \ struct arm_smccc_res *___res = res; \ register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = a1; \ + register unsigned long r1 asm("r1") = __a1; \ register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3") #define __declare_arg_2(a0, a1, a2, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ struct arm_smccc_res *___res = res; \ register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = a1; \ - register unsigned long r2 asm("r2") = a2; \ + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ register unsigned long r3 asm("r3") #define __declare_arg_3(a0, a1, a2, a3, res) \ + typeof(a1) __a1 = a1; \ + typeof(a2) __a2 = a2; \ + typeof(a3) __a3 = a3; \ struct arm_smccc_res *___res = res; \ register unsigned long r0 asm("r0") = (u32)a0; \ - register unsigned long r1 asm("r1") = a1; \ - register unsigned long r2 asm("r2") = a2; \ - register unsigned long r3 asm("r3") = a3 + register unsigned long r1 asm("r1") = __a1; \ + register unsigned long r2 asm("r2") = __a2; \ + register unsigned long r3 asm("r3") = __a3 #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ + typeof(a4) __a4 = a4; \ __declare_arg_3(a0, a1, a2, a3, res); \ - register typeof(a4) r4 asm("r4") = a4 + register unsigned long r4 asm("r4") = __a4 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ + typeof(a5) __a5 = a5; \ __declare_arg_4(a0, a1, a2, a3, a4, res); \ - register typeof(a5) r5 asm("r5") = a5 + register unsigned long r5 asm("r5") = __a5 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ + typeof(a6) __a6 = a6; \ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ - register typeof(a6) r6 asm("r6") = a6 + register unsigned long r6 asm("r6") = __a6 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ + typeof(a7) __a7 = a7; \ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ - register typeof(a7) r7 asm("r7") = a7 + register unsigned long r7 asm("r7") = __a7 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) #define __declare_args(count, ...) 
___declare_args(count, __VA_ARGS__) -- GitLab From 16037643969e095509cd8446a3f8e406a6dc3a2c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 30 Aug 2018 15:13:16 +0200 Subject: [PATCH 0729/1692] ALSA: hda - Fix cancel_work_sync() stall from jackpoll work On AMD/ATI controllers, the HD-audio controller driver allows a bus reset upon the error recovery, and its procedure includes the cancellation of pending jack polling work as found in snd_hda_bus_codec_reset(). This works usually fine, but it becomes a problem when the reset happens from the jack poll work itself; then calling cancel_work_sync() from the work being processed tries to wait the finish endlessly. As a workaround, this patch adds the check of current_work() and applies the cancel_work_sync() only when it's not from the jackpoll_work. This doesn't fix the root cause of the reported error below, but at least, it eases the unexpected stall of the whole system. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=200937 Cc: Cc: Lukas Wunner Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_codec.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 0a5085537034..26d348b47867 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -3935,7 +3935,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus) list_for_each_codec(codec, bus) { /* FIXME: maybe a better way needed for forced reset */ - cancel_delayed_work_sync(&codec->jackpoll_work); + if (current_work() != &codec->jackpoll_work.work) + cancel_delayed_work_sync(&codec->jackpoll_work); #ifdef CONFIG_PM if (hda_codec_is_power_on(codec)) { hda_call_codec_suspend(codec); -- GitLab From ec210e3226dc0b481ac1b33082b3b508f89387e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 24 Aug 2018 10:48:12 +0200 Subject: [PATCH 0730/1692] drm/amdgpu: put GART away from VRAM v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always try to put the GART away from where VRAM is. v2: correctly handle the 4GB limitation Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 265ec6807130..c6bcc4715373 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -116,6 +116,7 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, */ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) { + const uint64_t four_gb = 0x100000000ULL; u64 size_af, size_bf; mc->gart_size += adev->pm.smu_prv_buffer_size; @@ -124,8 +125,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) * the GART base on a 4GB boundary as well. 
*/ size_bf = mc->vram_start; - size_af = adev->gmc.mc_mask + 1 - - ALIGN(mc->vram_end + 1, 0x100000000ULL); + size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->vram_end + 1, four_gb); if (mc->gart_size > max(size_bf, size_af)) { dev_warn(adev->dev, "limiting GART\n"); @@ -136,7 +136,9 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) (size_af < mc->gart_size)) mc->gart_start = 0; else - mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL); + mc->gart_start = mc->mc_mask - mc->gart_size + 1; + + mc->gart_start &= four_gb - 1; mc->gart_end = mc->gart_start + mc->gart_size - 1; dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", mc->gart_size >> 20, mc->gart_start, mc->gart_end); -- GitLab From 17cc525206d6dba36d0fde12fd512c77dcfa1954 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 30 Aug 2018 09:45:07 +0200 Subject: [PATCH 0731/1692] drm/amdgpu: Revert "kmap PDs/PTs in amdgpu_vm_update_directories" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit a7f91061c60ad9cac2e6a03b642be6a4f88b3662. Felix pointed out that we need to have the BOs mapped even before amdgpu_vm_update_directories is called. Signed-off-by: Christian König Acked-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f50697df9799..f31fa351caba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -343,7 +343,10 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, list_move(&bo_base->vm_status, &vm->moved); spin_unlock(&vm->moved_lock); } else { - r = amdgpu_ttm_alloc_gart(&bo->tbo); + if (vm->use_cpu_for_update) + r = amdgpu_bo_kmap(bo, NULL); + else + r = amdgpu_ttm_alloc_gart(&bo->tbo); if (r) break; list_move(&bo_base->vm_status, &vm->relocated); @@ -1094,14 +1097,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, params.adev = adev; if (vm->use_cpu_for_update) { - struct amdgpu_vm_bo_base *bo_base; - - list_for_each_entry(bo_base, &vm->relocated, vm_status) { - r = amdgpu_bo_kmap(bo_base->bo, NULL); - if (unlikely(r)) - return r; - } - r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) return r; -- GitLab From b871da4a7778eda2e700ae8bf5f86e74a004c1ec Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 23 Aug 2018 18:24:24 +0200 Subject: [PATCH 0732/1692] KVM: nVMX: avoid redundant double assignment of nested_run_pending MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit nested_run_pending is set 20 lines above and check_vmentry_prereqs()/ check_vmentry_postreqs() don't seem to be resetting it (the later, however, checks it). 
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Paolo Bonzini Reviewed-by: Jim Mattson Reviewed-by: Eduardo Valentin Reviewed-by: Krish Sadhukhan Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1d26f3c4985b..92f027745842 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -13988,9 +13988,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) return -EINVAL; - if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING) - vmx->nested.nested_run_pending = 1; - vmx->nested.dirty_vmcs12 = true; ret = enter_vmx_non_root_mode(vcpu, NULL); if (ret) -- GitLab From 0186ec823280f5db55ab2e6291933bebe2e92772 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 28 Aug 2018 16:22:28 +0100 Subject: [PATCH 0733/1692] KVM: SVM: remove unused variable dst_vaddr_end MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Variable dst_vaddr_end is being assigned but is never used hence it is redundant and can be removed. Cleans up clang warning: variable 'dst_vaddr_end' set but not used [-Wunused-but-set-variable] Signed-off-by: Colin Ian King Signed-off-by: Radim Krčmář --- arch/x86/kvm/svm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 6276140044d0..4339fc4715fa 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -6747,7 +6747,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) { unsigned long vaddr, vaddr_end, next_vaddr; - unsigned long dst_vaddr, dst_vaddr_end; + unsigned long dst_vaddr; struct page **src_p, **dst_p; struct kvm_sev_dbg debug; unsigned long n; @@ -6763,7 +6763,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) size = debug.len; vaddr_end = vaddr + size; dst_vaddr = debug.dst_uaddr; - dst_vaddr_end = dst_vaddr + size; for (; vaddr < vaddr_end; vaddr = next_vaddr) { int len, s_off, d_off; -- GitLab From c4409905cd6eb42cfd06126e9226b0150e05a715 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 23 Aug 2018 13:56:46 -0700 Subject: [PATCH 0734/1692] KVM: VMX: Do not allow reexecute_instruction() when skipping MMIO instr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Re-execution after an emulation decode failure is only intended to handle a case where two or vCPUs race to write a shadowed page, i.e. we should never re-execute an instruction as part of MMIO emulation. As handle_ept_misconfig() is only used for MMIO emulation, it should pass EMULTYPE_NO_REEXECUTE when using the emulator to skip an instr in the fast-MMIO case where VM_EXIT_INSTRUCTION_LEN is invalid. And because the cr2 value passed to x86_emulate_instruction() is only destined for use when retrying or reexecuting, we can simply call emulate_instruction(). 
Fixes: d391f1207067 ("x86/kvm/vmx: do not use vm-exit instruction length for fast MMIO when running nested") Cc: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Cc: stable@vger.kernel.org Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 92f027745842..b345ad91809c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7704,8 +7704,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) return kvm_skip_emulated_instruction(vcpu); else - return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP, - NULL, 0) == EMULATE_DONE; + return emulate_instruction(vcpu, EMULTYPE_SKIP) == + EMULATE_DONE; } return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); -- GitLab From 35be0aded76b54a24dc8aa678a71bca22273e8d8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 23 Aug 2018 13:56:47 -0700 Subject: [PATCH 0735/1692] KVM: x86: SVM: Set EMULTYPE_NO_REEXECUTE for RSM emulation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Re-execution after an emulation decode failure is only intended to handle a case where two or vCPUs race to write a shadowed page, i.e. we should never re-execute an instruction as part of RSM emulation. Add a new helper, kvm_emulate_instruction_from_buffer(), to support emulating from a pre-defined buffer. This eliminates the last direct call to x86_emulate_instruction() outside of kvm_mmu_page_fault(), which means x86_emulate_instruction() can be unexported in a future patch. Fixes: 7607b7174405 ("KVM: SVM: install RSM intercept") Cc: Brijesh Singh Signed-off-by: Sean Christopherson Cc: stable@vger.kernel.org Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 7 +++++++ arch/x86/kvm/svm.c | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 00ddb0c9e612..3b220b86c8e4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1251,6 +1251,13 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu, emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); } +static inline int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, + void *insn, int insn_len) +{ + return x86_emulate_instruction(vcpu, 0, EMULTYPE_NO_REEXECUTE, + insn, insn_len); +} + void kvm_enable_efer_bits(u64); bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4339fc4715fa..b3cdfde8ff5e 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3874,8 +3874,8 @@ static int emulate_on_interception(struct vcpu_svm *svm) static int rsm_interception(struct vcpu_svm *svm) { - return x86_emulate_instruction(&svm->vcpu, 0, 0, - rsm_ins_bytes, 2) == EMULATE_DONE; + return kvm_emulate_instruction_from_buffer(&svm->vcpu, + rsm_ins_bytes, 2) == EMULATE_DONE; } static int rdpmc_interception(struct vcpu_svm *svm) -- GitLab From 8065dbd1ee0ef04321d80da7999b4f0086e0a407 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 23 Aug 2018 13:56:48 -0700 Subject: [PATCH 0736/1692] KVM: x86: Invert emulation re-execute behavior to make it opt-in MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Re-execution of an instruction after emulation decode failure is intended to be used only when emulating shadow 
page accesses. Invert the flag to make allowing re-execution opt-in since that behavior is by far in the minority. Signed-off-by: Sean Christopherson Cc: stable@vger.kernel.org Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 8 +++----- arch/x86/kvm/mmu.c | 2 +- arch/x86/kvm/x86.c | 2 +- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3b220b86c8e4..a69ea11f3bab 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1238,7 +1238,7 @@ enum emulation_result { #define EMULTYPE_TRAP_UD (1 << 1) #define EMULTYPE_SKIP (1 << 2) #define EMULTYPE_RETRY (1 << 3) -#define EMULTYPE_NO_REEXECUTE (1 << 4) +#define EMULTYPE_ALLOW_REEXECUTE (1 << 4) #define EMULTYPE_NO_UD_ON_FAIL (1 << 5) #define EMULTYPE_VMWARE (1 << 6) int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, @@ -1247,15 +1247,13 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, static inline int emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) { - return x86_emulate_instruction(vcpu, 0, - emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); + return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); } static inline int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, void *insn, int insn_len) { - return x86_emulate_instruction(vcpu, 0, EMULTYPE_NO_REEXECUTE, - insn, insn_len); + return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); } void kvm_enable_efer_bits(u64); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a282321329b5..4508c34eef20 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5217,7 +5217,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, void *insn, int insn_len) { - int r, emulation_type = EMULTYPE_RETRY; + int r, emulation_type = EMULTYPE_RETRY | EMULTYPE_ALLOW_REEXECUTE; enum emulation_result er; bool direct = vcpu->arch.mmu.direct_map; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 506bd2b4b8bb..d6f85ea23101 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5870,7 +5870,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, gpa_t gpa = cr2; kvm_pfn_t pfn; - if (emulation_type & EMULTYPE_NO_REEXECUTE) + if (!(emulation_type & EMULTYPE_ALLOW_REEXECUTE)) return false; if (!vcpu->arch.mmu.direct_map) { -- GitLab From 384bf2218e96f57118270945b1841e4dbbe9e352 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 23 Aug 2018 13:56:49 -0700 Subject: [PATCH 0737/1692] KVM: x86: Merge EMULTYPE_RETRY and EMULTYPE_ALLOW_REEXECUTE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit retry_instruction() and reexecute_instruction() are a package deal, i.e. there is no scenario where one is allowed and the other is not. Merge their controlling emulation type flags to enforce this in code. Name the combined flag EMULTYPE_ALLOW_RETRY to make it abundantly clear that we are allowing re{try,execute} to occur, as opposed to explicitly requesting retry of a previously failed instruction. 
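Reduced to a sketch with hypothetical flag names (not the KVM definitions), the combined opt-in flag makes the minority behaviour visible at exactly one call site and makes it impossible to request retry without also permitting re-execution:

    #include <stdio.h>

    /* hypothetical emulation-type flags for the sketch */
    #define EMUL_SKIP         (1u << 0)
    #define EMUL_ALLOW_RETRY  (1u << 1)    /* retry and re-execute: one knob */

    static const char *emulate(unsigned int type)
    {
            if (type & EMUL_ALLOW_RETRY)
                    return "may unprotect the page and retry";
            return "fails hard on decode failure";
    }

    int main(void)
    {
            unsigned int type = 0;              /* default: no retry */
            int fault_on_shadowed_page = 1;     /* stand-in for the MMU check */

            if (fault_on_shadowed_page)
                    type |= EMUL_ALLOW_RETRY;   /* the single opt-in site */

            printf("shadow fault: %s\n", emulate(type));
            printf("mmio skip:    %s\n", emulate(EMUL_SKIP));
            return 0;
    }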
Signed-off-by: Sean Christopherson Cc: stable@vger.kernel.org Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 7 +++---- arch/x86/kvm/mmu.c | 2 +- arch/x86/kvm/x86.c | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a69ea11f3bab..35e03b13edcb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1237,10 +1237,9 @@ enum emulation_result { #define EMULTYPE_NO_DECODE (1 << 0) #define EMULTYPE_TRAP_UD (1 << 1) #define EMULTYPE_SKIP (1 << 2) -#define EMULTYPE_RETRY (1 << 3) -#define EMULTYPE_ALLOW_REEXECUTE (1 << 4) -#define EMULTYPE_NO_UD_ON_FAIL (1 << 5) -#define EMULTYPE_VMWARE (1 << 6) +#define EMULTYPE_ALLOW_RETRY (1 << 3) +#define EMULTYPE_NO_UD_ON_FAIL (1 << 4) +#define EMULTYPE_VMWARE (1 << 5) int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, int emulation_type, void *insn, int insn_len); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4508c34eef20..0246a1ea7f55 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5217,7 +5217,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, void *insn, int insn_len) { - int r, emulation_type = EMULTYPE_RETRY | EMULTYPE_ALLOW_REEXECUTE; + int r, emulation_type = EMULTYPE_ALLOW_RETRY; enum emulation_result er; bool direct = vcpu->arch.mmu.direct_map; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d6f85ea23101..924ce28723c4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5870,7 +5870,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, gpa_t gpa = cr2; kvm_pfn_t pfn; - if (!(emulation_type & EMULTYPE_ALLOW_REEXECUTE)) + if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) return false; if (!vcpu->arch.mmu.direct_map) { @@ -5958,7 +5958,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, */ vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; - if (!(emulation_type & EMULTYPE_RETRY)) + if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) return false; if (x86_page_table_writing_insn(ctxt)) -- GitLab From 472faffacd9032164f611f56329d0025ddca55b5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 23 Aug 2018 13:56:50 -0700 Subject: [PATCH 0738/1692] KVM: x86: Default to not allowing emulation retry in kvm_mmu_page_fault MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Effectively force kvm_mmu_page_fault() to opt-in to allowing retry to make it more obvious when and why it allows emulation to be retried. Previously this approach was less convenient due to retry and re-execute behavior being controlled by separate flags that were also inverted in their implementations (opt-in versus opt-out). 
Suggested-by: Paolo Bonzini Signed-off-by: Sean Christopherson Cc: stable@vger.kernel.org Signed-off-by: Radim Krčmář --- arch/x86/kvm/mmu.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 0246a1ea7f55..01fb8701ccd0 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5217,7 +5217,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, void *insn, int insn_len) { - int r, emulation_type = EMULTYPE_ALLOW_RETRY; + int r, emulation_type = 0; enum emulation_result er; bool direct = vcpu->arch.mmu.direct_map; @@ -5230,10 +5230,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, r = RET_PF_INVALID; if (unlikely(error_code & PFERR_RSVD_MASK)) { r = handle_mmio_page_fault(vcpu, cr2, direct); - if (r == RET_PF_EMULATE) { - emulation_type = 0; + if (r == RET_PF_EMULATE) goto emulate; - } } if (r == RET_PF_INVALID) { @@ -5260,8 +5258,16 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, return 1; } - if (mmio_info_in_cache(vcpu, cr2, direct)) - emulation_type = 0; + /* + * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still + * optimistically try to just unprotect the page and let the processor + * re-execute the instruction that caused the page fault. Do not allow + * retrying MMIO emulation, as it's not only pointless but could also + * cause us to enter an infinite loop because the processor will keep + * faulting on the non-existent MMIO address. + */ + if (!mmio_info_in_cache(vcpu, cr2, direct)) + emulation_type = EMULTYPE_ALLOW_RETRY; emulate: /* * On AMD platforms, under certain conditions insn_len may be zero on #NPF. -- GitLab From 6c3dfeb6a48b1562bd5b8ec5f3317ef34d0134ef Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 23 Aug 2018 13:56:51 -0700 Subject: [PATCH 0739/1692] KVM: x86: Do not re-{try,execute} after failed emulation in L2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit a6f177efaa58 ("KVM: Reenter guest after emulation failure if due to access to non-mmio address") added reexecute_instruction() to handle the scenario where two (or more) vCPUS race to write a shadowed page, i.e. reexecute_instruction() is intended to return true if and only if the instruction being emulated was accessing a shadowed page. As L0 is only explicitly shadowing L1 tables, an emulation failure of a nested VM instruction cannot be due to a race to write a shadowed page and so should never be re-executed. This fixes an issue where an "MMIO" emulation failure[1] in L2 is all but guaranteed to result in an infinite loop when TDP is enabled. Because "cr2" is actually an L2 GPA when TDP is enabled, calling kvm_mmu_gva_to_gpa_write() to translate cr2 in the non-direct mapped case (L2 is never direct mapped) will almost always yield UNMAPPED_GVA and cause reexecute_instruction() to immediately return true. The !mmio_info_in_cache() check in kvm_mmu_page_fault() doesn't catch this case because mmio_info_in_cache() returns false for a nested MMU (the MMIO caching currently handles L1 only, e.g. to cache nested guests' GPAs we'd have to manually flush the cache when switching between VMs and when L1 updated its page tables controlling the nested guest). 
Way back when, commit 68be0803456b ("KVM: x86: never re-execute instruction with enabled tdp") changed reexecute_instruction() to always return false when using TDP under the assumption that KVM would only get into the emulator for MMIO. Commit 95b3cf69bdf8 ("KVM: x86: let reexecute_instruction work for tdp") effectively reverted that behavior in order to handle the scenario where emulation failed due to an access from L1 to the shadow page tables for L2, but it didn't account for the case where emulation failed in L2 with TDP enabled. All of the above logic also applies to retry_instruction(), added by commit 1cb3f3ae5a38 ("KVM: x86: retry non-page-table writing instructions"). An indefinite loop in retry_instruction() should be impossible as it protects against retrying the same instruction over and over, but it's still correct to not retry an L2 instruction in the first place. Fix the immediate issue by adding a check for a nested guest when determining whether or not to allow retry in kvm_mmu_page_fault(). In addition to fixing the immediate bug, add WARN_ON_ONCE in the retry functions since they are not designed to handle nested cases, i.e. they need to be modified even if there is some scenario in the future where we want to allow retrying a nested guest. [1] This issue was encountered after commit 3a2936dedd20 ("kvm: mmu: Don't expose private memslots to L2") changed the page fault path to return KVM_PFN_NOSLOT when translating an L2 access to a prive memslot. Returning KVM_PFN_NOSLOT is semantically correct when we want to hide a memslot from L2, i.e. there effectively is no defined memory region for L2, but it has the unfortunate side effect of making KVM think the GFN is a MMIO page, thus triggering emulation. The failure occurred with in-development code that deliberately exposed a private memslot to L2, which L2 accessed with an instruction that is not emulated by KVM. Fixes: 95b3cf69bdf8 ("KVM: x86: let reexecute_instruction work for tdp") Fixes: 1cb3f3ae5a38 ("KVM: x86: retry non-page-table writing instructions") Signed-off-by: Sean Christopherson Cc: Jim Mattson Cc: Krish Sadhukhan Cc: Xiao Guangrong Cc: stable@vger.kernel.org Signed-off-by: Radim Krčmář --- arch/x86/kvm/mmu.c | 7 +++++-- arch/x86/kvm/x86.c | 6 ++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 01fb8701ccd0..f7e83b1e0eb2 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5264,9 +5264,12 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, * re-execute the instruction that caused the page fault. Do not allow * retrying MMIO emulation, as it's not only pointless but could also * cause us to enter an infinite loop because the processor will keep - * faulting on the non-existent MMIO address. + * faulting on the non-existent MMIO address. Retrying an instruction + * from a nested guest is also pointless and dangerous as we are only + * explicitly shadowing L1's page tables, i.e. unprotecting something + * for L1 isn't going to magically fix whatever issue cause L2 to fail. 
*/ - if (!mmio_info_in_cache(vcpu, cr2, direct)) + if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu)) emulation_type = EMULTYPE_ALLOW_RETRY; emulate: /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 924ce28723c4..cbe2921e972b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5873,6 +5873,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) return false; + if (WARN_ON_ONCE(is_guest_mode(vcpu))) + return false; + if (!vcpu->arch.mmu.direct_map) { /* * Write permission should be allowed since only @@ -5961,6 +5964,9 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) return false; + if (WARN_ON_ONCE(is_guest_mode(vcpu))) + return false; + if (x86_page_table_writing_insn(ctxt)) return false; -- GitLab From 0ce97a2b627c5e26347aee298f571ddf925e5fe4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 23 Aug 2018 13:56:52 -0700 Subject: [PATCH 0740/1692] KVM: x86: Rename emulate_instruction() to kvm_emulate_instruction() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Lack of the kvm_ prefix gives the impression that it's a VMX or SVM specific function, and there's no conflict that prevents adding the kvm_ prefix. Signed-off-by: Sean Christopherson Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/svm.c | 12 ++++++------ arch/x86/kvm/vmx.c | 16 ++++++++-------- arch/x86/kvm/x86.c | 4 ++-- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 35e03b13edcb..8c9023661351 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1243,7 +1243,7 @@ enum emulation_result { int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, int emulation_type, void *insn, int insn_len); -static inline int emulate_instruction(struct kvm_vcpu *vcpu, +static inline int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) { return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b3cdfde8ff5e..89c4c5aa15f1 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) } if (!svm->next_rip) { - if (emulate_instruction(vcpu, EMULTYPE_SKIP) != + if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) != EMULATE_DONE) printk(KERN_DEBUG "%s: NOP\n", __func__); return; @@ -2715,7 +2715,7 @@ static int gp_interception(struct vcpu_svm *svm) WARN_ON_ONCE(!enable_vmware_backdoor); - er = emulate_instruction(vcpu, + er = kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); if (er == EMULATE_USER_EXIT) return 0; @@ -2819,7 +2819,7 @@ static int io_interception(struct vcpu_svm *svm) string = (io_info & SVM_IOIO_STR_MASK) != 0; in = (io_info & SVM_IOIO_TYPE_MASK) != 0; if (string) - return emulate_instruction(vcpu, 0) == EMULATE_DONE; + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; port = io_info >> 16; size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; @@ -3861,7 +3861,7 @@ static int iret_interception(struct vcpu_svm *svm) static int invlpg_interception(struct vcpu_svm *svm) { if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) - return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; + return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; kvm_mmu_invlpg(&svm->vcpu, 
svm->vmcb->control.exit_info_1); return kvm_skip_emulated_instruction(&svm->vcpu); @@ -3869,7 +3869,7 @@ static int invlpg_interception(struct vcpu_svm *svm) static int emulate_on_interception(struct vcpu_svm *svm) { - return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; + return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; } static int rsm_interception(struct vcpu_svm *svm) @@ -4700,7 +4700,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm) ret = avic_unaccel_trap_write(svm); } else { /* Handling Fault */ - ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); + ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); } return ret; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b345ad91809c..f910d33858d9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, * Cause the #SS fault with 0 error code in VM86 mode. */ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { - if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { + if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) { if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; return kvm_vcpu_halt(vcpu); @@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { WARN_ON_ONCE(!enable_vmware_backdoor); - er = emulate_instruction(vcpu, + er = kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); if (er == EMULATE_USER_EXIT) return 0; @@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu) ++vcpu->stat.io_exits; if (string) - return emulate_instruction(vcpu, 0) == EMULATE_DONE; + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; @@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) static int handle_desc(struct kvm_vcpu *vcpu) { WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); - return emulate_instruction(vcpu, 0) == EMULATE_DONE; + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_cr(struct kvm_vcpu *vcpu) @@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu) static int handle_invd(struct kvm_vcpu *vcpu) { - return emulate_instruction(vcpu, 0) == EMULATE_DONE; + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_invlpg(struct kvm_vcpu *vcpu) @@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } } - return emulate_instruction(vcpu, 0) == EMULATE_DONE; + return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) @@ -7704,7 +7704,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) return kvm_skip_emulated_instruction(vcpu); else - return emulate_instruction(vcpu, EMULTYPE_SKIP) == + return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) == EMULATE_DONE; } @@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (kvm_test_request(KVM_REQ_EVENT, vcpu)) return 1; - err = emulate_instruction(vcpu, 0); + err = kvm_emulate_instruction(vcpu, 0); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cbe2921e972b..915002c7f07e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4987,7 +4987,7 @@ int handle_ud(struct kvm_vcpu *vcpu) emul_type = 
0; } - er = emulate_instruction(vcpu, emul_type); + er = kvm_emulate_instruction(vcpu, emul_type); if (er == EMULATE_USER_EXIT) return 0; if (er != EMULATE_DONE) @@ -7740,7 +7740,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu) { int r; vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); + r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) return 0; -- GitLab From c60658d1d983641fcdbb16f86bc2f3806d88bab4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 23 Aug 2018 13:56:53 -0700 Subject: [PATCH 0741/1692] KVM: x86: Unexport x86_emulate_instruction() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allowing x86_emulate_instruction() to be called directly has led to subtle bugs being introduced, e.g. not setting EMULTYPE_NO_REEXECUTE in the emulation type. While most of the blame lies on re-execute being opt-out, exporting x86_emulate_instruction() also exposes its cr2 parameter, which may have contributed to commit d391f1207067 ("x86/kvm/vmx: do not use vm-exit instruction length for fast MMIO when running nested") using x86_emulate_instruction() instead of emulate_instruction() because "hey, I have a cr2!", which in turn introduced its EMULTYPE_NO_REEXECUTE bug. Signed-off-by: Sean Christopherson Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 17 +++-------------- arch/x86/kvm/x86.c | 14 +++++++++++++- arch/x86/kvm/x86.h | 2 ++ 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8c9023661351..e12916e7c2fb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1240,20 +1240,9 @@ enum emulation_result { #define EMULTYPE_ALLOW_RETRY (1 << 3) #define EMULTYPE_NO_UD_ON_FAIL (1 << 4) #define EMULTYPE_VMWARE (1 << 5) -int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, - int emulation_type, void *insn, int insn_len); - -static inline int kvm_emulate_instruction(struct kvm_vcpu *vcpu, - int emulation_type) -{ - return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); -} - -static inline int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, - void *insn, int insn_len) -{ - return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); -} +int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); +int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, + void *insn, int insn_len); void kvm_enable_efer_bits(u64); bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 915002c7f07e..542f6315444d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6282,7 +6282,19 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, return r; } -EXPORT_SYMBOL_GPL(x86_emulate_instruction); + +int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) +{ + return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); +} +EXPORT_SYMBOL_GPL(kvm_emulate_instruction); + +int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, + void *insn, int insn_len) +{ + return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); +} +EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 257f27620bc2..67b9568613f3 100644 --- 
a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num); bool kvm_vector_hashing_enabled(void); +int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, + int emulation_type, void *insn, int insn_len); #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ -- GitLab From 381116d327b55b26cc99fb0fa78526c029fb969b Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Thu, 30 Aug 2018 17:26:24 +0300 Subject: [PATCH 0742/1692] drm/i915: Update DRIVER_DATE to 20180830 Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dc38e99bdaf1..611b71462d3a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -87,7 +87,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" #define DRIVER_DATE "20180830" -#define DRIVER_TIMESTAMP 1535628593 +#define DRIVER_TIMESTAMP 1535639183 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions -- GitLab From 80d34810815b1d708e3e59901a2afcdbd90c2a6f Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 27 Aug 2018 15:55:59 +0300 Subject: [PATCH 0743/1692] ovl: respect FIEMAP_FLAG_SYNC flag Stacked overlayfs fiemap operation broke xfstests that test delayed allocation (with "_test_generic_punch -d"), because ovl_fiemap() failed to write dirty pages when requested. Fixes: 9e142c4102db ("ovl: add ovl_fiemap()") Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/inode.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index e0bb217c01e2..5014749fd4b4 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -467,6 +467,10 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return -EOPNOTSUPP; old_cred = ovl_override_creds(inode->i_sb); + + if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) + filemap_write_and_wait(realinode->i_mapping); + err = realinode->i_op->fiemap(realinode, fieinfo, start, len); revert_creds(old_cred); -- GitLab From 5b910bd615ba947383e63cd1ed106ffa3060159e Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 27 Aug 2018 15:56:00 +0300 Subject: [PATCH 0744/1692] ovl: fix GPF in swapfile_activate of file from overlayfs over xfs Since overlayfs implements stacked file operations, the underlying filesystems are not supposed to be exposed to the overlayfs file, whose f_inode is an overlayfs inode. Assigning an overlayfs file to swap_file results in an attempt of xfs code to dereference an xfs_inode struct from an ovl_inode pointer: CPU: 0 PID: 2462 Comm: swapon Not tainted 4.18.0-xfstests-12721-g33e17876ea4e #3402 RIP: 0010:xfs_find_bdev_for_inode+0x23/0x2f Call Trace: xfs_iomap_swapfile_activate+0x1f/0x43 __se_sys_swapon+0xb1a/0xee9 Fix this by not assigning the real inode mapping to f_mapping, which will cause swapon() to return an error (-EINVAL). Although it makes sense not to allow setting swpafile on an overlayfs file, some users may depend on it, so we may need to fix this up in the future. Keeping f_mapping pointing to overlay inode mapping will cause O_DIRECT open to fail. 
Fix this by installing ovl_aops with noop_direct_IO in overlay inode mapping. Keeping f_mapping pointing to overlay inode mapping will cause other a_ops related operations to fail (e.g. readahead()). Those will be fixed by follow up patches. Suggested-by: Miklos Szeredi Fixes: f7c72396d0de ("ovl: add O_DIRECT support") Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/file.c | 3 --- fs/overlayfs/inode.c | 6 ++++++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 32e9282893c9..a4acd84591d4 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -131,9 +131,6 @@ static int ovl_open(struct inode *inode, struct file *file) if (IS_ERR(realfile)) return PTR_ERR(realfile); - /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */ - file->f_mapping = realfile->f_mapping; - file->private_data = realfile; return 0; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 5014749fd4b4..b6ac545b5a32 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -504,6 +504,11 @@ static const struct inode_operations ovl_special_inode_operations = { .update_time = ovl_update_time, }; +const struct address_space_operations ovl_aops = { + /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */ + .direct_IO = noop_direct_IO, +}; + /* * It is possible to stack overlayfs instance on top of another * overlayfs instance as lower layer. We need to annonate the @@ -575,6 +580,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev, case S_IFREG: inode->i_op = &ovl_file_inode_operations; inode->i_fop = &ovl_file_operations; + inode->i_mapping->a_ops = &ovl_aops; break; case S_IFDIR: -- GitLab From 17ef445f9befdc5c9adac270b18240ad24ee50ec Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 27 Aug 2018 15:56:01 +0300 Subject: [PATCH 0745/1692] Documentation/filesystems: update documentation of file_operations ...to kernel 4.18. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- Documentation/filesystems/vfs.txt | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 4b2084d0f1fb..ec2142c8dbd3 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -848,7 +848,7 @@ struct file_operations ---------------------- This describes how the VFS can manipulate an open file. 
As of kernel -4.1, the following members are defined: +4.18, the following members are defined: struct file_operations { struct module *owner; @@ -858,11 +858,11 @@ struct file_operations { ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iterate) (struct file *, struct dir_context *); + int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); - int (*mremap)(struct file *, struct vm_area_struct *); int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); @@ -882,6 +882,9 @@ struct file_operations { #ifndef CONFIG_MMU unsigned (*mmap_capabilities)(struct file *); #endif + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); + int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64); + int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64); }; Again, all methods are called without any locks being held, unless @@ -899,6 +902,9 @@ otherwise noted. iterate: called when the VFS needs to read the directory contents + iterate_shared: called when the VFS needs to read the directory contents + when filesystem supports concurrent dir iterators + poll: called by the VFS when a process wants to check if there is activity on this file and (optionally) go to sleep until there is activity. Called by the select(2) and poll(2) system calls @@ -951,6 +957,14 @@ otherwise noted. fallocate: called by the VFS to preallocate blocks or punch a hole. + copy_file_range: called by the copy_file_range(2) system call. + + clone_file_range: called by the ioctl(2) system call for FICLONERANGE and + FICLONE commands. + + dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE + command. + Note that the file operations are implemented by the specific filesystem in which the inode resides. When opening a device node (character or block special) most filesystems will call special -- GitLab From 45cd0faae3715e305bc46e23b34c5ed4d185ceb8 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 27 Aug 2018 15:56:02 +0300 Subject: [PATCH 0746/1692] vfs: add the fadvise() file operation This is going to be used by overlayfs and possibly useful for other filesystems. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- Documentation/filesystems/vfs.txt | 3 ++ include/linux/fs.h | 5 ++ mm/fadvise.c | 78 ++++++++++++++++++------------- 3 files changed, 53 insertions(+), 33 deletions(-) diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index ec2142c8dbd3..a6c6a8af48a2 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -885,6 +885,7 @@ struct file_operations { ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64); int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64); + int (*fadvise)(struct file *, loff_t, loff_t, int); }; Again, all methods are called without any locks being held, unless @@ -965,6 +966,8 @@ otherwise noted. dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE command. 
+ fadvise: possibly called by the fadvise64() system call. + Note that the file operations are implemented by the specific filesystem in which the inode resides. When opening a device node (character or block special) most filesystems will call special diff --git a/include/linux/fs.h b/include/linux/fs.h index 33322702c910..6c0b4a1c22ff 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1763,6 +1763,7 @@ struct file_operations { u64); int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64); + int (*fadvise)(struct file *, loff_t, loff_t, int); } __randomize_layout; struct inode_operations { @@ -3459,4 +3460,8 @@ static inline bool dir_relax_shared(struct inode *inode) extern bool path_noexec(const struct path *path); extern void inode_nohighmem(struct inode *inode); +/* mm/fadvise.c */ +extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, + int advice); + #endif /* _LINUX_FS_H */ diff --git a/mm/fadvise.c b/mm/fadvise.c index 2d8376e3c640..2f59bac1cb77 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -27,9 +27,9 @@ * deactivate the pages and clear PG_Referenced. */ -int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) +static int generic_fadvise(struct file *file, loff_t offset, loff_t len, + int advice) { - struct fd f = fdget(fd); struct inode *inode; struct address_space *mapping; struct backing_dev_info *bdi; @@ -37,22 +37,14 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) pgoff_t start_index; pgoff_t end_index; unsigned long nrpages; - int ret = 0; - if (!f.file) - return -EBADF; + inode = file_inode(file); + if (S_ISFIFO(inode->i_mode)) + return -ESPIPE; - inode = file_inode(f.file); - if (S_ISFIFO(inode->i_mode)) { - ret = -ESPIPE; - goto out; - } - - mapping = f.file->f_mapping; - if (!mapping || len < 0) { - ret = -EINVAL; - goto out; - } + mapping = file->f_mapping; + if (!mapping || len < 0) + return -EINVAL; bdi = inode_to_bdi(mapping->host); @@ -67,9 +59,9 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) /* no bad return value, but ignore advice */ break; default: - ret = -EINVAL; + return -EINVAL; } - goto out; + return 0; } /* @@ -85,21 +77,21 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) switch (advice) { case POSIX_FADV_NORMAL: - f.file->f_ra.ra_pages = bdi->ra_pages; - spin_lock(&f.file->f_lock); - f.file->f_mode &= ~FMODE_RANDOM; - spin_unlock(&f.file->f_lock); + file->f_ra.ra_pages = bdi->ra_pages; + spin_lock(&file->f_lock); + file->f_mode &= ~FMODE_RANDOM; + spin_unlock(&file->f_lock); break; case POSIX_FADV_RANDOM: - spin_lock(&f.file->f_lock); - f.file->f_mode |= FMODE_RANDOM; - spin_unlock(&f.file->f_lock); + spin_lock(&file->f_lock); + file->f_mode |= FMODE_RANDOM; + spin_unlock(&file->f_lock); break; case POSIX_FADV_SEQUENTIAL: - f.file->f_ra.ra_pages = bdi->ra_pages * 2; - spin_lock(&f.file->f_lock); - f.file->f_mode &= ~FMODE_RANDOM; - spin_unlock(&f.file->f_lock); + file->f_ra.ra_pages = bdi->ra_pages * 2; + spin_lock(&file->f_lock); + file->f_mode &= ~FMODE_RANDOM; + spin_unlock(&file->f_lock); break; case POSIX_FADV_WILLNEED: /* First and last PARTIAL page! 
*/ @@ -115,8 +107,7 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) * Ignore return value because fadvise() shall return * success even if filesystem can't retrieve a hint, */ - force_page_cache_readahead(mapping, f.file, start_index, - nrpages); + force_page_cache_readahead(mapping, file, start_index, nrpages); break; case POSIX_FADV_NOREUSE: break; @@ -183,9 +174,30 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) } break; default: - ret = -EINVAL; + return -EINVAL; } -out: + return 0; +} + +int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice) +{ + if (file->f_op->fadvise) + return file->f_op->fadvise(file, offset, len, advice); + + return generic_fadvise(file, offset, len, advice); +} +EXPORT_SYMBOL(vfs_fadvise); + +int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) +{ + struct fd f = fdget(fd); + int ret; + + if (!f.file) + return -EBADF; + + ret = vfs_fadvise(f.file, offset, len, advice); + fdput(f); return ret; } -- GitLab From 58f33cfe73076b6497bada4f7b5bda961ed68083 Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Fri, 24 Aug 2018 14:03:55 +0200 Subject: [PATCH 0747/1692] tools/kvm_stat: fix python3 issues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Python3 returns a float for a regular division - switch to a division operator that returns an integer. Furthermore, filters return a generator object instead of the actual list - wrap result in yet another list, which makes it still work in both, Python2 and 3. Signed-off-by: Stefan Raspl Signed-off-by: Radim Krčmář --- tools/kvm/kvm_stat/kvm_stat | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 56c4b3f8a01b..e10b90a8917a 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -759,7 +759,7 @@ class DebugfsProvider(Provider): if len(vms) == 0: self.do_read = False - self.paths = filter(lambda x: "{}-".format(pid) in x, vms) + self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms)) else: self.paths = [] @@ -1219,10 +1219,10 @@ class Tui(object): (x, term_width) = self.screen.getmaxyx() row = 2 for line in text: - start = (term_width - len(line)) / 2 + start = (term_width - len(line)) // 2 self.screen.addstr(row, start, line) row += 1 - self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint, + self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint, curses.A_STANDOUT) self.screen.getkey() -- GitLab From 617c66b9f236d20f11cecbb3f45e6d5675b2fae1 Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Fri, 24 Aug 2018 14:03:56 +0200 Subject: [PATCH 0748/1692] tools/kvm_stat: fix handling of invalid paths in debugfs provider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When filtering by guest, kvm_stat displays garbage when the guest is destroyed - see sample output below. We add code to remove the invalid paths from the providers, so at least no more garbage is displayed. 
Here's a sample output to illustrate: kvm statistics - pid 13986 (foo) Event Total %Total CurAvg/s diagnose_258 -2 0.0 0 deliver_program_interruption -3 0.0 0 diagnose_308 -4 0.0 0 halt_poll_invalid -91 0.0 -6 deliver_service_signal -244 0.0 -16 halt_successful_poll -250 0.1 -17 exit_pei -285 0.1 -19 exit_external_request -312 0.1 -21 diagnose_9c -328 0.1 -22 userspace_handled -713 0.1 -47 halt_attempted_poll -939 0.2 -62 deliver_emergency_signal -3126 0.6 -208 halt_wakeup -7199 1.5 -481 exit_wait_state -7379 1.5 -493 diagnose_500 -56499 11.5 -3757 exit_null -85491 17.4 -5685 diagnose_44 -133300 27.1 -8874 exit_instruction -195898 39.8 -13037 Total -492063 Signed-off-by: Stefan Raspl Signed-off-by: Radim Krčmář --- tools/kvm/kvm_stat/kvm_stat | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index e10b90a8917a..b9e8d0def1ab 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -766,6 +766,13 @@ class DebugfsProvider(Provider): self.do_read = True self.reset() + def _verify_paths(self): + """Remove invalid paths""" + for path in self.paths: + if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)): + self.paths.remove(path) + continue + def read(self, reset=0, by_guest=0): """Returns a dict with format:'file name / field -> current value'. @@ -780,6 +787,7 @@ class DebugfsProvider(Provider): # If no debugfs filtering support is available, then don't read. if not self.do_read: return results + self._verify_paths() paths = self.paths if self._pid == 0: -- GitLab From 710ab11ad9329d2d4b044405e328c994b19a2aa9 Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Fri, 24 Aug 2018 14:03:57 +0200 Subject: [PATCH 0749/1692] tools/kvm_stat: fix updates for dead guests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With pid filtering active, when a guest is removed e.g. via virsh shutdown, successive updates produce garbage. Therefore, we add code to detect this case and prevent further body updates. Note that when displaying the help dialog via 'h' in this case, once we exit we're stuck with the 'Collecting data...' message till we remove the filter. Signed-off-by: Stefan Raspl Signed-off-by: Radim Krčmář --- tools/kvm/kvm_stat/kvm_stat | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index b9e8d0def1ab..7c92545931e3 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -1170,6 +1170,9 @@ class Tui(object): return sorted_items + if not self._is_running_guest(self.stats.pid_filter): + # leave final data on screen + return row = 3 self.screen.move(row, 0) self.screen.clrtobot() @@ -1327,6 +1330,12 @@ class Tui(object): msg = '"' + str(val) + '": Invalid value' self._refresh_header() + def _is_running_guest(self, pid): + """Check if pid is still a running process.""" + if not pid: + return True + return os.path.isdir(os.path.join('/proc/', str(pid))) + def _show_vm_selection_by_guest(self): """Draws guest selection mask. 
@@ -1354,7 +1363,7 @@ class Tui(object): if not guest or guest == '0': break if guest.isdigit(): - if not os.path.isdir(os.path.join('/proc/', guest)): + if not self._is_running_guest(guest): msg = '"' + guest + '": Not a running process' continue pid = int(guest) -- GitLab From 0db8b3102368755242b44f2b30f93302c70e8e82 Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Fri, 24 Aug 2018 14:03:58 +0200 Subject: [PATCH 0750/1692] tools/kvm_stat: don't reset stats when setting PID filter for debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When setting a PID filter in debugfs, we unnecessarily reset the statistics, although there is no reason to do so. This behavior was merely introduced with commit 9f114a03c6854f "tools/kvm_stat: add interactive command 'r'", most likely to mimic the behavior of the tracepoints provider in this respect. However, there are plenty of differences between the two providers, so there is no reason not to take advantage of the possibility to filter by PID without resetting the statistics. Signed-off-by: Stefan Raspl Signed-off-by: Radim Krčmář --- tools/kvm/kvm_stat/kvm_stat | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 7c92545931e3..62fbb8802f60 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -764,7 +764,6 @@ class DebugfsProvider(Provider): else: self.paths = [] self.do_read = True - self.reset() def _verify_paths(self): """Remove invalid paths""" -- GitLab From 29c39f38e4e8dbf0497e81db6c985ee59259f002 Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Fri, 24 Aug 2018 14:03:59 +0200 Subject: [PATCH 0751/1692] tools/kvm_stat: handle guest removals more gracefully MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When running with the DebugFS provider, removal of a guest can result in a negative CurAvg/s, which looks rather confusing. If so, suppress the body refresh and print a message instead. To reproduce, have at least one guest A completely booted. Then start another guest B (which generates a huge amount of events), then destroy B. On the next refresh, kvm_stat should display a whole lot of negative values in the CurAvg/s column. 
Signed-off-by: Stefan Raspl Signed-off-by: Radim Krčmář --- tools/kvm/kvm_stat/kvm_stat | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 62fbb8802f60..bd620579eb6f 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -1194,6 +1194,7 @@ class Tui(object): # print events tavg = 0 tcur = 0 + guest_removed = False for key, values in get_sorted_events(self, stats): if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): break @@ -1201,7 +1202,10 @@ class Tui(object): key = self.get_gname_from_pid(key) if not key: continue - cur = int(round(values.delta / sleeptime)) if values.delta else '' + cur = int(round(values.delta / sleeptime)) if values.delta else 0 + if cur < 0: + guest_removed = True + continue if key[0] != ' ': if values.delta: tcur += values.delta @@ -1214,7 +1218,10 @@ class Tui(object): values.value * 100 / float(ltotal), cur)) row += 1 if row == 3: - self.screen.addstr(4, 1, 'No matching events reported yet') + if guest_removed: + self.screen.addstr(4, 1, 'Guest removed, updating...') + else: + self.screen.addstr(4, 1, 'No matching events reported yet') if row > 4: tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' self.screen.addstr(row, 1, '%-40s %10d %8s' % -- GitLab From 404517e40867aef60554ef497d5cf8d089a5b9cf Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Fri, 24 Aug 2018 14:04:00 +0200 Subject: [PATCH 0752/1692] tools/kvm_stat: indicate dead guests as such MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For destroyed guests, kvm_stat essentially freezes with the last data displayed. This is acceptable for users, in case they want to inspect the final data. But it looks a bit irritating. Therefore, detect this situation and display a respective indicator in the header. 
Signed-off-by: Stefan Raspl Signed-off-by: Radim Krčmář --- tools/kvm/kvm_stat/kvm_stat | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index bd620579eb6f..5c2422b0f2f8 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -1108,10 +1108,10 @@ class Tui(object): if len(gname) > MAX_GUEST_NAME_LEN else gname)) if pid > 0: - self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}' - .format(pid, gname), curses.A_BOLD) + self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname) else: - self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) + self._headline = 'kvm statistics - summary' + self.screen.addstr(0, 0, self._headline, curses.A_BOLD) if self.stats.fields_filter: regex = self.stats.fields_filter if len(regex) > MAX_REGEX_LEN: @@ -1170,6 +1170,7 @@ class Tui(object): return sorted_items if not self._is_running_guest(self.stats.pid_filter): + self._display_guest_dead() # leave final data on screen return row = 3 @@ -1228,6 +1229,11 @@ class Tui(object): ('Total', total, tavg), curses.A_BOLD) self.screen.refresh() + def _display_guest_dead(self): + marker = ' Guest is DEAD ' + y = min(len(self._headline), 80 - len(marker)) + self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT) + def _show_msg(self, text): """Display message centered text and exit on key press""" hint = 'Press any key to continue' -- GitLab From c012a0f2677529a0ae8f53a15bd7c61dc4ca5b5e Mon Sep 17 00:00:00 2001 From: Stefan Raspl Date: Fri, 24 Aug 2018 14:04:01 +0200 Subject: [PATCH 0753/1692] tools/kvm_stat: re-animate display of dead guests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When filtering by guest (interactive commands 'p'/'g'), and the respective guest was destroyed, detect when the guest is up again through the guest name if possible. I.e. when displaying events for a specific guest, it is not necessary anymore to restart kvm_stat in case the guest is restarted. Signed-off-by: Stefan Raspl Signed-off-by: Radim Krčmář --- tools/kvm/kvm_stat/kvm_stat | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat index 5c2422b0f2f8..439b8a27488d 100755 --- a/tools/kvm/kvm_stat/kvm_stat +++ b/tools/kvm/kvm_stat/kvm_stat @@ -1103,6 +1103,7 @@ class Tui(object): pid = self.stats.pid_filter self.screen.erase() gname = self.get_gname_from_pid(pid) + self._gname = gname if gname: gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...' if len(gname) > MAX_GUEST_NAME_LEN @@ -1170,6 +1171,15 @@ class Tui(object): return sorted_items if not self._is_running_guest(self.stats.pid_filter): + if self._gname: + try: # ...to identify the guest by name in case it's back + pids = self.get_pid_from_gname(self._gname) + if len(pids) == 1: + self._refresh_header(pids[0]) + self._update_pid(pids[0]) + return + except: + pass self._display_guest_dead() # leave final data on screen return -- GitLab From 096055487115883dc82fdebb5d16444585e4fc24 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Thu, 30 Aug 2018 14:24:24 +0100 Subject: [PATCH 0754/1692] drm/i915: clear error registers after error capture We need to clear the register in order to get correct value after the next potential hang. v2: Centralize error register clearing in i915_irq.c (Chris) v3: Don't read gen8 register on < gen6 (Chris) v4: Don't swap gen8+ & gen6+ code... 
(Chris) Signed-off-by: Lionel Landwerlin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180830132424.21940-1-lionel.g.landwerlin@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem_gtt.c | 18 ++++++------------ drivers/gpu/drm/i915/i915_irq.c | 18 +++++++++++++++++- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 611b71462d3a..14e562887307 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2807,6 +2807,8 @@ extern void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); +void i915_clear_error_registers(struct drm_i915_private *dev_priv); + static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { return dev_priv->gvt; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 4137af4bd8f5..d9d44639ba26 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2337,7 +2337,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv) return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active(); } -static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv) +static void gen6_check_faults(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -2355,15 +2355,11 @@ static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv) fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", RING_FAULT_SRCID(fault), RING_FAULT_FAULT_TYPE(fault)); - I915_WRITE(RING_FAULT_REG(engine), - fault & ~RING_FAULT_VALID); } } - - POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); } -static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv) +static void gen8_check_faults(struct drm_i915_private *dev_priv) { u32 fault = I915_READ(GEN8_RING_FAULT_REG); @@ -2388,22 +2384,20 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv) GEN8_RING_FAULT_ENGINE_ID(fault), RING_FAULT_SRCID(fault), RING_FAULT_FAULT_TYPE(fault)); - I915_WRITE(GEN8_RING_FAULT_REG, - fault & ~RING_FAULT_VALID); } - - POSTING_READ(GEN8_RING_FAULT_REG); } void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) { /* From GEN8 onwards we only have one 'All Engine Fault Register' */ if (INTEL_GEN(dev_priv) >= 8) - gen8_check_and_clear_faults(dev_priv); + gen8_check_faults(dev_priv); else if (INTEL_GEN(dev_priv) >= 6) - gen6_check_and_clear_faults(dev_priv); + gen6_check_faults(dev_priv); else return; + + i915_clear_error_registers(dev_priv); } void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8084e35b25c5..e31093ce871c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3215,7 +3215,7 @@ static void i915_reset_device(struct drm_i915_private *dev_priv, kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); } -static void i915_clear_error_registers(struct drm_i915_private *dev_priv) +void i915_clear_error_registers(struct drm_i915_private *dev_priv) { u32 eir; @@ -3238,6 +3238,22 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) I915_WRITE(EMR, I915_READ(EMR) | eir); I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); } + + if (INTEL_GEN(dev_priv) >= 8) { + 
I915_WRITE(GEN8_RING_FAULT_REG, + I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); + POSTING_READ(GEN8_RING_FAULT_REG); + } else if (INTEL_GEN(dev_priv) >= 6) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, dev_priv, id) { + I915_WRITE(RING_FAULT_REG(engine), + I915_READ(RING_FAULT_REG(engine)) & + ~RING_FAULT_VALID); + } + POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); + } } /** -- GitLab From 70b73f9ac113983f9c7db9887447f1344ac5b69b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 Aug 2018 17:10:42 +0100 Subject: [PATCH 0755/1692] drm/i915/ringbuffer: Delay after invalidating gen6+ xcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During stress testing of full-ppgtt (on Baytrail at least), we found that the invalidation around a context/mm switch was insufficient (writes would go astray). Adding a second MI_FLUSH_DW barrier prevents this, but it is unclear as to whether this is merely a delaying tactic or if it is truly serialising with the TLB invalidation. Either way, it is empirically required. v2: Avoid the loop for readability; Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107715 References: https://bugs.freedesktop.org/show_bug.cgi?id=107759 Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Mika Kuoppala Cc: Matthew Auld Cc: Tvrtko Ursulin Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180830161042.29193-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_ringbuffer.c | 69 ++++++++++++------------- 1 file changed, 34 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d40f55a8dc34..44432677160c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1944,7 +1944,7 @@ static void gen6_bsd_submit_request(struct i915_request *request) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) +static int emit_mi_flush_dw(struct i915_request *rq, u32 flags) { u32 cmd, *cs; @@ -1954,7 +1954,8 @@ static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) cmd = MI_FLUSH_DW; - /* We always require a command barrier so that subsequent + /* + * We always require a command barrier so that subsequent * commands, such as breadcrumb interrupts, are strictly ordered * wrt the contents of the write cache being flushed to memory * (and thus being coherent from the CPU). @@ -1962,22 +1963,49 @@ static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; /* - * Bspec vol 1c.5 - video engine command streamer: + * Bspec vol 1c.3 - blitter engine command streamer: * "If ENABLED, all TLBs will be invalidated once the flush * operation is complete. This bit is only valid when the * Post-Sync Operation field is a value of 1h or 3h." */ - if (mode & EMIT_INVALIDATE) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; + cmd |= flags; *cs++ = cmd; *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; *cs++ = 0; *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + return 0; } +static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags) +{ + int err; + + /* + * Not only do we need a full barrier (post-sync write) after + * invalidating the TLBs, but we need to wait a little bit + * longer. 
Whether this is merely delaying us, or the + * subsequent flush is a key part of serialising with the + * post-sync op, this extra pass appears vital before a + * mm switch! + */ + if (mode & EMIT_INVALIDATE) { + err = emit_mi_flush_dw(rq, invflags); + if (err) + return err; + } + + return emit_mi_flush_dw(rq, 0); +} + +static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) +{ + return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD); +} + static int hsw_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, @@ -2022,36 +2050,7 @@ gen6_emit_bb_start(struct i915_request *rq, static int gen6_ring_flush(struct i915_request *rq, u32 mode) { - u32 cmd, *cs; - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - cmd = MI_FLUSH_DW; - - /* We always require a command barrier so that subsequent - * commands, such as breadcrumb interrupts, are strictly ordered - * wrt the contents of the write cache being flushed to memory - * (and thus being coherent from the CPU). - */ - cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; - - /* - * Bspec vol 1c.3 - blitter engine command streamer: - * "If ENABLED, all TLBs will be invalidated once the flush - * operation is complete. This bit is only valid when the - * Post-Sync Operation field is a value of 1h or 3h." - */ - if (mode & EMIT_INVALIDATE) - cmd |= MI_INVALIDATE_TLB; - *cs++ = cmd; - *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; - *cs++ = 0; - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - return 0; + return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB); } static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, -- GitLab From 1dc27f63303db58ce1b1a6932d1825305f86d574 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Li=C5=A1ka?= Date: Thu, 23 Aug 2018 14:29:34 +0200 Subject: [PATCH 0756/1692] perf annotate: Properly interpret indirect call MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The patch changes the parsing of: callq *0x8(%rbx) from: 0.26 │ → callq *8 to: 0.26 │ → callq *0x8(%rbx) in this case an address is followed by a register, thus one can't parse only the address. 
Committer testing: 1) run 'perf record sleep 10' 2) before applying the patch, run: perf annotate --stdio2 > /tmp/before 3) after applying the patch, run: perf annotate --stdio2 > /tmp/after 4) diff /tmp/before /tmp/after: --- /tmp/before 2018-08-28 11:16:03.238384143 -0300 +++ /tmp/after 2018-08-28 11:15:39.335341042 -0300 @@ -13274,7 +13274,7 @@ ↓ jle 128 hash_value = hash_table->hash_func (key); mov 0x8(%rsp),%rdi - 0.91 → callq *30 + 0.91 → callq *0x30(%r12) mov $0x2,%r8d cmp $0x2,%eax node_hash = hash_table->hashes[node_index]; @@ -13848,7 +13848,7 @@ mov %r14,%rdi sub %rbx,%r13 mov %r13,%rdx - → callq *38 + → callq *0x38(%r15) cmp %rax,%r13 1.91 ↓ je 240 1b4: mov $0xffffffff,%r13d @@ -14026,7 +14026,7 @@ mov %rcx,-0x500(%rbp) mov %r15,%rsi mov %r14,%rdi - → callq *38 + → callq *0x38(%rax) mov -0x500(%rbp),%rcx cmp %rax,%rcx ↓ jne 9b0 Signed-off-by: Martin Liška Tested-by: Arnaldo Carvalho de Melo Tested-by: Kim Phillips Cc: Jiri Olsa Link: http://lkml.kernel.org/r/bd1f3932-be2b-85f9-7582-111ee0a43b07@suse.cz Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 20061cf42288..e62b69ea87cd 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -246,8 +246,14 @@ static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_s indirect_call: tok = strchr(endptr, '*'); - if (tok != NULL) - ops->target.addr = strtoull(tok + 1, NULL, 16); + if (tok != NULL) { + endptr++; + + /* Indirect call can use a non-rip register and offset: callq *0x8(%rbx). + * Do not parse such instruction. */ + if (strstr(endptr, "(%r") == NULL) + ops->target.addr = strtoull(endptr, NULL, 16); + } goto find_target; } -- GitLab From 9b3579fc6c6ac45502de1fa9a1fdf873805c2157 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:24 +0200 Subject: [PATCH 0757/1692] perf tests: Add breakpoint modify tests Adding to tests that aims on kernel breakpoint modification bugs. First test creates HW breakpoint, tries to change it and checks it was properly changed. It aims on kernel issue that prevents HW breakpoint to be changed via ptrace interface. The first test forks, the child sets itself as ptrace tracee and waits in signal for parent to trace it, then it calls bp_1 and quits. The parent does following steps: - creates a new breakpoint (id 0) for bp_2 function - changes that breakpoint to bp_1 function - waits for the breakpoint to hit and checks it has proper rip of bp_1 function This test aims on an issue in kernel preventing to change disabled breakpoints Second test mimics the first one except for few steps in the parent: - creates a new breakpoint (id 0) for bp_1 function - changes that breakpoint to bogus (-1) address - waits for the breakpoint to hit and checks it has proper rip of bp_1 function This test aims on an issue in kernel disabling enabled breakpoint after unsuccesful change. Committer testing: # uname -a Linux jouet 4.18.0-rc8-00002-g1236568ee3cb #12 SMP Tue Aug 7 14:08:26 -03 2018 x86_64 x86_64 x86_64 GNU/Linux # perf test -v "bp modify" 62: x86 bp modify : --- start --- test child forked, pid 25671 in bp_1 tracee exited prematurely 2 FAILED arch/x86/tests/bp-modify.c:209 modify test 1 failed test child finished with -1 ---- end ---- x86 bp modify: FAILED! 
# Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Milind Chabbi Cc: Namhyung Kim Cc: Oleg Nesterov Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-2-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/include/arch-tests.h | 1 + tools/perf/arch/x86/tests/Build | 1 + tools/perf/arch/x86/tests/arch-tests.c | 6 + tools/perf/arch/x86/tests/bp-modify.c | 213 +++++++++++++++++++++++ 4 files changed, 221 insertions(+) create mode 100644 tools/perf/arch/x86/tests/bp-modify.c diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h index c1bd979b957b..613709cfbbd0 100644 --- a/tools/perf/arch/x86/include/arch-tests.h +++ b/tools/perf/arch/x86/include/arch-tests.h @@ -9,6 +9,7 @@ struct test; int test__rdpmc(struct test *test __maybe_unused, int subtest); int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest); int test__insn_x86(struct test *test __maybe_unused, int subtest); +int test__bp_modify(struct test *test, int subtest); #ifdef HAVE_DWARF_UNWIND_SUPPORT struct thread; diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build index 8e2c5a38c3b9..586849ff83a0 100644 --- a/tools/perf/arch/x86/tests/Build +++ b/tools/perf/arch/x86/tests/Build @@ -5,3 +5,4 @@ libperf-y += arch-tests.o libperf-y += rdpmc.o libperf-y += perf-time-to-tsc.o libperf-$(CONFIG_AUXTRACE) += insn-x86.o +libperf-$(CONFIG_X86_64) += bp-modify.o diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c index cc1802ff5410..d47d3f8e3c8e 100644 --- a/tools/perf/arch/x86/tests/arch-tests.c +++ b/tools/perf/arch/x86/tests/arch-tests.c @@ -23,6 +23,12 @@ struct test arch_tests[] = { .desc = "x86 instruction decoder - new instructions", .func = test__insn_x86, }, +#endif +#if defined(__x86_64__) + { + .desc = "x86 bp modify", + .func = test__bp_modify, + }, #endif { .func = NULL, diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c new file mode 100644 index 000000000000..f53e4406709f --- /dev/null +++ b/tools/perf/arch/x86/tests/bp-modify.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "debug.h" +#include "tests/tests.h" +#include "arch-tests.h" + +static noinline int bp_1(void) +{ + pr_debug("in %s\n", __func__); + return 0; +} + +static noinline int bp_2(void) +{ + pr_debug("in %s\n", __func__); + return 0; +} + +static int spawn_child(void) +{ + int child = fork(); + + if (child == 0) { + /* + * The child sets itself for as tracee and + * waits in signal for parent to trace it, + * then it calls bp_1 and quits. + */ + int err = ptrace(PTRACE_TRACEME, 0, NULL, NULL); + + if (err) { + pr_debug("failed to PTRACE_TRACEME\n"); + exit(1); + } + + raise(SIGCONT); + bp_1(); + exit(0); + } + + return child; +} + +/* + * This tests creates HW breakpoint, tries to + * change it and checks it was properly changed. 
+ */ +static int bp_modify1(void) +{ + pid_t child; + int status; + unsigned long rip = 0, dr7 = 1; + + child = spawn_child(); + + waitpid(child, &status, 0); + if (WIFEXITED(status)) { + pr_debug("tracee exited prematurely 1\n"); + return TEST_FAIL; + } + + /* + * The parent does following steps: + * - creates a new breakpoint (id 0) for bp_2 function + * - changes that breakponit to bp_1 function + * - waits for the breakpoint to hit and checks + * it has proper rip of bp_1 function + * - detaches the child + */ + if (ptrace(PTRACE_POKEUSER, child, + offsetof(struct user, u_debugreg[0]), bp_2)) { + pr_debug("failed to set breakpoint, 1st time: %s\n", + strerror(errno)); + goto out; + } + + if (ptrace(PTRACE_POKEUSER, child, + offsetof(struct user, u_debugreg[0]), bp_1)) { + pr_debug("failed to set breakpoint, 2nd time: %s\n", + strerror(errno)); + goto out; + } + + if (ptrace(PTRACE_POKEUSER, child, + offsetof(struct user, u_debugreg[7]), dr7)) { + pr_debug("failed to set dr7: %s\n", strerror(errno)); + goto out; + } + + if (ptrace(PTRACE_CONT, child, NULL, NULL)) { + pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno)); + goto out; + } + + waitpid(child, &status, 0); + if (WIFEXITED(status)) { + pr_debug("tracee exited prematurely 2\n"); + return TEST_FAIL; + } + + rip = ptrace(PTRACE_PEEKUSER, child, + offsetof(struct user_regs_struct, rip), NULL); + if (rip == (unsigned long) -1) { + pr_debug("failed to PTRACE_PEEKUSER: %s\n", + strerror(errno)); + goto out; + } + + pr_debug("rip %lx, bp_1 %p\n", rip, bp_1); + +out: + if (ptrace(PTRACE_DETACH, child, NULL, NULL)) { + pr_debug("failed to PTRACE_DETACH: %s", strerror(errno)); + return TEST_FAIL; + } + + return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL; +} + +/* + * This tests creates HW breakpoint, tries to + * change it to bogus value and checks the original + * breakpoint is hit. 
+ */ +static int bp_modify2(void) +{ + pid_t child; + int status; + unsigned long rip = 0, dr7 = 1; + + child = spawn_child(); + + waitpid(child, &status, 0); + if (WIFEXITED(status)) { + pr_debug("tracee exited prematurely 1\n"); + return TEST_FAIL; + } + + /* + * The parent does following steps: + * - creates a new breakpoint (id 0) for bp_1 function + * - tries to change that breakpoint to (-1) address + * - waits for the breakpoint to hit and checks + * it has proper rip of bp_1 function + * - detaches the child + */ + if (ptrace(PTRACE_POKEUSER, child, + offsetof(struct user, u_debugreg[0]), bp_1)) { + pr_debug("failed to set breakpoint: %s\n", + strerror(errno)); + goto out; + } + + if (ptrace(PTRACE_POKEUSER, child, + offsetof(struct user, u_debugreg[7]), dr7)) { + pr_debug("failed to set dr7: %s\n", strerror(errno)); + goto out; + } + + if (!ptrace(PTRACE_POKEUSER, child, + offsetof(struct user, u_debugreg[0]), (unsigned long) (-1))) { + pr_debug("failed, breakpoint set to bogus address\n"); + goto out; + } + + if (ptrace(PTRACE_CONT, child, NULL, NULL)) { + pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno)); + goto out; + } + + waitpid(child, &status, 0); + if (WIFEXITED(status)) { + pr_debug("tracee exited prematurely 2\n"); + return TEST_FAIL; + } + + rip = ptrace(PTRACE_PEEKUSER, child, + offsetof(struct user_regs_struct, rip), NULL); + if (rip == (unsigned long) -1) { + pr_debug("failed to PTRACE_PEEKUSER: %s\n", + strerror(errno)); + goto out; + } + + pr_debug("rip %lx, bp_1 %p\n", rip, bp_1); + +out: + if (ptrace(PTRACE_DETACH, child, NULL, NULL)) { + pr_debug("failed to PTRACE_DETACH: %s", strerror(errno)); + return TEST_FAIL; + } + + return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL; +} + +int test__bp_modify(struct test *test __maybe_unused, + int subtest __maybe_unused) +{ + TEST_ASSERT_VAL("modify test 1 failed\n", !bp_modify1()); + TEST_ASSERT_VAL("modify test 2 failed\n", !bp_modify2()); + + return 0; +} -- GitLab From bd14406b78e6daa1ea3c1673bda1ffc9efdeead0 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:25 +0200 Subject: [PATCH 0758/1692] perf/hw_breakpoint: Modify breakpoint even if the new attr has disabled set We need to change the breakpoint even if the attr with new fields has disabled set to true. 
Current code prevents following user code to change the breakpoint address: ptrace(PTRACE_POKEUSER, child, offsetof(struct user, u_debugreg[0]), addr_1) ptrace(PTRACE_POKEUSER, child, offsetof(struct user, u_debugreg[0]), addr_2) ptrace(PTRACE_POKEUSER, child, offsetof(struct user, u_debugreg[7]), dr7) The first PTRACE_POKEUSER creates the breakpoint with attr.disabled set to true: ptrace_set_breakpoint_addr(nr = 0) struct perf_event *bp = t->ptrace_bps[nr]; ptrace_register_breakpoint(..., disabled = true) ptrace_fill_bp_fields(..., disabled) register_user_hw_breakpoint So the second PTRACE_POKEUSER will be omitted: ptrace_set_breakpoint_addr(nr = 0) struct perf_event *bp = t->ptrace_bps[nr]; struct perf_event_attr attr = bp->attr; modify_user_hw_breakpoint(bp, &attr) if (!attr->disabled) modify_user_hw_breakpoint_check Reported-by: Milind Chabbi Signed-off-by: Jiri Olsa Acked-by: Frederic Weisbecker Acked-by: Oleg Nesterov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-3-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/hw_breakpoint.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index b3814fce5ecb..fb229d9c7f3c 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -509,6 +509,8 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a */ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { + int err; + /* * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it * will not be possible to raise IPIs that invoke __perf_event_disable. @@ -520,11 +522,11 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att else perf_event_disable(bp); - if (!attr->disabled) { - int err = modify_user_hw_breakpoint_check(bp, attr, false); + err = modify_user_hw_breakpoint_check(bp, attr, false); + if (err) + return err; - if (err) - return err; + if (!attr->disabled) { perf_event_enable(bp); bp->attr.disabled = 0; } -- GitLab From cb45302d7c5e20f0c0598cdbd7753fa44daceb2a Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:26 +0200 Subject: [PATCH 0759/1692] perf/hw_breakpoint: Remove superfluous bp->attr.disabled = 0 Once the breakpoint was succesfully modified, the attr->disabled value is in bp->attr.disabled. So there's no reason to set it again, removing that. 
Signed-off-by: Jiri Olsa Acked-by: Frederic Weisbecker Acked-by: Oleg Nesterov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Milind Chabbi Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-4-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/hw_breakpoint.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index fb229d9c7f3c..3e560d7609fd 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -526,10 +526,9 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att if (err) return err; - if (!attr->disabled) { + if (!attr->disabled) perf_event_enable(bp); - bp->attr.disabled = 0; - } + return 0; } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); -- GitLab From 969558371bf926258241727ebb994f516f2e6f61 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:27 +0200 Subject: [PATCH 0760/1692] perf/hw_breakpoint: Enable breakpoint in modify_user_hw_breakpoint Currently we enable the breakpoint back only if the breakpoint modification was successful. If it fails we can leave the breakpoint in disabled state with attr->disabled == 0. We can safely enable the breakpoint back for both the fail and success paths by checking the bp->attr.disabled, which either holds the new 'requested' disabled state or the original breakpoint state. Suggested-by: Oleg Nesterov Signed-off-by: Jiri Olsa Acked-by: Frederic Weisbecker Acked-by: Oleg Nesterov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Milind Chabbi Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-5-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/hw_breakpoint.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 3e560d7609fd..d6b56180827c 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -523,13 +523,11 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att perf_event_disable(bp); err = modify_user_hw_breakpoint_check(bp, attr, false); - if (err) - return err; - if (!attr->disabled) + if (!bp->attr.disabled) perf_event_enable(bp); - return 0; + return err; } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); -- GitLab From bf06278c3fdf8909c3a9283e2c270b0fc170fa90 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 27 Aug 2018 11:12:28 +0200 Subject: [PATCH 0761/1692] perf/hw_breakpoint: Simplify breakpoint enable in perf_event_modify_breakpoint We can safely enable the breakpoint back for both the fail and success paths by checking only the bp->attr.disabled, which either holds the new 'requested' disabled state or the original breakpoint state. 
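A plain-C analogue (invented names, not kernel code) of the control flow this series converges on: the enable decision is keyed off the event's own disabled flag, so the same path is correct whether the modification succeeded or failed.

  #include <stdbool.h>
  #include <stdio.h>

  struct attr  { bool disabled; };
  struct event { struct attr attr; bool running; };

  /* on failure the event keeps its old attr; on success it takes the request */
  static int modify_check(struct event *bp, const struct attr *a, bool ok)
  {
          if (!ok)
                  return -1;
          bp->attr = *a;
          return 0;
  }

  static int modify(struct event *bp, const struct attr *a, bool ok)
  {
          int err;

          bp->running = false;            /* perf_event_disable() analogue */
          err = modify_check(bp, a, ok);
          if (!bp->attr.disabled)         /* requested state or original state */
                  bp->running = true;     /* perf_event_enable() analogue */
          return err;
  }

  int main(void)
  {
          struct event bp = { .attr = { .disabled = false } };
          struct attr req = { .disabled = false };
          int err;

          err = modify(&bp, &req, false);
          printf("failed modify: err=%d running=%d\n", err, bp.running);
          err = modify(&bp, &req, true);
          printf("good modify:   err=%d running=%d\n", err, bp.running);
          return 0;
  }
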
Committer testing: At the end of the series, the 'perf test' entry introduced as the first patch now runs to completion without finding the fixed issues: # perf test "bp modify" 62: x86 bp modify : Ok # In verbose mode: # perf test -v "bp modify" 62: x86 bp modify : --- start --- test child forked, pid 5161 rip 5950a0, bp_1 0x5950a0 in bp_1 rip 5950a0, bp_1 0x5950a0 in bp_1 test child finished with 0 ---- end ---- x86 bp modify: Ok Suggested-by: Oleg Nesterov Acked-by: Oleg Nesterov Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: David Ahern Cc: Milind Chabbi Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180827091228.2878-6-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/core.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index f6ea33a9f904..22ede28ec07d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2867,16 +2867,11 @@ static int perf_event_modify_breakpoint(struct perf_event *bp, _perf_event_disable(bp); err = modify_user_hw_breakpoint_check(bp, attr, true); - if (err) { - if (!bp->attr.disabled) - _perf_event_enable(bp); - return err; - } - - if (!attr->disabled) + if (!bp->attr.disabled) _perf_event_enable(bp); - return 0; + + return err; } static int perf_event_modify_attr(struct perf_event *event, -- GitLab From 5ab1de932e2923f490645ad017a689c5b58dc433 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Mon, 6 Aug 2018 17:28:00 -0500 Subject: [PATCH 0762/1692] perf arm64: Fix include path for asm-generic/unistd.h The new syscall table support for arm64 mistakenly used the system's asm-generic/unistd.h file when processing the tools/arch/arm64/include/uapi/asm/unistd.h file's include directive: #include See "Committer notes" section of commit 2b5882435606 "perf arm64: Generate system call table from asm/unistd.h" for more details. This patch removes the committer's temporary workaround, and instructs the host compiler to search the build tree's include path for the right copy of the unistd.h file, instead of the one on the system's /usr/include path. 
It thus fixes the committer's test that cross-builds an arm64 perf on an x86 platform running Ubuntu 14.04.5 LTS with an old toolchain: $ tools/perf/arch/arm64/entry/syscalls/mksyscalltbl /gcc-linaro-5.4.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-gcc gcc `pwd`/tools tools/arch/arm64/include/uapi/asm/unistd.h | grep bpf [280] = "bpf", Signed-off-by: Kim Phillips Cc: Alexander Shishkin Cc: Hendrik Brueckner Cc: Jiri Olsa Cc: Michael Ellerman Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Thomas Richter Fixes: 2b5882435606 ("perf arm64: Generate system call table from asm/unistd.h") Link: http://lkml.kernel.org/r/20180806172800.bbcec3cfcc51e2facc978bf2@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm64/Makefile | 5 +++-- tools/perf/arch/arm64/entry/syscalls/mksyscalltbl | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile index f013b115dc86..dbef716a1913 100644 --- a/tools/perf/arch/arm64/Makefile +++ b/tools/perf/arch/arm64/Makefile @@ -11,7 +11,8 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 out := $(OUTPUT)arch/arm64/include/generated/asm header := $(out)/syscalls.c -sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h +incpath := $(srctree)/tools +sysdef := $(srctree)/tools/arch/arm64/include/uapi/asm/unistd.h sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/ systbl := $(sysprf)/mksyscalltbl @@ -19,7 +20,7 @@ systbl := $(sysprf)/mksyscalltbl _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header): $(sysdef) $(systbl) - $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@ + $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@ clean:: $(call QUIET_CLEAN, arm64) $(RM) $(header) diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl index 52e197317d3e..2dbb8cade048 100755 --- a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl +++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl @@ -11,7 +11,8 @@ gcc=$1 hostcc=$2 -input=$3 +incpath=$3 +input=$4 if ! test -r $input; then echo "Could not read input file" >&2 @@ -28,7 +29,6 @@ create_table_from_c() cat <<-_EoHEADER #include - #define __ARCH_WANT_RENAMEAT #include "$input" int main(int argc, char *argv[]) { @@ -42,7 +42,7 @@ create_table_from_c() printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);" printf "}\n" - } | $hostcc -o $create_table_exe -x c - + } | $hostcc -I $incpath/include/uapi -o $create_table_exe -x c - $create_table_exe -- GitLab From fd8d2702791a970c751f8b526a17d8e725a05b46 Mon Sep 17 00:00:00 2001 From: Hisao Tanabe Date: Sat, 25 Aug 2018 00:45:56 +0900 Subject: [PATCH 0763/1692] perf evsel: Fix potential null pointer dereference in perf_evsel__new_idx() If evsel is NULL, we should return NULL to avoid a NULL pointer dereference a bit later in the code. 
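A standalone illustration (not the perf sources) of the early-return pattern the fix below applies: bail out on a failed allocation before anything dereferences the pointer.

  #include <stdlib.h>

  struct evsel { int idx; };

  static struct evsel *evsel_new(int idx)
  {
          struct evsel *evsel = calloc(1, sizeof(*evsel));

          if (!evsel)
                  return NULL;            /* return before any evsel-> access */

          evsel->idx = idx;
          return evsel;
  }

  int main(void)
  {
          free(evsel_new(0));
          return 0;
  }
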
Signed-off-by: Hisao Tanabe Acked-by: Namhyung Kim Cc: Jiri Olsa Cc: Wang Nan Fixes: 03e0a7df3efd ("perf tools: Introduce bpf-output event") LPU-Reference: 20180824154556.23428-1-xtanabe@gmail.com Link: https://lkml.kernel.org/n/tip-e5plzjhx6595a5yjaf22jss3@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index c980bbff6353..1a61628a1c12 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -251,8 +251,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx) { struct perf_evsel *evsel = zalloc(perf_evsel__object.size); - if (evsel != NULL) - perf_evsel__init(evsel, attr, idx); + if (!evsel) + return NULL; + perf_evsel__init(evsel, attr, idx); if (perf_evsel__is_bpf_output(evsel)) { evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | -- GitLab From dad2762aac17eac01ea97779e78a061ed1b83b86 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 29 Aug 2018 17:31:52 -0300 Subject: [PATCH 0764/1692] perf tools: Streamline bpf examples and headers installation We were emitting 4 lines, two of them misleading: make: Entering directory '/home/acme/git/perf/tools/perf' INSTALL lib INSTALL include/bpf INSTALL lib INSTALL examples/bpf make: Leaving directory '/home/acme/git/perf/tools/perf' Make it more compact by showing just two lines: make: Entering directory '/home/acme/git/perf/tools/perf' INSTALL bpf-headers INSTALL bpf-examples make: Leaving directory '/home/acme/git/perf/tools/perf' Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-0nvkyciqdkrgy829lony5925@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.perf | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index b3d1b12a5081..5224ade3d5af 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -777,14 +777,12 @@ endif $(call QUIET_INSTALL, libexec) \ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' ifndef NO_LIBBPF - $(call QUIET_INSTALL, lib) \ - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf' - $(call QUIET_INSTALL, include/bpf) \ - $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf' - $(call QUIET_INSTALL, lib) \ - $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf' - $(call QUIET_INSTALL, examples/bpf) \ - $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf' + $(call QUIET_INSTALL, bpf-headers) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \ + $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf' + $(call QUIET_INSTALL, bpf-examples) \ + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \ + $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf' endif $(call QUIET_INSTALL, perf-archive) \ $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' -- GitLab From a72f64261359b7451f8478f2a2bf357b4e6c757f Mon Sep 17 00:00:00 2001 From: Chris Phlipot Date: Tue, 28 Aug 2018 23:19:54 -0700 Subject: [PATCH 0765/1692] perf util: Fix bad memory access in trace info. 
In the write to the output_fd in the error condition of record_saved_cmdline(), we are writing 8 bytes from a memory location on the stack that contains a primitive that is only 4 bytes in size. Change the primitive to 8 bytes in size to match the size of the write in order to avoid reading unknown memory from the stack. Signed-off-by: Chris Phlipot Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180829061954.18871-1-cphlipot0@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/trace-event-info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index c85d0d1a65ed..7b0ca7cbb7de 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -377,7 +377,7 @@ static int record_ftrace_printk(void) static int record_saved_cmdline(void) { - unsigned int size; + unsigned long long size; char *path; struct stat st; int ret, err = 0; -- GitLab From c9f23d2bc21cb263ae931f3e264d003d746107bb Mon Sep 17 00:00:00 2001 From: Chris Phlipot Date: Wed, 29 Aug 2018 19:19:50 -0700 Subject: [PATCH 0766/1692] perf event-parse: Use fixed size string for comms Some implementations of libc do not support the 'm' width modifier as part of the scanf string format specifier. This can cause the parsing to fail. Since the parser never checks if the scanf parsing was successesful, this can result in a crash. Change the comm string to be allocated as a fixed size instead of dynamically using 'm' scanf width modifier. This can be safely done since comm size is limited to 16 bytes by TASK_COMM_LEN within the kernel. This change prevents perf from crashing when linked against bionic as well as reduces the total number of heap allocations and frees invoked while accomplishing the same task. Signed-off-by: Chris Phlipot Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180830021950.15563-1-cphlipot0@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/trace-event-parse.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 920b1d58a068..e76214f8d596 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -164,16 +164,15 @@ void parse_ftrace_printk(struct tep_handle *pevent, void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int size __maybe_unused) { - char *comm; + char comm[17]; /* Max comm length in the kernel is 16. */ char *line; char *next = NULL; int pid; line = strtok_r(file, "\n", &next); while (line) { - sscanf(line, "%d %ms", &pid, &comm); - tep_register_comm(pevent, comm, pid); - free(comm); + if (sscanf(line, "%d %16s", &pid, comm) == 2) + tep_register_comm(pevent, comm, pid); line = strtok_r(NULL, "\n", &next); } } -- GitLab From 3d8f7615319b2bca87a4815e13787439e3339a93 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 29 Aug 2018 08:41:29 +0300 Subject: [PATCH 0767/1692] vfs: implement readahead(2) using POSIX_FADV_WILLNEED The implementation of readahead(2) syscall is identical to that of fadvise64(POSIX_FADV_WILLNEED) with a few exceptions: 1. readahead(2) returns -EINVAL for !mapping->a_ops and fadvise64() ignores the request and returns 0. 2. fadvise64() checks for integer overflow corner case 3. fadvise64() calls the optional filesystem fadvise() file operation Unite the two implementations by calling vfs_fadvise() from readahead(2) syscall. 
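For illustration only (userspace C, file path arbitrary): after this change both of the requests below funnel into the same vfs_fadvise() path in the kernel.

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <unistd.h>

  int main(void)
  {
          int fd = open("/etc/hostname", O_RDONLY);

          if (fd < 0)
                  return 1;

          readahead(fd, 0, 4096);                            /* readahead(2) */
          posix_fadvise(fd, 0, 4096, POSIX_FADV_WILLNEED);   /* equivalent hint */

          close(fd);
          return 0;
  }
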
Check the !mapping->a_ops in readahead(2) syscall to preserve documented syscall ABI behaviour. Suggested-by: Miklos Szeredi Fixes: d1d04ef8572b ("ovl: stack file ops") Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- mm/Makefile | 3 +-- mm/fadvise.c | 3 +++ mm/readahead.c | 45 +++++++++++++++++---------------------------- 3 files changed, 21 insertions(+), 30 deletions(-) diff --git a/mm/Makefile b/mm/Makefile index 8716bdabe1e6..26ef77a3883b 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -32,7 +32,7 @@ ifdef CONFIG_CROSS_MEMORY_ATTACH mmu-$(CONFIG_MMU) += process_vm_access.o endif -obj-y := filemap.o mempool.o oom_kill.o \ +obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ maccess.o page_alloc.o page-writeback.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ util.o mmzone.o vmstat.o backing-dev.o \ @@ -49,7 +49,6 @@ else obj-y += bootmem.o endif -obj-$(CONFIG_ADVISE_SYSCALLS) += fadvise.o ifdef CONFIG_MMU obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o endif diff --git a/mm/fadvise.c b/mm/fadvise.c index 2f59bac1cb77..467bcd032037 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -188,6 +188,8 @@ int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice) } EXPORT_SYMBOL(vfs_fadvise); +#ifdef CONFIG_ADVISE_SYSCALLS + int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) { struct fd f = fdget(fd); @@ -215,3 +217,4 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice) } #endif +#endif diff --git a/mm/readahead.c b/mm/readahead.c index a59ea70527b9..4e630143a0ba 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "internal.h" @@ -575,24 +576,6 @@ page_cache_async_readahead(struct address_space *mapping, } EXPORT_SYMBOL_GPL(page_cache_async_readahead); -static ssize_t -do_readahead(struct address_space *mapping, struct file *filp, - pgoff_t index, unsigned long nr) -{ - if (!mapping || !mapping->a_ops) - return -EINVAL; - - /* - * Readahead doesn't make sense for DAX inodes, but we don't want it - * to report a failure either. Instead, we just return success and - * don't do any work. - */ - if (dax_mapping(mapping)) - return 0; - - return force_page_cache_readahead(mapping, filp, index, nr); -} - ssize_t ksys_readahead(int fd, loff_t offset, size_t count) { ssize_t ret; @@ -600,16 +583,22 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count) ret = -EBADF; f = fdget(fd); - if (f.file) { - if (f.file->f_mode & FMODE_READ) { - struct address_space *mapping = f.file->f_mapping; - pgoff_t start = offset >> PAGE_SHIFT; - pgoff_t end = (offset + count - 1) >> PAGE_SHIFT; - unsigned long len = end - start + 1; - ret = do_readahead(mapping, f.file, start, len); - } - fdput(f); - } + if (!f.file || !(f.file->f_mode & FMODE_READ)) + goto out; + + /* + * The readahead() syscall is intended to run only on files + * that can execute readahead. If readahead is not possible + * on this file, then we must return -EINVAL. 
+ */ + ret = -EINVAL; + if (!f.file->f_mapping || !f.file->f_mapping->a_ops || + !S_ISREG(file_inode(f.file)->i_mode)) + goto out; + + ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED); +out: + fdput(f); return ret; } -- GitLab From fa694160cca6dbba17c57dc7efec5f93feaf8795 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 28 Aug 2018 14:38:48 +0530 Subject: [PATCH 0768/1692] perf probe powerpc: Ignore SyS symbols irrespective of endianness This makes sure that the SyS symbols are ignored for any powerpc system, not just the big endian ones. Reported-by: Naveen N. Rao Signed-off-by: Sandipan Das Reviewed-by: Kamalesh Babulal Acked-by: Naveen N. Rao Cc: Jiri Olsa Cc: Ravi Bangoria Fixes: fb6d59423115 ("perf probe ppc: Use the right prefix when ignoring SyS symbols on ppc") Link: http://lkml.kernel.org/r/20180828090848.1914-1-sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/powerpc/util/sym-handling.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c index 20e7d74d86cd..10a44e946f77 100644 --- a/tools/perf/arch/powerpc/util/sym-handling.c +++ b/tools/perf/arch/powerpc/util/sym-handling.c @@ -22,15 +22,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr) #endif -#if !defined(_CALL_ELF) || _CALL_ELF != 2 int arch__choose_best_symbol(struct symbol *syma, struct symbol *symb __maybe_unused) { char *sym = syma->name; +#if !defined(_CALL_ELF) || _CALL_ELF != 2 /* Skip over any initial dot */ if (*sym == '.') sym++; +#endif /* Avoid "SyS" kernel syscall aliases */ if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3)) @@ -41,6 +42,7 @@ int arch__choose_best_symbol(struct symbol *syma, return SYMBOL_A; } +#if !defined(_CALL_ELF) || _CALL_ELF != 2 /* Allow matching against dot variants */ int arch__compare_symbol_names(const char *namea, const char *nameb) { -- GitLab From 4e67b2a5df5d3f341776d12ee575e00ca3ef92de Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Mon, 27 Aug 2018 12:53:40 -0500 Subject: [PATCH 0769/1692] perf annotate: Fix parsing aarch64 branch instructions after objdump update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Starting with binutils 2.28, aarch64 objdump adds comments to the disassembly output to show the alternative names of a condition code [1]. It is assumed that commas in objdump comments could occur in other arches now or in the future, so this fix is arch-independent. The fix could have been done with arm64 specific jump__parse and jump__scnprintf functions, but the jump__scnprintf instruction would have to have its comment character be a literal, since the scnprintf functions cannot receive a struct arch easily. This inconvenience also applies to the generic jump__scnprintf, which is why we add a raw_comment pointer to struct ins_operands, so the __parse function assigns it to be re-used by its corresponding __scnprintf function. 
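A standalone sketch of the comment handling (the real code goes through arch->objdump.comment_char): a comma is ignored when it only appears inside the trailing objdump comment, as the validate_comma() helper below enforces.

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          const char *raw = "b.cs ffff2000084470f4  // b.hs, b.nlast";
          const char *comment = strstr(raw, "//");
          const char *comma = strchr(raw, ',');

          if (comma && comment && comma > comment)
                  comma = NULL;   /* the comma only separates condition aliases */

          printf("operand comma found: %s\n", comma ? "yes" : "no");
          return 0;
  }
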
Example differences in 'perf annotate --stdio2' output on an aarch64 perf.data file: BEFORE: → b.cs ffff200008133d1c // b.hs, dffff7ecc47b AFTER : ↓ b.cs 18c BEFORE: → b.cc ffff200008d8d9cc // b.lo, b.ul, dffff727295b AFTER : ↓ b.cc 31c The branch target labels 18c and 31c also now appear in the output: BEFORE: add x26, x29, #0x80 AFTER : 18c: add x26, x29, #0x80 BEFORE: add x21, x21, #0x8 AFTER : 31c: add x21, x21, #0x8 The Fixes: tag below is added so stable branches will get the update; it doesn't necessarily mean that commit was broken at the time, rather it didn't withstand the aarch64 objdump update. Tested no difference in output for sample x86_64, power arch perf.data files. [1] https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;a=commit;h=bb7eff5206e4795ac79c177a80fe9f4630aaf730 Signed-off-by: Kim Phillips Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Anton Blanchard Cc: Christian Borntraeger Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Robin Murphy Cc: Taeung Song Cc: linux-arm-kernel@lists.infradead.org Fixes: b13bbeee5ee6 ("perf annotate: Fix branch instruction with multiple operands") Link: http://lkml.kernel.org/r/20180827125340.a2f7e291901d17cea05daba4@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 22 +++++++++++++++++++++- tools/perf/util/annotate.h | 1 + 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index e62b69ea87cd..28cd6a17491b 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -282,7 +282,19 @@ bool ins__is_call(const struct ins *ins) return ins->ops == &call_ops || ins->ops == &s390_call_ops; } -static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms) +/* + * Prevents from matching commas in the comment section, e.g.: + * ffff200008446e70: b.cs ffff2000084470f4 // b.hs, b.nlast + */ +static inline const char *validate_comma(const char *c, struct ins_operands *ops) +{ + if (ops->raw_comment && c > ops->raw_comment) + return NULL; + + return c; +} + +static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) { struct map *map = ms->map; struct symbol *sym = ms->sym; @@ -291,6 +303,10 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op }; const char *c = strchr(ops->raw, ','); u64 start, end; + + ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char); + c = validate_comma(c, ops); + /* * Examples of lines to parse for the _cpp_lex_token@@Base * function: @@ -310,6 +326,7 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op ops->target.addr = strtoull(c, NULL, 16); if (!ops->target.addr) { c = strchr(c, ','); + c = validate_comma(c, ops); if (c++ != NULL) ops->target.addr = strtoull(c, NULL, 16); } @@ -367,9 +384,12 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size, return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name); c = strchr(ops->raw, ','); + c = validate_comma(c, ops); + if (c != NULL) { const char *c2 = strchr(c + 1, ','); + c2 = validate_comma(c2, ops); /* check for 3-op insn */ if (c2 != NULL) c = c2; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 005a5fe8a8c6..5399ba2321bb 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -22,6 +22,7 @@ struct ins { struct ins_operands { char *raw; + char *raw_comment; struct { char *raw; char 
*name; -- GitLab From f42b0e18f2e5cf34f73ef1b6327b49040b307a33 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 27 Aug 2018 07:50:47 -0500 Subject: [PATCH 0770/1692] of: add node name compare helper functions In preparation to remove device_node.name pointer, add helper functions for node name comparisons which are a common pattern throughout the kernel. Cc: Frank Rowand Signed-off-by: Rob Herring --- drivers/of/base.c | 22 ++++++++++++++++++++++ include/linux/of.h | 13 +++++++++++++ 2 files changed, 35 insertions(+) diff --git a/drivers/of/base.c b/drivers/of/base.c index bc420d2aa5f5..9095b8290150 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex); */ DEFINE_RAW_SPINLOCK(devtree_lock); +bool of_node_name_eq(const struct device_node *np, const char *name) +{ + const char *node_name; + size_t len; + + if (!np) + return false; + + node_name = kbasename(np->full_name); + len = strchrnul(node_name, '@') - node_name; + + return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); +} + +bool of_node_name_prefix(const struct device_node *np, const char *prefix) +{ + if (!np) + return false; + + return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; +} + int of_n_addr_cells(struct device_node *np) { u32 cells; diff --git a/include/linux/of.h b/include/linux/of.h index b99a1a8c2952..688c52dd7b3e 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) +extern bool of_node_name_eq(const struct device_node *np, const char *name); +extern bool of_node_name_prefix(const struct device_node *np, const char *prefix); + static inline const char *of_node_full_name(const struct device_node *np) { return np ? np->full_name : ""; @@ -563,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode) return NULL; } +static inline bool of_node_name_eq(const struct device_node *np, const char *name) +{ + return false; +} + +static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix) +{ + return false; +} + static inline const char* of_node_full_name(const struct device_node *np) { return ""; -- GitLab From cd2315d471f45a36cb1329722920d89cd6d3d11f Mon Sep 17 00:00:00 2001 From: Benjamin Fair Date: Fri, 6 Jul 2018 11:16:03 -0700 Subject: [PATCH 0771/1692] ipmi: kcs_bmc: don't change device name kcs_bmc_alloc(...) calls dev_set_name(...) which is incorrect as most bus driver frameworks, platform_driver in particular, assume that they are able to set the device name themselves. 
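As a userspace analogue (not the driver code): the fix formats a private name for the misc device with devm_kasprintf() instead of renaming the struct device owned by the bus; asprintf() plays that role below.

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <stdlib.h>

  int main(void)
  {
          unsigned int channel = 3;       /* example channel number */
          char *name;

          if (asprintf(&name, "ipmi-kcs%u", channel) < 0)
                  return 1;
          puts(name);                     /* "ipmi-kcs3" */
          free(name);
          return 0;
  }
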
Signed-off-by: Benjamin Fair Signed-off-by: Corey Minyard --- drivers/char/ipmi/kcs_bmc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index bb882ab161fe..e6124bd548df 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c @@ -16,6 +16,8 @@ #include "kcs_bmc.h" +#define DEVICE_NAME "ipmi-kcs" + #define KCS_MSG_BUFSIZ 1000 #define KCS_ZERO_DATA 0 @@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel) if (!kcs_bmc) return NULL; - dev_set_name(dev, "ipmi-kcs%u", channel); - spin_lock_init(&kcs_bmc->lock); kcs_bmc->channel = channel; @@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel) return NULL; kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; - kcs_bmc->miscdev.name = dev_name(dev); + kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u", + DEVICE_NAME, channel); kcs_bmc->miscdev.fops = &kcs_bmc_fops; return kcs_bmc; -- GitLab From 7fd6d98b89f382d414e1db528e29a67bbd749457 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Thu, 30 Aug 2018 11:50:13 +0300 Subject: [PATCH 0772/1692] i2c: i801: Allow ACPI AML access I/O ports not reserved for SMBus Commit 7ae81952cda ("i2c: i801: Allow ACPI SystemIO OpRegion to conflict with PCI BAR") made it possible for AML code to access SMBus I/O ports by installing custom SystemIO OpRegion handler and blocking i80i driver access upon first AML read/write to this OpRegion. However, while ThinkPad T560 does have SystemIO OpRegion declared under the SMBus device, it does not access any of the SMBus registers: Device (SMBU) { ... OperationRegion (SMBP, PCI_Config, 0x50, 0x04) Field (SMBP, DWordAcc, NoLock, Preserve) { , 5, TCOB, 11, Offset (0x04) } Name (TCBV, 0x00) Method (TCBS, 0, NotSerialized) { If ((TCBV == 0x00)) { TCBV = (\_SB.PCI0.SMBU.TCOB << 0x05) } Return (TCBV) /* \_SB_.PCI0.SMBU.TCBV */ } OperationRegion (TCBA, SystemIO, TCBS (), 0x10) Field (TCBA, ByteAcc, NoLock, Preserve) { Offset (0x04), , 9, CPSC, 1 } } Problem with the current approach is that it blocks all I/O port access and because this system has touchpad connected to the SMBus controller after first AML access (happens during suspend/resume cycle) the touchpad fails to work anymore. Fix this so that we allow ACPI AML I/O port access if it does not touch the region reserved for the SMBus. 
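A standalone sketch (addresses hypothetical) of the new test: an AML SystemIO access only makes the driver back off when it falls inside the SMBus I/O window.

  #include <stdbool.h>
  #include <stdio.h>

  static bool is_smbus_ioport(unsigned long addr,
                              unsigned long smba, unsigned long smba_end)
  {
          return addr >= smba && addr <= smba_end;
  }

  int main(void)
  {
          unsigned long smba = 0xefa0, smba_end = 0xefbf; /* hypothetical SMBBAR */

          printf("TCO port  0x0436 -> %s\n",
                 is_smbus_ioport(0x436, smba, smba_end) ? "block driver" : "ignore");
          printf("SMBus reg 0xefa4 -> %s\n",
                 is_smbus_ioport(0xefa4, smba, smba_end) ? "block driver" : "ignore");
          return 0;
  }
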
Fixes: 7ae81952cda ("i2c: i801: Allow ACPI SystemIO OpRegion to conflict with PCI BAR") Link: https://bugzilla.kernel.org/show_bug.cgi?id=200737 Reported-by: Yussuf Khalil Signed-off-by: Mika Westerberg Reviewed-by: Jean Delvare Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-i801.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 941c223f6491..04b60a349d7e 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1415,6 +1415,13 @@ static void i801_add_tco(struct i801_priv *priv) } #ifdef CONFIG_ACPI +static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv, + acpi_physical_address address) +{ + return address >= priv->smba && + address <= pci_resource_end(priv->pci_dev, SMBBAR); +} + static acpi_status i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, u64 *value, void *handler_context, void *region_context) @@ -1430,7 +1437,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, */ mutex_lock(&priv->acpi_lock); - if (!priv->acpi_reserved) { + if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) { priv->acpi_reserved = true; dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); -- GitLab From 9d9a152ebaa86a9dede4624919566483c955d0a7 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 29 Aug 2018 15:06:31 +0200 Subject: [PATCH 0773/1692] i2c: designware: Re-init controllers with pm_disabled set on resume On Bay Trail and Cherry Trail devices we set the pm_disabled flag for I2C busses which the OS shares with the PUNIT as these need special handling. Until now we called dev_pm_syscore_device(dev, true) for I2C controllers with this flag set to keep these I2C controllers always on. After commit 12864ff8545f ("ACPI / LPSS: Avoid PM quirks on suspend and resume from hibernation"), this no longer works. This commit modifies lpss_iosf_exit_d3_state() to only run if lpss_iosf_enter_d3_state() has ran before it, so that it does not run on a resume from hibernate (or from S3). On these systems the conditions for lpss_iosf_enter_d3_state() to run never become true, so lpss_iosf_exit_d3_state() never gets called and the 2 LPSS DMA controllers never get forced into D0 mode, instead they are left in their default automatic power-on when needed mode. The not forcing of D0 mode for the DMA controllers enables these systems to properly enter S0ix modes, which is a good thing. But after entering S0ix modes the I2C controller connected to the PMIC no longer works, leading to e.g. broken battery monitoring. The _PS3 method for this I2C controller looks like this: Method (_PS3, 0, NotSerialized) // _PS3: Power State 3 { If ((((PMID == 0x04) || (PMID == 0x05)) || (PMID == 0x06))) { Return (Zero) } PSAT |= 0x03 Local0 = PSAT /* \_SB_.I2C5.PSAT */ } Where PMID = 0x05, so we enter the Return (Zero) path on these systems. So even if we were to not call dev_pm_syscore_device(dev, true) the I2C controller will be left in D0 rather then be switched to D3. Yet on other Bay and Cherry Trail devices S0ix is not entered unless *all* I2C controllers are in D3 mode. This combined with the I2C controller no longer working now that we reach S0ix states on these systems leads to me believing that the PUNIT itself puts the I2C controller in D3 when all other conditions for entering S0ix states are true. 
Since now the I2C controller is put in D3 over a suspend/resume we must re-initialize it afterwards and that does indeed fix it no longer working. This commit implements this fix by: 1) Making the suspend_late callback a no-op if pm_disabled is set and making the resume_early callback skip the clock re-enable (since it now was not disabled) while still doing the necessary I2C controller re-init. 2) Removing the dev_pm_syscore_device(dev, true) call, so that the suspend and resume callbacks are actually called. Normally this would cause the ACPI pm code to call _PS3 putting the I2C controller in D3, wreaking havoc since it is shared with the PUNIT, but in this special case the _PS3 method is a no-op so we can safely allow a "fake" suspend / resume. Fixes: 12864ff8545f ("ACPI / LPSS: Avoid PM quirks on suspend and resume ...") Link: https://bugzilla.kernel.org/show_bug.cgi?id=200861 Cc: 4.15+ # 4.15+ Signed-off-by: Hans de Goede Reviewed-by: Andy Shevchenko Acked-by: Jarkko Nikula Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-designware-master.c | 1 - drivers/i2c/busses/i2c-designware-platdrv.c | 7 ++++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e18442b9973a..94d94b4a9a0d 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) i2c_set_adapdata(adap, dev); if (dev->pm_disabled) { - dev_pm_syscore_device(dev->dev, true); irq_flags = IRQF_NO_SUSPEND; } else { irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 1a8d2da5b000..b5750fd85125 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); + if (i_dev->pm_disabled) + return 0; + i_dev->disable(i_dev); i2c_dw_prepare_clk(i_dev, false); @@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - i2c_dw_prepare_clk(i_dev, true); + if (!i_dev->pm_disabled) + i2c_dw_prepare_clk(i_dev, true); + i_dev->init(i_dev); return 0; -- GitLab From 1204d12a494cf5dff497859a5febf2ae30a28970 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Kundr=C3=A1t?= Date: Tue, 28 Aug 2018 10:07:40 +0200 Subject: [PATCH 0774/1692] i2c: algos: bit: make the error messages grepable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Yep, I went looking for one of these, and I wasn't able to find it easily. That's worse than a line which is 82-chars long, IMHO. 
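For illustration (plain C): the point of the change below is that a diagnostic kept in a single string literal can be found with grep, whereas one split mid-sentence across source lines cannot.

  #include <stdio.h>

  int main(void)
  {
          const char *name = "i2c-0";

          /* one literal: grep "SCL unexpected low while pulling SDA" finds this line */
          printf("%s: SCL unexpected low while pulling SDA low!\n", name);
          return 0;
  }
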
Signed-off-by: Jan Kundrát Signed-off-by: Wolfram Sang --- drivers/i2c/algos/i2c-algo-bit.c | 55 ++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 6ec65adaba49..c33dcfb87993 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap) } #ifdef DEBUG if (jiffies != start && i2c_debug >= 3) - pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " - "high\n", jiffies - start); + pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n", + jiffies - start); #endif done: @@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) setsda(adap, sb); udelay((adap->udelay + 1) / 2); if (sclhi(adap) < 0) { /* timed out */ - bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " - "timeout at bit #%d\n", (int)c, i); + bit_dbg(1, &i2c_adap->dev, + "i2c_outb: 0x%02x, timeout at bit #%d\n", + (int)c, i); return -ETIMEDOUT; } /* FIXME do arbitration here: @@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) } sdahi(adap); if (sclhi(adap) < 0) { /* timeout */ - bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " - "timeout at ack\n", (int)c); + bit_dbg(1, &i2c_adap->dev, + "i2c_outb: 0x%02x, timeout at ack\n", (int)c); return -ETIMEDOUT; } @@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap) sdahi(adap); for (i = 0; i < 8; i++) { if (sclhi(adap) < 0) { /* timeout */ - bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " - "#%d\n", 7 - i); + bit_dbg(1, &i2c_adap->dev, + "i2c_inb: timeout at bit #%d\n", + 7 - i); return -ETIMEDOUT; } indata *= 2; @@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) goto bailout; } if (!scl) { - printk(KERN_WARNING "%s: SCL unexpected low " - "while pulling SDA low!\n", name); + printk(KERN_WARNING + "%s: SCL unexpected low while pulling SDA low!\n", + name); goto bailout; } @@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) goto bailout; } if (!scl) { - printk(KERN_WARNING "%s: SCL unexpected low " - "while pulling SDA high!\n", name); + printk(KERN_WARNING + "%s: SCL unexpected low while pulling SDA high!\n", + name); goto bailout; } @@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) goto bailout; } if (!sda) { - printk(KERN_WARNING "%s: SDA unexpected low " - "while pulling SCL low!\n", name); + printk(KERN_WARNING + "%s: SDA unexpected low while pulling SCL low!\n", + name); goto bailout; } @@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap) goto bailout; } if (!sda) { - printk(KERN_WARNING "%s: SDA unexpected low " - "while pulling SCL high!\n", name); + printk(KERN_WARNING + "%s: SDA unexpected low while pulling SCL high!\n", + name); goto bailout; } @@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap, i2c_start(adap); } if (i && ret) - bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at " - "0x%02x: %s\n", i + 1, + bit_dbg(1, &i2c_adap->dev, + "Used %d tries to %s client at 0x%02x: %s\n", i + 1, addr & 1 ? "read from" : "write to", addr >> 1, ret == 1 ? 
"success" : "failed, timeout?"); return ret; @@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { if (!(flags & I2C_M_NO_RD_ACK)) acknak(i2c_adap, 0); - dev_err(&i2c_adap->dev, "readbytes: invalid " - "block length (%d)\n", inval); + dev_err(&i2c_adap->dev, + "readbytes: invalid block length (%d)\n", + inval); return -EPROTO; } /* The original count value accounts for the extra @@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) return -ENXIO; } if (flags & I2C_M_RD) { - bit_dbg(3, &i2c_adap->dev, "emitting repeated " - "start condition\n"); + bit_dbg(3, &i2c_adap->dev, + "emitting repeated start condition\n"); i2c_repstart(adap); /* okay, now switch into reading mode */ addr |= 0x01; @@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, } ret = bit_doAddress(i2c_adap, pmsg); if ((ret != 0) && !nak_ok) { - bit_dbg(1, &i2c_adap->dev, "NAK from " - "device addr 0x%02x msg #%d\n", + bit_dbg(1, &i2c_adap->dev, + "NAK from device addr 0x%02x msg #%d\n", msgs[i].addr, i); goto bailout; } -- GitLab From 82fe39a6bc7b866fc3ffd838e3c5a4cadb328b04 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 24 Aug 2018 16:52:44 +0200 Subject: [PATCH 0775/1692] i2c: refactor function to release a DMA safe buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit a) rename to 'put' instead of 'release' to match 'get' when obtaining the buffer b) change the argument order to have the buffer as first argument c) add a new argument telling the function if the message was transferred. This allows the function to be used also in cases where setting up DMA failed, so the buffer needs to be freed without syncing to the message buffer. Also convert the only user. Signed-off-by: Wolfram Sang Reviewed-by: Niklas Söderlund Signed-off-by: Wolfram Sang --- Documentation/i2c/DMA-considerations | 10 +++++++--- drivers/i2c/busses/i2c-sh_mobile.c | 2 +- drivers/i2c/i2c-core-base.c | 11 ++++++----- include/linux/i2c.h | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/Documentation/i2c/DMA-considerations b/Documentation/i2c/DMA-considerations index 966610aa4620..203002054120 100644 --- a/Documentation/i2c/DMA-considerations +++ b/Documentation/i2c/DMA-considerations @@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the returned buffer. If NULL is returned, the threshold was not met or a bounce buffer could not be allocated. Fall back to PIO in that case. -In any case, a buffer obtained from above needs to be released. It ensures data -is copied back to the message and a potentially used bounce buffer is freed:: +In any case, a buffer obtained from above needs to be released. Another helper +function ensures a potentially used bounce buffer is freed:: - i2c_release_dma_safe_msg_buf(msg, dma_buf); + i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred); + +The last argument 'xferred' controls if the buffer is synced back to the +message or not. No syncing is needed in cases setting up DMA had an error and +there was no data transferred. The bounce buffer handling from the core is generic and simple. It will always allocate a new bounce buffer. If you want a more sophisticated handling (e.g. 
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 439e8778f849..279d0e5bd433 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -507,7 +507,7 @@ static void sh_mobile_i2c_dma_callback(void *data) pd->pos = pd->msg->len; pd->stop_after_dma = true; - i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf); + i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, true); iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f15737763608..9ee9a15e7134 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); /** - * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg - * @msg: the message to be synced with + * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. + * @msg: the message which the buffer corresponds to + * @xferred: bool saying if the message was transferred */ -void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf) +void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred) { if (!buf || buf == msg->buf) return; - if (msg->flags & I2C_M_RD) + if (xferred && msg->flags & I2C_M_RD) memcpy(msg->buf, buf, msg->len); kfree(buf); } -EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf); +EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf); MODULE_AUTHOR("Simon G. Vogl "); MODULE_DESCRIPTION("I2C-Bus main module"); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b79387fd57da..65b4eaed1d96 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) } u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); -void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); +void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred); int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); /** -- GitLab From 531db50170a3e6d113c968fe7a6dda8d55d02ede Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 24 Aug 2018 16:52:45 +0200 Subject: [PATCH 0776/1692] i2c: sh_mobile: define start_ch() void as it only returns 0 anyhow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After various refactoring over the years, start_ch() doesn't return errno anymore, so make the function return void. This saves the error handling when calling it which in turn eases cleanup of resources of a future patch. 
Signed-off-by: Wolfram Sang Reviewed-by: Niklas Söderlund Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-sh_mobile.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 279d0e5bd433..b965d52338ba 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -602,8 +602,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) dma_async_issue_pending(chan); } -static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, - bool do_init) +static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, + bool do_init) { if (do_init) { /* Initialize channel registers */ @@ -627,7 +627,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, /* Enable all interrupts to begin with */ iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); - return 0; } static int poll_dte(struct sh_mobile_i2c_data *pd) @@ -698,9 +697,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; pd->stop_after_dma = false; - err = start_ch(pd, msg, do_start); - if (err) - break; + start_ch(pd, msg, do_start); if (do_start) i2c_op(pd, OP_START, 0); -- GitLab From cebc07d84ad71bc58d6f59b770e4347da48a5a2b Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 24 Aug 2018 16:52:46 +0200 Subject: [PATCH 0777/1692] i2c: sh_mobile: fix leak when using DMA bounce buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We only freed the bounce buffer after successful DMA, missing the cases where DMA setup may have gone wrong. Use a better location which always gets called after each message and use 'stop_after_dma' as a flag for a successful transfer. Signed-off-by: Wolfram Sang Reviewed-by: Niklas Söderlund Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-sh_mobile.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index b965d52338ba..818cab14e87c 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data) pd->pos = pd->msg->len; pd->stop_after_dma = true; - i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, true); - iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); } @@ -706,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, timeout = wait_event_timeout(pd->wait, pd->sr & (ICSR_TACK | SW_DONE), adapter->timeout); + + /* 'stop_after_dma' tells if DMA transfer was complete */ + i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); + if (!timeout) { dev_err(pd->dev, "Transfer request timed out\n"); if (pd->dma_direction != DMA_NONE) -- GitLab From bded6c03e398dc6e862dc8301fb9a60175740653 Mon Sep 17 00:00:00 2001 From: Akshu Agrawal Date: Tue, 21 Aug 2018 12:21:57 +0530 Subject: [PATCH 0778/1692] clk: x86: Set default parent to 48Mhz System clk provided in ST soc can be set to: 48Mhz, non-spread 25Mhz, spread To get accurate rate, we need it to set it at non-spread option which is 48Mhz. 
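A standalone sketch (parent names illustrative, not taken from the driver): model the oscout1 mux inputs and pick the non-spread 48 MHz source as the default parent so derived rates stay accurate.

  #include <stdio.h>

  struct parent { const char *name; int spread_spectrum; };

  int main(void)
  {
          struct parent parents[] = {
                  { "clk48MHz", 0 },      /* non-spread */
                  { "clk25MHz", 1 },      /* spread-spectrum */
          };
          int sel = 0;

          for (int i = 0; i < 2; i++)
                  if (!parents[i].spread_spectrum) {
                          sel = i;
                          break;
                  }
          printf("oscout1_mux default parent: %s\n", parents[sel].name);
          return 0;
  }
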
Signed-off-by: Akshu Agrawal Reviewed-by: Daniel Kurtz Fixes: 421bf6a1f061 ("clk: x86: Add ST oscout platform clock") Signed-off-by: Stephen Boyd --- drivers/clk/x86/clk-st.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index fb62f3938008..3a0996f2d556 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c @@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev) clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); - clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk); + clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk); hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, -- GitLab From 2b52e2a67c86b0714eeae0d2030cb7fc14737626 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Aug 2018 12:07:37 +0900 Subject: [PATCH 0779/1692] arc: remove redundant GCC version checks Commit cafa0010cd51 ("Raise the minimum required gcc version to 4.6") bumped the minimum GCC version to 4.6 for all architectures. With GCC >= 4.6 assumed, 'upto_gcc44' is empty, 'atleast_gcc44' is y. Signed-off-by: Masahiro Yamada Signed-off-by: Vineet Gupta --- arch/arc/Makefile | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/arch/arc/Makefile b/arch/arc/Makefile index fb026196aaab..99cce77ab98f 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -43,10 +43,7 @@ ifdef CONFIG_ARC_CURR_IN_REG LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h endif -upto_gcc44 := $(call cc-ifversion, -le, 0404, y) -atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y) - -cflags-$(atleast_gcc44) += -fsection-anchors +cflags-y += -fsection-anchors cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape @@ -82,11 +79,6 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB -# STAR 9000518362: (fixed with binutils shipping with gcc 4.8) -# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept -# --build-id w/o "-marclinux". 
Default arc-elf32-ld is OK -ldflags-$(upto_gcc44) += -marclinux - LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) # Modules with short calls might break for calls into builtin-kernel -- GitLab From 217c3e0196758662aa0429863b09d1c13da1c5d6 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 31 Aug 2018 07:47:28 +1000 Subject: [PATCH 0780/1692] disable stringop truncation warnings for now They are too noisy Signed-off-by: Stephen Rothwell Signed-off-by: Linus Torvalds --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 2b458801ba74..a34a9283ee90 100644 --- a/Makefile +++ b/Makefile @@ -807,6 +807,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,) # disable pointer signed / unsigned warnings in gcc 4.0 KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) +# disable stringop warnings in gcc 8+ +KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation) + # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) -- GitLab From 0986b16ab49b18063d29a9e02e9c7fab1928bc8e Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Fri, 24 Aug 2018 12:02:06 +1000 Subject: [PATCH 0781/1692] m68k/mac: Use correct PMU response format Now that the 68k Mac port has adopted the via-pmu driver, it must decode the PMU response accordingly otherwise the date and time will be wrong. Fixes: ebd722275f9cfc67 ("macintosh/via-pmu: Replace via-pmu68k driver with via-pmu driver") Signed-off-by: Finn Thain Signed-off-by: Geert Uytterhoeven --- arch/m68k/mac/misc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 3534aa6a4dc2..1b083c500b9a 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -98,11 +98,10 @@ static time64_t pmu_read_time(void) if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) return 0; - while (!req.complete) - pmu_poll(); + pmu_wait_complete(&req); - time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) | - (req.reply[3] << 8) | req.reply[4]); + time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) | + (req.reply[2] << 8) | req.reply[3]); return time - RTC_OFFSET; } @@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time) (data >> 24) & 0xFF, (data >> 16) & 0xFF, (data >> 8) & 0xFF, data & 0xFF) < 0) return; - while (!req.complete) - pmu_poll(); + pmu_wait_complete(&req); } static __u8 pmu_read_pram(int offset) -- GitLab From 4a477651033e48851386d12e773584c99a878670 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Aug 2018 09:24:05 +0100 Subject: [PATCH 0782/1692] drm/i915: Keep physical cursors pinned while in use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The optimisation inherent in commit 6a2c4232ece1 ("drm/i915: Make the physical object coherent with GTT") relies on that once we allocated a cursor we would have coherent, zero overhead access to the scanout plane holding the cursor. That is we could then do the very frequent cursor updates X enjoys with no indirection or kernel involvement. However, that all hinges on the GGTT mmap of the cursor being pinned and not require refaulting on each access -- handling such a page fault likely requires the busy GGTT to be rearranged causing a stall. A very simple fix is then to handle the physical cursor exactly like other cursors and keep its vma pinned while active. 
References: https://bugs.freedesktop.org/show_bug.cgi?id=107600 References: 6a2c4232ece1 ("drm/i915: Make the physical object coherent with GTT") Signed-off-by: Chris Wilson Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180817082405.755-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_display.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ec3e24f07486..b79ad9c57d35 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12969,8 +12969,11 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state) INTEL_INFO(dev_priv)->cursor_needs_physical) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); const int align = intel_cursor_alignment(dev_priv); + int err; - return i915_gem_object_attach_phys(obj, align); + err = i915_gem_object_attach_phys(obj, align); + if (err) + return err; } vma = intel_pin_and_fence_fb_obj(fb, -- GitLab From 3f51b7e1f36a37cfc6ed281a231485e4e6b511c3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 Aug 2018 14:48:06 +0100 Subject: [PATCH 0783/1692] drm/i915/selftests: Add a simple exerciser for suspend/hibernate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although we cannot do a full system-level test of suspend/hibernate from deep with the kernel selftests, we can exercise the GEM subsystem in isolation and simulate the external effects (such as losing stolen contents and trashing the register state). v2: Don't forget to hold rpm v3: Suspend the GTT mappings, and more rpm! References: https://bugs.freedesktop.org/show_bug.cgi?id=96526 References: 5ab57c702069 ("drm/i915: Flush logical context image out to memory upon suspend") Signed-off-by: Chris Wilson Cc: Jakub Bartmiński Cc: Matthew Auld Cc: Joonas Lahtinen Reviewed-by: Jakub Bartmiński Link: https://patchwork.freedesktop.org/patch/msgid/20180830134806.21939-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 1 + drivers/gpu/drm/i915/selftests/i915_gem.c | 221 ++++++++++++++++++ .../drm/i915/selftests/i915_live_selftests.h | 1 + 3 files changed, 223 insertions(+) create mode 100644 drivers/gpu/drm/i915/selftests/i915_gem.c diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0453eb42a1a3..7b7bbfe59697 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -6207,4 +6207,5 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) #include "selftests/huge_pages.c" #include "selftests/i915_gem_object.c" #include "selftests/i915_gem_coherency.c" +#include "selftests/i915_gem.c" #endif diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c new file mode 100644 index 000000000000..d0aa19d17653 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -0,0 +1,221 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include + +#include "../i915_selftest.h" + +#include "mock_context.h" +#include "igt_flush_test.h" + +static int switch_to_context(struct drm_i915_private *i915, + struct i915_gem_context *ctx) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + intel_runtime_pm_get(i915); + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + 
break; + } + + i915_request_add(rq); + } + + intel_runtime_pm_put(i915); + + return err; +} + +static void trash_stolen(struct drm_i915_private *i915) +{ + struct i915_ggtt *ggtt = &i915->ggtt; + const u64 slot = ggtt->error_capture.start; + const resource_size_t size = resource_size(&i915->dsm); + unsigned long page; + u32 prng = 0x12345678; + + for (page = 0; page < size; page += PAGE_SIZE) { + const dma_addr_t dma = i915->dsm.start + page; + u32 __iomem *s; + int x; + + ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); + + s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); + for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) { + prng = next_pseudo_random32(prng); + iowrite32(prng, &s[x]); + } + io_mapping_unmap_atomic(s); + } + + ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); +} + +static void simulate_hibernate(struct drm_i915_private *i915) +{ + intel_runtime_pm_get(i915); + + /* + * As a final sting in the tail, invalidate stolen. Under a real S4, + * stolen is lost and needs to be refilled on resume. However, under + * CI we merely do S4-device testing (as full S4 is too unreliable + * for automated testing across a cluster), so to simulate the effect + * of stolen being trashed across S4, we trash it ourselves. + */ + trash_stolen(i915); + + intel_runtime_pm_put(i915); +} + +static int pm_prepare(struct drm_i915_private *i915) +{ + int err = 0; + + if (i915_gem_suspend(i915)) { + pr_err("i915_gem_suspend failed\n"); + err = -EINVAL; + } + + return err; +} + +static void pm_suspend(struct drm_i915_private *i915) +{ + intel_runtime_pm_get(i915); + + i915_gem_suspend_gtt_mappings(i915); + i915_gem_suspend_late(i915); + + intel_runtime_pm_put(i915); +} + +static void pm_hibernate(struct drm_i915_private *i915) +{ + intel_runtime_pm_get(i915); + + i915_gem_suspend_gtt_mappings(i915); + + i915_gem_freeze(i915); + i915_gem_freeze_late(i915); + + intel_runtime_pm_put(i915); +} + +static void pm_resume(struct drm_i915_private *i915) +{ + /* + * Both suspend and hibernate follow the same wakeup path and assume + * that runtime-pm just works. + */ + intel_runtime_pm_get(i915); + + intel_engines_sanitize(i915); + i915_gem_sanitize(i915); + i915_gem_resume(i915); + + intel_runtime_pm_put(i915); +} + +static int igt_gem_suspend(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + struct drm_file *file; + int err; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + err = -ENOMEM; + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + if (!IS_ERR(ctx)) + err = switch_to_context(i915, ctx); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + goto out; + + err = pm_prepare(i915); + if (err) + goto out; + + pm_suspend(i915); + + /* Here be dragons! Note that with S3RST any S3 may become S4! 
*/ + simulate_hibernate(i915); + + pm_resume(i915); + + mutex_lock(&i915->drm.struct_mutex); + err = switch_to_context(i915, ctx); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); +out: + mock_file_free(i915, file); + return err; +} + +static int igt_gem_hibernate(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + struct drm_file *file; + int err; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + err = -ENOMEM; + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + if (!IS_ERR(ctx)) + err = switch_to_context(i915, ctx); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + goto out; + + err = pm_prepare(i915); + if (err) + goto out; + + pm_hibernate(i915); + + /* Here be dragons! */ + simulate_hibernate(i915); + + pm_resume(i915); + + mutex_lock(&i915->drm.struct_mutex); + err = switch_to_context(i915, ctx); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); +out: + mock_file_free(i915, file); + return err; +} + +int i915_gem_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_gem_suspend), + SUBTEST(igt_gem_hibernate), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index a00e2bd08bce..a15713cae3b3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -17,6 +17,7 @@ selftest(objects, i915_gem_object_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests) selftest(coherency, i915_gem_coherency_live_selftests) selftest(gtt, i915_gem_gtt_live_selftests) +selftest(gem, i915_gem_live_selftests) selftest(evict, i915_gem_evict_live_selftests) selftest(hugepages, i915_gem_huge_page_live_selftests) selftest(contexts, i915_gem_context_live_selftests) -- GitLab From d49b48f088c323dbacae44dfbe56d9c985c8a2a1 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Fri, 31 Aug 2018 09:04:18 +0200 Subject: [PATCH 0784/1692] gpio: Fix crash due to registration race gpiochip_add_data_with_key() adds the gpiochip to the gpio_devices list before of_gpiochip_add() is called, but it's only the latter which sets the ->of_xlate function pointer. gpiochip_find() can be called by someone else between these two actions, and it can find the chip and call of_gpiochip_match_node_and_xlate() which leads to the following crash due to a NULL ->of_xlate(). 
Unhandled prefetch abort: page domain fault (0x01b) at 0x00000000 Modules linked in: leds_gpio(+) gpio_generic(+) CPU: 0 PID: 830 Comm: insmod Not tainted 4.18.0+ #43 Hardware name: ARM-Versatile Express PC is at (null) LR is at of_gpiochip_match_node_and_xlate+0x2c/0x38 Process insmod (pid: 830, stack limit = 0x(ptrval)) (of_gpiochip_match_node_and_xlate) from (gpiochip_find+0x48/0x84) (gpiochip_find) from (of_get_named_gpiod_flags+0xa8/0x238) (of_get_named_gpiod_flags) from (gpiod_get_from_of_node+0x2c/0xc8) (gpiod_get_from_of_node) from (devm_fwnode_get_index_gpiod_from_child+0xb8/0x144) (devm_fwnode_get_index_gpiod_from_child) from (gpio_led_probe+0x208/0x3c4 [leds_gpio]) (gpio_led_probe [leds_gpio]) from (platform_drv_probe+0x48/0x9c) (platform_drv_probe) from (really_probe+0x1d0/0x3d4) (really_probe) from (driver_probe_device+0x78/0x1c0) (driver_probe_device) from (__driver_attach+0x120/0x13c) (__driver_attach) from (bus_for_each_dev+0x68/0xb4) (bus_for_each_dev) from (bus_add_driver+0x1a8/0x268) (bus_add_driver) from (driver_register+0x78/0x10c) (driver_register) from (do_one_initcall+0x54/0x1fc) (do_one_initcall) from (do_init_module+0x64/0x1f4) (do_init_module) from (load_module+0x2198/0x26ac) (load_module) from (sys_finit_module+0xe0/0x110) (sys_finit_module) from (ret_fast_syscall+0x0/0x54) One way to fix this would be to rework the hairy registration sequence in gpiochip_add_data_with_key(), but since I'd probably introduce a couple of new bugs if I attempted that, simply add a check for a non-NULL of_xlate function pointer in of_gpiochip_match_node_and_xlate(). This works since the driver looking for the gpio will simply fail to find the gpio and defer its probe and be reprobed when the driver which is registering the gpiochip has fully completed its probe. Signed-off-by: Vincent Whitchurch Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib-of.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a4f1157d6aa0..d4e7a09598fa 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data) struct of_phandle_args *gpiospec = data; return chip->gpiodev->dev.of_node == gpiospec->np && + chip->of_xlate && chip->of_xlate(chip, gpiospec, NULL) >= 0; } -- GitLab From f52bb98f5aded4c43e52f5ce19fb83f7261e9e73 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 30 Aug 2018 16:05:32 +0100 Subject: [PATCH 0785/1692] arm64: mm: always enable CONFIG_HOLES_IN_ZONE Commit 6d526ee26ccd ("arm64: mm: enable CONFIG_HOLES_IN_ZONE for NUMA") only enabled HOLES_IN_ZONE for NUMA systems because the NUMA code was choking on the missing zone for nomap pages. This problem doesn't just apply to NUMA systems. If the architecture doesn't set HAVE_ARCH_PFN_VALID, pfn_valid() will return true if the pfn is part of a valid sparsemem section. When working with multiple pages, the mm code uses pfn_valid_within() to test each page it uses within the sparsemem section is valid. On most systems memory comes in MAX_ORDER_NR_PAGES chunks which all have valid/initialised struct pages. In this case pfn_valid_within() is optimised out. Systems where this isn't true (e.g. due to nomap) should set HOLES_IN_ZONE and provide HAVE_ARCH_PFN_VALID so that mm tests each page as it works with it. 
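For reference, a minimal sketch of the per-page guard described above. The helper is illustrative rather than lifted from mm code, but pfn_valid_within() and pfn_to_page() are the real interfaces:

  #include <linux/mm.h>

  /*
   * Compiles down to a plain loop when HOLES_IN_ZONE is not set, because
   * pfn_valid_within() is then constant-true.
   */
  static struct page *example_first_valid_page(unsigned long pfn,
                                               unsigned long nr_pages)
  {
          unsigned long i;

          for (i = 0; i < nr_pages; i++) {
                  if (!pfn_valid_within(pfn + i))
                          continue;
                  return pfn_to_page(pfn + i);
          }

          return NULL;
  }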
Currently non-NUMA arm64 systems can't enable HOLES_IN_ZONE, leading to a VM_BUG_ON(): | page:fffffdff802e1780 is uninitialized and poisoned | raw: ffffffffffffffff ffffffffffffffff ffffffffffffffff ffffffffffffffff | raw: ffffffffffffffff ffffffffffffffff ffffffffffffffff ffffffffffffffff | page dumped because: VM_BUG_ON_PAGE(PagePoisoned(p)) | ------------[ cut here ]------------ | kernel BUG at include/linux/mm.h:978! | Internal error: Oops - BUG: 0 [#1] PREEMPT SMP [...] | CPU: 1 PID: 25236 Comm: dd Not tainted 4.18.0 #7 | Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 | pstate: 40000085 (nZcv daIf -PAN -UAO) | pc : move_freepages_block+0x144/0x248 | lr : move_freepages_block+0x144/0x248 | sp : fffffe0071177680 [...] | Process dd (pid: 25236, stack limit = 0x0000000094cc07fb) | Call trace: | move_freepages_block+0x144/0x248 | steal_suitable_fallback+0x100/0x16c | get_page_from_freelist+0x440/0xb20 | __alloc_pages_nodemask+0xe8/0x838 | new_slab+0xd4/0x418 | ___slab_alloc.constprop.27+0x380/0x4a8 | __slab_alloc.isra.21.constprop.26+0x24/0x34 | kmem_cache_alloc+0xa8/0x180 | alloc_buffer_head+0x1c/0x90 | alloc_page_buffers+0x68/0xb0 | create_empty_buffers+0x20/0x1ec | create_page_buffers+0xb0/0xf0 | __block_write_begin_int+0xc4/0x564 | __block_write_begin+0x10/0x18 | block_write_begin+0x48/0xd0 | blkdev_write_begin+0x28/0x30 | generic_perform_write+0x98/0x16c | __generic_file_write_iter+0x138/0x168 | blkdev_write_iter+0x80/0xf0 | __vfs_write+0xe4/0x10c | vfs_write+0xb4/0x168 | ksys_write+0x44/0x88 | sys_write+0xc/0x14 | el0_svc_naked+0x30/0x34 | Code: aa1303e0 90001a01 91296421 94008902 (d4210000) | ---[ end trace 1601ba47f6e883fe ]--- Remove the NUMA dependency. Link: https://www.spinics.net/lists/arm-kernel/msg671851.html Cc: Cc: Ard Biesheuvel Reported-by: Mikulas Patocka Reviewed-by: Pavel Tatashin Tested-by: Mikulas Patocka Signed-off-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 29e75b47becd..1b1a0e95c751 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK config HOLES_IN_ZONE def_bool y - depends on NUMA source kernel/Kconfig.hz -- GitLab From 6fb86d97207880c1286cd4cb3a7e6a598afbc727 Mon Sep 17 00:00:00 2001 From: Mukesh Ojha Date: Tue, 28 Aug 2018 12:24:54 +0530 Subject: [PATCH 0786/1692] cpu/hotplug: Remove skip_onerr field from cpuhp_step structure When notifiers were there, `skip_onerr` was used to avoid calling particular step startup/teardown callbacks in the CPU up/down rollback path, which made the hotplug asymmetric. As notifiers are gone now after the full state machine conversion, the `skip_onerr` field is no longer required. Remove it from the structure and its usage. 
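For context, a minimal sketch of how a hotplug state is registered today; with skip_onerr gone, the teardown callback below is always invoked when an up-rollback unwinds the state. All names here are illustrative:

  #include <linux/cpuhotplug.h>

  static int example_cpu_online(unsigned int cpu)
  {
          /* bring per-CPU state up */
          return 0;
  }

  static int example_cpu_offline(unsigned int cpu)
  {
          /* tear per-CPU state down; also runs when a later online step fails */
          return 0;
  }

  static int __init example_init(void)
  {
          int ret;

          ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                  example_cpu_online, example_cpu_offline);
          return ret < 0 ? ret : 0;
  }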
Signed-off-by: Mukesh Ojha Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/1535439294-31426-1-git-send-email-mojha@codeaurora.org --- kernel/cpu.c | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/kernel/cpu.c b/kernel/cpu.c index ed44d7d34c2d..aa7fe85ad62e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { } * @name: Name of the step * @startup: Startup function of the step * @teardown: Teardown function of the step - * @skip_onerr: Do not invoke the functions on error rollback - * Will go away once the notifiers are gone * @cant_stop: Bringup/teardown can't be stopped at this step */ struct cpuhp_step { @@ -119,7 +117,6 @@ struct cpuhp_step { struct hlist_node *node); } teardown; struct hlist_head list; - bool skip_onerr; bool cant_stop; bool multi_instance; }; @@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu) static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) { - for (st->state--; st->state > st->target; st->state--) { - struct cpuhp_step *step = cpuhp_get_step(st->state); - - if (!step->skip_onerr) - cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); - } + for (st->state--; st->state > st->target; st->state--) + cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); } static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, @@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu) WARN_ON_ONCE(!cpuhp_is_ap_state(state)); - if (st->rollback) { - struct cpuhp_step *step = cpuhp_get_step(state); - if (step->skip_onerr) - goto next; - } - if (cpuhp_is_atomic_state(state)) { local_irq_disable(); st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); @@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu) st->should_run = false; } -next: cpuhp_lock_release(bringup); if (!st->should_run) @@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void) static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) { - for (st->state++; st->state < st->target; st->state++) { - struct cpuhp_step *step = cpuhp_get_step(st->state); - - if (!step->skip_onerr) - cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); - } + for (st->state++; st->state < st->target; st->state++) + cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); } static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, -- GitLab From 0413bedabc886c3a56804d1c80a58e99077b1d91 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 28 Aug 2018 15:10:48 -0500 Subject: [PATCH 0787/1692] of: Add device_type access helper functions In preparation to remove direct access to device_node.type, add of_node_is_type() and of_node_get_device_type() helpers to check and retrieve the device type. 
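A hypothetical caller of the two new helpers, to show the intended usage; the node pointer and the "pci" type are only examples:

  #include <linux/of.h>
  #include <linux/printk.h>

  static bool example_is_pci_bridge(const struct device_node *np)
  {
          const char *type = of_node_get_device_type(np);

          if (type)
                  pr_debug("%pOF has device_type \"%s\"\n", np, type);

          return of_node_is_type(np, "pci");
  }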
Cc: Frank Rowand Signed-off-by: Rob Herring --- include/linux/of.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/include/linux/of.h b/include/linux/of.h index 688c52dd7b3e..99b0ebf49632 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -988,6 +988,18 @@ static inline struct device_node *of_find_matching_node( return of_find_matching_node_and_match(from, matches, NULL); } +static inline const char *of_node_get_device_type(const struct device_node *np) +{ + return of_get_property(np, "type", NULL); +} + +static inline bool of_node_is_type(const struct device_node *np, const char *type) +{ + const char *match = of_node_get_device_type(np); + + return np && match && type && !strcmp(match, type); +} + /** * of_property_count_u8_elems - Count the number of u8 elements in a property * -- GitLab From 2512e40e48d21d8bac09f7e91d2c3ceb2d3b50b2 Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Wed, 22 Aug 2018 12:08:13 -0500 Subject: [PATCH 0788/1692] ipmi: Rework SMI registration failure There were certain situations where ipmi_register_smi() would return a failure, but the interface would still be registered and would need to be unregistered. This is obviously a bad design and resulted in an oops in certain failure cases. If the interface is started up in ipmi_register_smi(), then an error occurs, shut down the interface there so the cleanup can be done properly. Fix the various smi users, too. Signed-off-by: Corey Minyard Reported-by: Justin Ernst Tested-by: Justin Ernst Cc: Andrew Banman Cc: Russ Anderson Cc: # 4.18.x --- drivers/char/ipmi/ipmi_msghandler.c | 53 ++++++++++++++++------------- drivers/char/ipmi/ipmi_si_intf.c | 17 +++------ drivers/char/ipmi/ipmi_ssif.c | 13 ++----- 3 files changed, 37 insertions(+), 46 deletions(-) diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 51832b8a2c62..7fc9612070a1 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, rv = handlers->start_processing(send_info, intf); if (rv) - goto out; + goto out_err; rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); if (rv) { dev_err(si_dev, "Unable to get the device id: %d\n", rv); - goto out; + goto out_err_started; } mutex_lock(&intf->bmc_reg_mutex); rv = __scan_channels(intf, &id); mutex_unlock(&intf->bmc_reg_mutex); + if (rv) + goto out_err_bmc_reg; - out: - if (rv) { - ipmi_bmc_unregister(intf); - list_del_rcu(&intf->link); - mutex_unlock(&ipmi_interfaces_mutex); - synchronize_srcu(&ipmi_interfaces_srcu); - cleanup_srcu_struct(&intf->users_srcu); - kref_put(&intf->refcount, intf_free); - } else { - /* - * Keep memory order straight for RCU readers. Make - * sure everything else is committed to memory before - * setting intf_num to mark the interface valid. - */ - smp_wmb(); - intf->intf_num = i; - mutex_unlock(&ipmi_interfaces_mutex); + /* + * Keep memory order straight for RCU readers. Make + * sure everything else is committed to memory before + * setting intf_num to mark the interface valid. + */ + smp_wmb(); + intf->intf_num = i; + mutex_unlock(&ipmi_interfaces_mutex); - /* After this point the interface is legal to use. */ - call_smi_watchers(i, intf->si_dev); - } + /* After this point the interface is legal to use. 
*/ + call_smi_watchers(i, intf->si_dev); + + return 0; + + out_err_bmc_reg: + ipmi_bmc_unregister(intf); + out_err_started: + if (intf->handlers->shutdown) + intf->handlers->shutdown(intf->send_info); + out_err: + list_del_rcu(&intf->link); + mutex_unlock(&ipmi_interfaces_mutex); + synchronize_srcu(&ipmi_interfaces_srcu); + cleanup_srcu_struct(&intf->users_srcu); + kref_put(&intf->refcount, intf_free); return rv; } @@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf) } srcu_read_unlock(&intf->users_srcu, index); - intf->handlers->shutdown(intf->send_info); + if (intf->handlers->shutdown) + intf->handlers->shutdown(intf->send_info); cleanup_smi_msgs(intf); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 90ec010bffbd..5faa917df1b6 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi) si_to_str[new_smi->io.si_type]); WARN_ON(new_smi->io.dev->init_name != NULL); - kfree(init_name); - - return 0; - -out_err: - if (new_smi->intf) { - ipmi_unregister_smi(new_smi->intf); - new_smi->intf = NULL; - } + out_err: kfree(init_name); - return rv; } @@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info) kfree(smi_info->si_sm); smi_info->si_sm = NULL; + + smi_info->intf = NULL; } /* @@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info) list_del(&smi_info->link); - if (smi_info->intf) { + if (smi_info->intf) ipmi_unregister_smi(smi_info->intf); - smi_info->intf = NULL; - } if (smi_info->pdev) { if (smi_info->pdev_registered) diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 18e4650c233b..c12edc8e91df 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1214,18 +1214,11 @@ static void shutdown_ssif(void *send_info) complete(&ssif_info->wake_thread); kthread_stop(ssif_info->thread); } - - /* - * No message can be outstanding now, we have removed the - * upper layer and it permitted us to do so. - */ - kfree(ssif_info); } static int ssif_remove(struct i2c_client *client) { struct ssif_info *ssif_info = i2c_get_clientdata(client); - struct ipmi_smi *intf; struct ssif_addr_info *addr_info; if (!ssif_info) @@ -1235,9 +1228,7 @@ static int ssif_remove(struct i2c_client *client) * After this point, we won't deliver anything asychronously * to the message handler. We can unregister ourself. */ - intf = ssif_info->intf; - ssif_info->intf = NULL; - ipmi_unregister_smi(intf); + ipmi_unregister_smi(ssif_info->intf); list_for_each_entry(addr_info, &ssif_infos, link) { if (addr_info->client == client) { @@ -1246,6 +1237,8 @@ static int ssif_remove(struct i2c_client *client) } } + kfree(ssif_info); + return 0; } -- GitLab From c86ba91be75702c013bbf7379542920b6920e98f Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Thu, 23 Aug 2018 15:22:35 -0500 Subject: [PATCH 0789/1692] ipmi: Move BT capabilities detection to the detect call The capabilities detection was being done as part of the normal state machine, but it was possible for it to be running while the upper layers of the IPMI driver were initializing the device, resulting in error and failure to initialize. Move the capabilities detection to the the detect function, so it's done before anything else runs on the device. This also simplifies the state machine and removes some code, as a bonus. 
Signed-off-by: Corey Minyard Reported-by: Andrew Banman Tested-by: Andrew Banman Cc: --- drivers/char/ipmi/ipmi_bt_sm.c | 92 ++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 44 deletions(-) diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index a3397664f800..97d6856c9c0f 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -59,8 +59,6 @@ enum bt_states { BT_STATE_RESET3, BT_STATE_RESTART, BT_STATE_PRINTME, - BT_STATE_CAPABILITIES_BEGIN, - BT_STATE_CAPABILITIES_END, BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ }; @@ -86,7 +84,6 @@ struct si_sm_data { int error_retries; /* end of "common" fields */ int nonzero_status; /* hung BMCs stay all 0 */ enum bt_states complete; /* to divert the state machine */ - int BT_CAP_outreqs; long BT_CAP_req2rsp; int BT_CAP_retries; /* Recommended retries */ }; @@ -137,8 +134,6 @@ static char *state2txt(unsigned char state) case BT_STATE_RESET3: return("RESET3"); case BT_STATE_RESTART: return("RESTART"); case BT_STATE_LONG_BUSY: return("LONG_BUSY"); - case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN"); - case BT_STATE_CAPABILITIES_END: return("CAP_END"); } return("BAD STATE"); } @@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) bt->complete = BT_STATE_IDLE; /* end here */ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; - /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */ return 3; /* We claim 3 bytes of space; ought to check SPMI table */ } @@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt, static enum si_sm_result bt_event(struct si_sm_data *bt, long time) { - unsigned char status, BT_CAP[8]; + unsigned char status; static enum bt_states last_printed = BT_STATE_PRINTME; int i; @@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ BT_CONTROL(BT_H_BUSY); - bt->timeout = bt->BT_CAP_req2rsp; - - /* Read BT capabilities if it hasn't been done yet */ - if (!bt->BT_CAP_outreqs) - BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, - SI_SM_CALL_WITHOUT_DELAY); BT_SI_SM_RETURN(SI_SM_IDLE); case BT_STATE_XACTION_START: @@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) BT_STATE_CHANGE(BT_STATE_XACTION_START, SI_SM_CALL_WITH_DELAY); - /* - * Get BT Capabilities, using timing of upper level state machine. - * Set outreqs to prevent infinite loop on timeout. 
- */ - case BT_STATE_CAPABILITIES_BEGIN: - bt->BT_CAP_outreqs = 1; - { - unsigned char GetBT_CAP[] = { 0x18, 0x36 }; - bt->state = BT_STATE_IDLE; - bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); - } - bt->complete = BT_STATE_CAPABILITIES_END; - BT_STATE_CHANGE(BT_STATE_XACTION_START, - SI_SM_CALL_WITH_DELAY); - - case BT_STATE_CAPABILITIES_END: - i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); - bt_init_data(bt, bt->io); - if ((i == 8) && !BT_CAP[2]) { - bt->BT_CAP_outreqs = BT_CAP[3]; - bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC; - bt->BT_CAP_retries = BT_CAP[7]; - } else - printk(KERN_WARNING "IPMI BT: using default values\n"); - if (!bt->BT_CAP_outreqs) - bt->BT_CAP_outreqs = 1; - printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n", - bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries); - bt->timeout = bt->BT_CAP_req2rsp; - return SI_SM_CALL_WITHOUT_DELAY; - default: /* should never occur */ return error_recovery(bt, status, @@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) static int bt_detect(struct si_sm_data *bt) { + unsigned char GetBT_CAP[] = { 0x18, 0x36 }; + unsigned char BT_CAP[8]; + enum si_sm_result smi_result; + int rv; + /* * It's impossible for the BT status and interrupt registers to be * all 1's, (assuming a properly functioning, self-initialized BMC) @@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt) if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) return 1; reset_flags(bt); + + /* + * Try getting the BT capabilities here. + */ + rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP)); + if (rv) { + dev_warn(bt->io->dev, + "Can't start capabilities transaction: %d\n", rv); + goto out_no_bt_cap; + } + + smi_result = SI_SM_CALL_WITHOUT_DELAY; + for (;;) { + if (smi_result == SI_SM_CALL_WITH_DELAY || + smi_result == SI_SM_CALL_WITH_TICK_DELAY) { + schedule_timeout_uninterruptible(1); + smi_result = bt_event(bt, jiffies_to_usecs(1)); + } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { + smi_result = bt_event(bt, 0); + } else + break; + } + + rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP)); + bt_init_data(bt, bt->io); + if (rv < 8) { + dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv); + goto out_no_bt_cap; + } + + if (BT_CAP[2]) { + dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]); +out_no_bt_cap: + dev_warn(bt->io->dev, "using default values\n"); + } else { + bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC; + bt->BT_CAP_retries = BT_CAP[7]; + } + + dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n", + bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries); + return 0; } -- GitLab From 0745dde62835be7e2afe62fcdb482fcad79cb743 Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Thu, 30 Aug 2018 13:06:21 -0500 Subject: [PATCH 0790/1692] ipmi: Fix I2C client removal in the SSIF driver The SSIF driver was removing any client that came in through the platform interface, but it should only remove clients that it added. On a failure in the probe function, this could result in the following oops when the driver is removed and the client gets unregistered twice: CPU: 107 PID: 30266 Comm: rmmod Not tainted 4.18.0+ #80 Hardware name: Cavium Inc. 
Saber/Saber, BIOS Cavium reference firmware version 7.0 08/04/2018 pstate: 60400009 (nZCv daif +PAN -UAO) pc : kernfs_find_ns+0x28/0x120 lr : kernfs_find_and_get_ns+0x40/0x60 sp : ffff00002310fb50 x29: ffff00002310fb50 x28: ffff800a8240f800 x27: 0000000000000000 x26: 0000000000000000 x25: 0000000056000000 x24: ffff000009073000 x23: ffff000008998b38 x22: 0000000000000000 x21: ffff800ed86de820 x20: 0000000000000000 x19: ffff00000913a1d8 x18: 0000000000000000 x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000 x14: 5300737265766972 x13: 643d4d4554535953 x12: 0000000000000030 x11: 0000000000000030 x10: 0101010101010101 x9 : ffff800ea06cc3f9 x8 : 0000000000000000 x7 : 0000000000000141 x6 : ffff000009073000 x5 : ffff800adb706b00 x4 : 0000000000000000 x3 : 00000000ffffffff x2 : 0000000000000000 x1 : ffff000008998b38 x0 : ffff000008356760 Process rmmod (pid: 30266, stack limit = 0x00000000e218418d) Call trace: kernfs_find_ns+0x28/0x120 kernfs_find_and_get_ns+0x40/0x60 sysfs_unmerge_group+0x2c/0x6c dpm_sysfs_remove+0x34/0x70 device_del+0x58/0x30c device_unregister+0x30/0x7c i2c_unregister_device+0x84/0x90 [i2c_core] ssif_platform_remove+0x38/0x98 [ipmi_ssif] platform_drv_remove+0x2c/0x6c device_release_driver_internal+0x168/0x1f8 driver_detach+0x50/0xbc bus_remove_driver+0x74/0xe8 driver_unregister+0x34/0x5c platform_driver_unregister+0x20/0x2c cleanup_ipmi_ssif+0x50/0xd82c [ipmi_ssif] __arm64_sys_delete_module+0x1b4/0x220 el0_svc_handler+0x104/0x160 el0_svc+0x8/0xc Code: aa1e03e0 aa0203f6 aa0103f7 d503201f (7940e280) ---[ end trace 09f0e34cce8e2d8c ]--- Kernel panic - not syncing: Fatal exception SMP: stopping secondary CPUs Kernel Offset: disabled CPU features: 0x23800c38 So track the clients that the SSIF driver adds and only remove those. Reported-by: George Cherian Signed-off-by: Corey Minyard Tested-by: George Cherian Cc: # 4.14.x --- drivers/char/ipmi/ipmi_ssif.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index c12edc8e91df..265d6a6583bc 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -181,6 +181,8 @@ struct ssif_addr_info { struct device *dev; struct i2c_client *client; + struct i2c_client *added_client; + struct mutex clients_mutex; struct list_head clients; @@ -1641,15 +1643,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) out: if (rv) { - /* - * Note that if addr_info->client is assigned, we - * leave it. The i2c client hangs around even if we - * return a failure here, and the failure here is not - * propagated back to the i2c code. This seems to be - * design intent, strange as it may be. But if we - * don't leave it, ssif_platform_remove will not remove - * the client like it should. - */ + addr_info->client = NULL; dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); kfree(ssif_info); } @@ -1669,7 +1663,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque) if (adev->type != &i2c_adapter_type) return 0; - i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo); + addr_info->added_client = i2c_new_device(to_i2c_adapter(adev), + &addr_info->binfo); if (!addr_info->adapter_name) return 1; /* Only try the first I2C adapter by default. 
*/ @@ -1842,7 +1837,7 @@ static int ssif_platform_remove(struct platform_device *dev) return 0; mutex_lock(&ssif_infos_mutex); - i2c_unregister_device(addr_info->client); + i2c_unregister_device(addr_info->added_client); list_del(&addr_info->link); kfree(addr_info); -- GitLab From a64f88874930be944f2c78c7df501d5d1d19becc Mon Sep 17 00:00:00 2001 From: Jyoti Yadav Date: Fri, 31 Aug 2018 02:00:23 -0400 Subject: [PATCH 0791/1692] drm/i915/intel_csr.c Fix DMC FW Loading issue on ICL. This patch resolves the DMC FW loading issue. Earlier DMC FW package have only one DMC FW for one stepping. But as such there is no such restriction from Package side. For ICL icl_dmc_ver1_07.bin binary package has DMC FW for 2 steppings. So while reading the dmc_offset from package header, for 1st stepping offset used to come 0x0 and was working fine till now. But for second stepping and other steppings, offset is non zero number and is in dwords. So we need to convert into bytes to fetch correct DMC FW from correct place. v2 : Added check for DMC FW max size for various gen. (Imre Deak) v3 : Corrected naming convention for various gen. (Imre Deak) v4 : Initialized max_fw_size to 0 v5 : Corrected DMC FW MAX_SIZE for various gen. (Imre Deak) v6 : Fixed the typo issues. Reviewed-by: Imre Deak Signed-off-by: Jyoti Yadav Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/1535695223-4648-1-git-send-email-jyoti.r.yadav@intel.com --- drivers/gpu/drm/i915/intel_csr.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 1ec4f09c61f6..14cf4c367e36 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -55,7 +55,9 @@ MODULE_FIRMWARE(I915_CSR_BXT); #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) -#define CSR_MAX_FW_SIZE 0x2FFF +#define BXT_CSR_MAX_FW_SIZE 0x3000 +#define GLK_CSR_MAX_FW_SIZE 0x4000 +#define ICL_CSR_MAX_FW_SIZE 0x6000 #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF struct intel_css_header { @@ -279,6 +281,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, struct intel_csr *csr = &dev_priv->csr; const struct stepping_info *si = intel_get_stepping_info(dev_priv); uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; + uint32_t max_fw_size = 0; uint32_t i; uint32_t *dmc_payload; uint32_t required_version; @@ -359,6 +362,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, si->stepping); return NULL; } + /* Convert dmc_offset into number of bytes. By default it is in dwords*/ + dmc_offset *= 4; readcount += dmc_offset; /* Extract dmc_header information. */ @@ -391,8 +396,16 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, /* fw_size is in dwords, so multiplied by 4 to convert into bytes. 
*/ nbytes = dmc_header->fw_size * 4; - if (nbytes > CSR_MAX_FW_SIZE) { - DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes); + if (INTEL_GEN(dev_priv) >= 11) + max_fw_size = ICL_CSR_MAX_FW_SIZE; + else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + max_fw_size = GLK_CSR_MAX_FW_SIZE; + else if (IS_GEN9(dev_priv)) + max_fw_size = BXT_CSR_MAX_FW_SIZE; + else + MISSING_CASE(INTEL_REVID(dev_priv)); + if (nbytes > max_fw_size) { + DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes); return NULL; } csr->dmc_fw_size = dmc_header->fw_size; -- GitLab From 342db04ae71273322f0011384a9ed414df8bdae4 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 28 Aug 2018 17:49:01 +0200 Subject: [PATCH 0792/1692] x86/dumpstack: Don't dump kernel memory based on usermode RIP show_opcodes() is used both for dumping kernel instructions and for dumping user instructions. If userspace causes #PF by jumping to a kernel address, show_opcodes() can be reached with regs->ip controlled by the user, pointing to kernel code. Make sure that userspace can't trick us into dumping kernel memory into dmesg. Fixes: 7cccf0725cf7 ("x86/dumpstack: Add a show_ip() function") Signed-off-by: Jann Horn Signed-off-by: Thomas Gleixner Reviewed-by: Kees Cook Reviewed-by: Borislav Petkov Cc: "H. Peter Anvin" Cc: Andy Lutomirski Cc: security@kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180828154901.112726-1-jannh@google.com --- arch/x86/include/asm/stacktrace.h | 2 +- arch/x86/kernel/dumpstack.c | 16 +++++++++++++--- arch/x86/mm/fault.c | 2 +- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index b6dc698f992a..f335aad404a4 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void) return (unsigned long)frame; } -void show_opcodes(u8 *rip, const char *loglvl); +void show_opcodes(struct pt_regs *regs, const char *loglvl); void show_ip(struct pt_regs *regs, const char *loglvl); #endif /* _ASM_X86_STACKTRACE_H */ diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 1596e6bfea6f..f56895106ccf 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -90,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable, * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random * guesstimate in attempt to achieve all of the above. */ -void show_opcodes(u8 *rip, const char *loglvl) +void show_opcodes(struct pt_regs *regs, const char *loglvl) { #define PROLOGUE_SIZE 42 #define EPILOGUE_SIZE 21 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) u8 opcodes[OPCODE_BUFSIZE]; + unsigned long prologue = regs->ip - PROLOGUE_SIZE; + bool bad_ip; - if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) { + /* + * Make sure userspace isn't trying to trick us into dumping kernel + * memory by pointing the userspace instruction pointer at it. 
+ */ + bad_ip = user_mode(regs) && + __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); + + if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue, + OPCODE_BUFSIZE)) { printk("%sCode: Bad RIP value.\n", loglvl); } else { printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" @@ -113,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl) #else printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); #endif - show_opcodes((u8 *)regs->ip, loglvl); + show_opcodes(regs, loglvl); } void show_iret_regs(struct pt_regs *regs) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b9123c497e0a..47bebfe6efa7 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, printk(KERN_CONT "\n"); - show_opcodes((u8 *)regs->ip, loglvl); + show_opcodes(regs, loglvl); } static void -- GitLab From 829fe4aa9ac16417a904ad1de1307de906854bcf Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 29 Aug 2018 20:43:17 +0100 Subject: [PATCH 0793/1692] x86: Allow generating user-space headers without a compiler When bootstrapping an architecture, it's usual to generate the kernel's user-space headers (make headers_install) before building a compiler. Move the compiler check (for asm goto support) to the archprepare target so that it is only done when building code for the target. Fixes: e501ce957a78 ("x86: Force asm-goto") Reported-by: Helmut Grohne Signed-off-by: Ben Hutchings Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180829194317.GA4765@decadent.org.uk --- arch/x86/Makefile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 8fc8f94ef5f5..8f6e7eb8ae9f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -175,10 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER endif endif -ifndef CC_HAVE_ASM_GOTO - $(error Compiler lacks asm-goto support.) -endif - ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) # This compiler flag is not supported by Clang: KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) @@ -300,6 +296,13 @@ PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ +archprepare: checkbin +checkbin: +ifndef CC_HAVE_ASM_GOTO + @echo Compiler lacks asm-goto support. + @exit 1 +endif + archclean: $(Q)rm -rf $(objtree)/arch/i386 $(Q)rm -rf $(objtree)/arch/x86_64 -- GitLab From 4012e77a903d114f915fc607d6d2ed54a3d6c9b1 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 29 Aug 2018 08:47:18 -0700 Subject: [PATCH 0794/1692] x86/nmi: Fix NMI uaccess race against CR3 switching A NMI can hit in the middle of context switching or in the middle of switch_mm_irqs_off(). In either case, CR3 might not match current->mm, which could cause copy_from_user_nmi() and friends to read the wrong memory. Fix it by adding a new nmi_uaccess_okay() helper and checking it in copy_from_user_nmi() and in __copy_from_user_nmi()'s callers. 
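The same guard applies to any other user-memory access from NMI context. A hedged sketch of the pattern follows; the wrapper itself is illustrative, and copy_from_user_nmi() already performs this check internally after the hunks below:

  #include <linux/uaccess.h>
  #include <asm/tlbflush.h>

  /* Illustrative wrapper only, not code from this patch. */
  static unsigned long example_nmi_copy(void *dst, const void __user *src,
                                        unsigned long n)
  {
          if (!nmi_uaccess_okay())
                  return n;       /* report "nothing copied", as callers expect */

          return copy_from_user_nmi(dst, src, n);
  }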
Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Rik van Riel Cc: Nadav Amit Cc: Borislav Petkov Cc: Jann Horn Cc: Peter Zijlstra Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/dd956eba16646fd0b15c3c0741269dfd84452dac.1535557289.git.luto@kernel.org --- arch/x86/events/core.c | 2 +- arch/x86/include/asm/tlbflush.h | 40 +++++++++++++++++++++++++++++++++ arch/x86/lib/usercopy.c | 5 +++++ arch/x86/mm/tlb.c | 7 ++++++ 4 files changed, 53 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 5f4829f10129..dfb2f7c0d019 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs perf_callchain_store(entry, regs->ip); - if (!current->mm) + if (!nmi_uaccess_okay()) return; if (perf_callchain_user32(regs, entry)) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 29c9da6c62fc..58ce5288878e 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -175,8 +175,16 @@ struct tlb_state { * are on. This means that it may not match current->active_mm, * which will contain the previous user mm when we're in lazy TLB * mode even if we've already switched back to swapper_pg_dir. + * + * During switch_mm_irqs_off(), loaded_mm will be set to + * LOADED_MM_SWITCHING during the brief interrupts-off window + * when CR3 and loaded_mm would otherwise be inconsistent. This + * is for nmi_uaccess_okay()'s benefit. */ struct mm_struct *loaded_mm; + +#define LOADED_MM_SWITCHING ((struct mm_struct *)1) + u16 loaded_mm_asid; u16 next_asid; /* last user mm's ctx id */ @@ -246,6 +254,38 @@ struct tlb_state { }; DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); +/* + * Blindly accessing user memory from NMI context can be dangerous + * if we're in the middle of switching the current user task or + * switching the loaded mm. It can also be dangerous if we + * interrupted some kernel code that was temporarily using a + * different mm. + */ +static inline bool nmi_uaccess_okay(void) +{ + struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); + struct mm_struct *current_mm = current->mm; + + VM_WARN_ON_ONCE(!loaded_mm); + + /* + * The condition we want to check is + * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, + * if we're running in a VM with shadow paging, and nmi_uaccess_okay() + * is supposed to be reasonably fast. + * + * Instead, we check the almost equivalent but somewhat conservative + * condition below, and we rely on the fact that switch_mm_irqs_off() + * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. + */ + if (loaded_mm != current_mm) + return false; + + VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); + + return true; +} + /* Initialize cr4 shadow for this CPU. */ static inline void cr4_init_shadow(void) { diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index c8c6ad0d58b8..3f435d7fca5e 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -7,6 +7,8 @@ #include #include +#include + /* * We rely on the nested NMI work to allow atomic faults from the NMI path; the * nested NMI paths are careful to preserve CR2. 
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) if (__range_not_ok(from, n, TASK_SIZE)) return n; + if (!nmi_uaccess_okay()) + return n; + /* * Even though this function is typically called from NMI/IRQ context * disable pagefaults so that its behaviour is consistent even when diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 9517d1b2a281..e96b99eb800c 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + /* Let nmi_uaccess_okay() know that we're changing CR3. */ + this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); + barrier(); + if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); @@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (next != &init_mm) this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); + /* Make sure we write CR3 before loaded_mm. */ + barrier(); + this_cpu_write(cpu_tlbstate.loaded_mm, next); this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); } -- GitLab From 1c71bc565cdbd592f9bca4fbf60111f664899a76 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Mon, 13 Aug 2018 09:02:17 +0100 Subject: [PATCH 0795/1692] drm/i915/perf: simplify configure all context function We don't need any special treatment on error so just return as soon as possible. Signed-off-by: Lionel Landwerlin Reviewed-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180813080218.28994-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_perf.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 0376338d1f8d..49597cf31707 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1819,7 +1819,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, /* Switch away from any user context. */ ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); if (ret) - goto out; + return ret; /* * The OA register config is setup through the context image. This image @@ -1838,7 +1838,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, wait_flags, MAX_SCHEDULE_TIMEOUT); if (ret) - goto out; + return ret; /* Update all contexts now that we've stalled the submission. */ list_for_each_entry(ctx, &dev_priv->contexts.list, link) { @@ -1850,10 +1850,8 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, continue; regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); - if (IS_ERR(regs)) { - ret = PTR_ERR(regs); - goto out; - } + if (IS_ERR(regs)) + return PTR_ERR(regs); ce->state->obj->mm.dirty = true; regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); @@ -1863,7 +1861,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, i915_gem_object_unpin_map(ce->state->obj); } - out: return ret; } -- GitLab From 35ab4fd2b98b8ad11d67606dd209e0947e448074 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Mon, 13 Aug 2018 09:02:18 +0100 Subject: [PATCH 0796/1692] drm/i915/perf: reuse intel_lrc ctx regs macro Abstract the context image access a bit. 
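For reference, CTX_REG() from intel_lrc_reg.h packs an offset/value pair into the context image. Judging by the open-coded form removed below, it boils down to roughly the following; the sketch assumes the driver-local register helpers:

  #include "i915_reg.h"   /* driver-local: i915_reg_t, i915_mmio_reg_offset() */

  /* Roughly what CTX_REG(reg_state, pos, reg, val) does. */
  static inline void example_ctx_reg(u32 *reg_state, u32 pos,
                                     i915_reg_t reg, u32 val)
  {
          reg_state[pos] = i915_mmio_reg_offset(reg);
          reg_state[pos + 1] = val;
  }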
Signed-off-by: Lionel Landwerlin Reviewed-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180813080218.28994-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_perf.c | 34 +++++++++++++++----------------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 49597cf31707..ccb20230df2c 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -210,6 +210,7 @@ #include "i915_oa_cflgt3.h" #include "i915_oa_cnl.h" #include "i915_oa_icl.h" +#include "intel_lrc_reg.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -1636,27 +1637,25 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; /* The MMIO offsets for Flex EU registers aren't contiguous */ - u32 flex_mmio[] = { - i915_mmio_reg_offset(EU_PERF_CNTL0), - i915_mmio_reg_offset(EU_PERF_CNTL1), - i915_mmio_reg_offset(EU_PERF_CNTL2), - i915_mmio_reg_offset(EU_PERF_CNTL3), - i915_mmio_reg_offset(EU_PERF_CNTL4), - i915_mmio_reg_offset(EU_PERF_CNTL5), - i915_mmio_reg_offset(EU_PERF_CNTL6), + i915_reg_t flex_regs[] = { + EU_PERF_CNTL0, + EU_PERF_CNTL1, + EU_PERF_CNTL2, + EU_PERF_CNTL3, + EU_PERF_CNTL4, + EU_PERF_CNTL5, + EU_PERF_CNTL6, }; int i; - reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL); - reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent << - GEN8_OA_TIMER_PERIOD_SHIFT) | - (dev_priv->perf.oa.periodic ? - GEN8_OA_TIMER_ENABLE : 0) | - GEN8_OA_COUNTER_RESUME; + CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL, + (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME); - for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { + for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { u32 state_offset = ctx_flexeu0 + i * 2; - u32 mmio = flex_mmio[i]; + u32 mmio = i915_mmio_reg_offset(flex_regs[i]); /* * This arbitrary default will select the 'EU FPU0 Pipeline @@ -1676,8 +1675,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, } } - reg_state[state_offset] = mmio; - reg_state[state_offset+1] = value; + CTX_REG(reg_state, state_offset, flex_regs[i], value); } } -- GitLab From d40e3e9e44db4b3c8777f3b515ba6097ba26e3b2 Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Fri, 31 Aug 2018 10:14:05 -0500 Subject: [PATCH 0797/1692] ASoC: tas6424: Save last fault register even when clear When there is no fault bit set in a fault register we skip the fault reporting section for that register. This also skips over saving that registers value. We save the value so we will not double report an error, but if an error clears then returns we will also not report it as we did not save the all cleared register value. Fix this by saving the fault register value in the all clear path. Signed-off-by: Andrew F. 
Davis Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- sound/soc/codecs/tas6424.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c index 14999b999fd3..0d6145549a98 100644 --- a/sound/soc/codecs/tas6424.c +++ b/sound/soc/codecs/tas6424.c @@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work) TAS6424_FAULT_PVDD_UV | TAS6424_FAULT_VBAT_UV; - if (reg) + if (!reg) { + tas6424->last_fault1 = reg; goto check_global_fault2_reg; + } /* * Only flag errors once for a given occurrence. This is needed as @@ -461,8 +463,10 @@ static void tas6424_fault_check_work(struct work_struct *work) TAS6424_FAULT_OTSD_CH3 | TAS6424_FAULT_OTSD_CH4; - if (!reg) + if (!reg) { + tas6424->last_fault2 = reg; goto check_warn_reg; + } if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD)) dev_crit(dev, "experienced a global overtemp shutdown\n"); @@ -497,8 +501,10 @@ static void tas6424_fault_check_work(struct work_struct *work) TAS6424_WARN_VDD_OTW_CH3 | TAS6424_WARN_VDD_OTW_CH4; - if (!reg) + if (!reg) { + tas6424->last_warn = reg; goto out; + } if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV)) dev_warn(dev, "experienced a VDD under voltage condition\n"); -- GitLab From eeb89e2bb1ac45b0836d4170e97a988c3a746c62 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 31 Aug 2018 10:05:38 +0200 Subject: [PATCH 0798/1692] x86/efi: Load fixmap GDT in efi_call_phys_epilog() When PTI is enabled on x86-32 the kernel uses the GDT mapped in the fixmap for the simple reason that this address is also mapped for user-space. The efi_call_phys_prolog()/efi_call_phys_epilog() wrappers change the GDT to call EFI runtime services and switch back to the kernel GDT when they return. But the switch-back uses the writable GDT, not the fixmap GDT. When that happened and and the CPU returns to user-space it switches to the user %cr3 and tries to restore user segment registers. This fails because the writable GDT is not mapped in the user page-table, and without a GDT the fault handlers also can't be launched. The result is a triple fault and reboot of the machine. Fix that by restoring the GDT back to the fixmap GDT which is also mapped in the user page-table. 
Fixes: 7757d607c6b3 x86/pti: ('Allow CONFIG_PAGE_TABLE_ISOLATION for x86_32') Reported-by: Guenter Roeck Signed-off-by: Joerg Roedel Signed-off-by: Thomas Gleixner Tested-by: Guenter Roeck Cc: Ard Biesheuvel Cc: Michal Hocko Cc: Andi Kleen Cc: Linus Torvalds Cc: Dave Hansen Cc: Pavel Machek Cc: hpa@zytor.com Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/1535702738-10971-1-git-send-email-joro@8bytes.org --- arch/x86/platform/efi/efi_32.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 324b93328b37..05ca14222463 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -85,14 +85,10 @@ pgd_t * __init efi_call_phys_prolog(void) void __init efi_call_phys_epilog(pgd_t *save_pgd) { - struct desc_ptr gdt_descr; - - gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0); - gdt_descr.size = GDT_SIZE - 1; - load_gdt(&gdt_descr); - load_cr3(save_pgd); __flush_tlb_all(); + + load_fixmap_gdt(0); } void __init efi_runtime_update_mappings(void) -- GitLab From 6147b1cf19651c7de297e69108b141fb30aa2349 Mon Sep 17 00:00:00 2001 From: Genki Sky Date: Tue, 28 Aug 2018 23:26:24 -0400 Subject: [PATCH 0799/1692] scripts/setlocalversion: git: Make -dirty check more robust $(git diff-index) relies on the index being refreshed. This refreshing of the index used to happen, but was removed in cdf2bc632ebc ("scripts/setlocalversion on write-protected source tree", 2013-06-14) due to issues with a read-only filesystem. If the index is not refreshed, one runs into problems. E.g. as described in [0], git stores the uid in its index, so even if just the uid has changed (or git is tricked into thinking so), then we will think the tree is dirty. So as in [1], if you package linux-git with a system that uses fakeroot(1), you get a "-dirty" version. Unless you manually $(git update-index --refresh) themselves. The simplest solution seems to be $(git status --porcelain), with an additional flag saying "ignore untracked files". It seems clearer about what it does, and avoids issues regarding cached indexes and writable filesystems, but still has stable output for scripting. [0]: https://public-inbox.org/git/0190ae30-b6c8-2a8b-b1fb-fd9d84e6dfdf@oracle.com/ [1]: https://bbs.archlinux.org/viewtopic.php?id=236702 Signed-off-by: Genki Sky Signed-off-by: Masahiro Yamada --- scripts/setlocalversion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 71f39410691b..79f7dd57d571 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -74,7 +74,7 @@ scm_version() fi # Check for uncommitted changes - if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then + if git status -uno --porcelain | grep -qv '^.. scripts/package'; then printf '%s' -dirty fi -- GitLab From bc8d2e20a3eb2f8d268ad7bbca878cf3acdcf389 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Thu, 30 Aug 2018 11:18:42 +0200 Subject: [PATCH 0800/1692] kconfig: remove a spurious self-assignment The self assignment was probably introduced by an automated code refactoring in commit 694c49a7c01c ("kconfig: drop localization support"). The issue was identified by a self-assign warning when running make menuconfig with clang. 
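As a stand-alone illustration of the diagnostic (not code from the kernel tree), clang's -Wself-assign warning fires on exactly this kind of statement:

/* build with: clang -Wall -Wself-assign -c self_assign.c */
struct menu_like {
	const char *prompt;
};

void build_conf_like(struct menu_like *m)
{
	const char *prompt = m->prompt;

	prompt = prompt;	/* flagged by clang's -Wself-assign */
	(void)prompt;
}

The assignment is dead code either way, so deleting it changes nothing in the generated configuration menus.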
Fixes: 694c49a7c01c ("kconfig: drop localization support") Signed-off-by: Lukas Bulwahn Signed-off-by: Masahiro Yamada --- scripts/kconfig/mconf.c | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index 83b5836615fb..143c05fec161 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c @@ -490,7 +490,6 @@ static void build_conf(struct menu *menu) switch (prop->type) { case P_MENU: child_count++; - prompt = prompt; if (single_menu_mode) { item_make("%s%*c%s", menu->data ? "-->" : "++>", -- GitLab From e0758412208960be9de11e6d2350c81ffd88410f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 25 Aug 2018 01:14:46 +0200 Subject: [PATCH 0801/1692] netfilter: kconfig: nat related expression depend on nftables core NF_TABLES_IPV4 is now boolean so it is possible to set NF_TABLES=m NF_TABLES_IPV4=y NFT_CHAIN_NAT_IPV4=y which causes: nft_chain_nat_ipv4.c:(.text+0x6d): undefined reference to `nft_do_chain' Wrap NFT_CHAIN_NAT_IPV4 and related nat expressions with NF_TABLES to restore the dependency. Reported-by: Randy Dunlap Fixes: 02c7b25e5f54 ("netfilter: nf_tables: build-in filter chain type") Signed-off-by: Florian Westphal Acked-by: Randy Dunlap Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/Kconfig | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index d9504adc47b3..184bf2e0a1ed 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -106,6 +106,10 @@ config NF_NAT_IPV4 if NF_NAT_IPV4 +config NF_NAT_MASQUERADE_IPV4 + bool + +if NF_TABLES config NFT_CHAIN_NAT_IPV4 depends on NF_TABLES_IPV4 tristate "IPv4 nf_tables nat chain support" @@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4 packet transformations such as the source, destination address and source and destination ports. -config NF_NAT_MASQUERADE_IPV4 - bool - config NFT_MASQ_IPV4 tristate "IPv4 masquerading support for nf_tables" depends on NF_TABLES_IPV4 @@ -135,6 +136,7 @@ config NFT_REDIR_IPV4 help This is the expression that provides IPv4 redirect support for nf_tables. +endif # NF_TABLES config NF_NAT_SNMP_BASIC tristate "Basic SNMP-ALG support" -- GitLab From 7acfda539c0b9636a58bfee56abfb3aeee806d96 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Sun, 26 Aug 2018 02:35:44 +0900 Subject: [PATCH 0802/1692] netfilter: nf_tables: release chain in flushing set When element of verdict map is deleted, the delete routine should release chain. however, flush element of verdict map routine doesn't release chain. test commands: %nft add table ip filter %nft add chain ip filter c1 %nft add map ip filter map1 { type ipv4_addr : verdict \; } %nft add element ip filter map1 { 1 : jump c1 } %nft flush map ip filter map1 %nft flush ruleset splat looks like: [ 4895.170899] kernel BUG at net/netfilter/nf_tables_api.c:1415! 
[ 4895.178114] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI [ 4895.178880] CPU: 0 PID: 1670 Comm: nft Not tainted 4.18.0+ #55 [ 4895.178880] RIP: 0010:nf_tables_chain_destroy.isra.28+0x39/0x220 [nf_tables] [ 4895.178880] Code: fc ff df 53 48 89 fb 48 83 c7 50 48 89 fa 48 c1 ea 03 0f b6 04 02 84 c0 74 09 3c 03 7f 05 e8 3e 4c 25 e1 8b 43 50 85 c0 74 02 <0f> 0b 48 89 da 48 b8 00 00 00 00 00 fc ff df 48 c1 ea 03 80 3c 02 [ 4895.228342] RSP: 0018:ffff88010b98f4c0 EFLAGS: 00010202 [ 4895.234841] RAX: 0000000000000001 RBX: ffff8801131c6968 RCX: ffff8801146585b0 [ 4895.234841] RDX: 1ffff10022638d37 RSI: ffff8801191a9348 RDI: ffff8801131c69b8 [ 4895.234841] RBP: ffff8801146585a8 R08: 1ffff1002323526a R09: 0000000000000000 [ 4895.234841] R10: 0000000000000000 R11: 0000000000000000 R12: dead000000000200 [ 4895.234841] R13: dead000000000100 R14: ffffffffa3638af8 R15: dffffc0000000000 [ 4895.234841] FS: 00007f6d188e6700(0000) GS:ffff88011b600000(0000) knlGS:0000000000000000 [ 4895.234841] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 4895.234841] CR2: 00007ffe72b8df88 CR3: 000000010e2d4000 CR4: 00000000001006f0 [ 4895.234841] Call Trace: [ 4895.234841] nf_tables_commit+0x2704/0x2c70 [nf_tables] [ 4895.234841] ? nfnetlink_rcv_batch+0xa4f/0x11b0 [nfnetlink] [ 4895.234841] ? nf_tables_setelem_notify.constprop.48+0x1a0/0x1a0 [nf_tables] [ 4895.323824] ? __lock_is_held+0x9d/0x130 [ 4895.323824] ? kasan_unpoison_shadow+0x30/0x40 [ 4895.333299] ? kasan_kmalloc+0xa9/0xc0 [ 4895.333299] ? kmem_cache_alloc_trace+0x2c0/0x310 [ 4895.333299] ? nfnetlink_rcv_batch+0xa4f/0x11b0 [nfnetlink] [ 4895.333299] nfnetlink_rcv_batch+0xdb9/0x11b0 [nfnetlink] [ 4895.333299] ? debug_show_all_locks+0x290/0x290 [ 4895.333299] ? nfnetlink_net_init+0x150/0x150 [nfnetlink] [ 4895.333299] ? sched_clock_cpu+0xe5/0x170 [ 4895.333299] ? sched_clock_local+0xff/0x130 [ 4895.333299] ? sched_clock_cpu+0xe5/0x170 [ 4895.333299] ? find_held_lock+0x39/0x1b0 [ 4895.333299] ? sched_clock_local+0xff/0x130 [ 4895.333299] ? memset+0x1f/0x40 [ 4895.333299] ? nla_parse+0x33/0x260 [ 4895.333299] ? ns_capable_common+0x6e/0x110 [ 4895.333299] nfnetlink_rcv+0x2c0/0x310 [nfnetlink] [ ... ] Fixes: 591054469b3e ("netfilter: nf_tables: revisit chain/object refcounting from elements") Signed-off-by: Taehee Yoo Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 1dca5683f59f..2cfb173cd0b2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx, } set->ndeact++; + nft_set_elem_deactivate(ctx->net, set, elem); nft_trans_elem_set(trans) = set; nft_trans_elem(trans) = *elem; list_add_tail(&trans->list, &ctx->net->nft.commit_list); -- GitLab From 0f02cfbc3d9e413d450d8d0fd660077c23f67eff Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 30 Aug 2018 11:01:21 -0700 Subject: [PATCH 0803/1692] MIPS: VDSO: Match data page cache colouring when D$ aliases When a system suffers from dcache aliasing a user program may observe stale VDSO data from an aliased cache line. Notably this can break the expectation that clock_gettime(CLOCK_MONOTONIC, ...) is, as its name suggests, monotonic. 
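Some background for the fix described in the next paragraph: with a virtually indexed, aliasing data cache, two mappings of the same page only hit the same cache lines when their virtual addresses agree in the colour bits, i.e. modulo the aliasing granularity (shm_align_mask on MIPS). A rough stand-alone sketch of that colour-matching arithmetic, with all constants invented for the example and the GIC offset handling of the real patch left out:

#include <stdio.h>

#define ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned long shm_align_mask = 0x3fffUL;	/* pretend 16KB aliasing */
	unsigned long kernel_va = 0x80512340UL;		/* kernel's mapping of the data page */
	unsigned long base = 0x77fa1000UL;		/* candidate user mapping */

	/* Round up to an alias boundary, then copy the kernel colour bits. */
	base = ALIGN_MASK(base, shm_align_mask);
	base += kernel_va & shm_align_mask;

	printf("user %#lx vs kernel %#lx: colours %s\n", base, kernel_va,
	       (base & shm_align_mask) == (kernel_va & shm_align_mask) ?
	       "match" : "differ");
	return 0;
}

With matching colours both mappings land in the same cache lines, so a store through the kernel mapping becomes visible to userland loads without an explicit flush.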
In order to ensure that users observe updates to the VDSO data page as intended, align the user mappings of the VDSO data page such that their cache colouring matches that of the virtual address range which the kernel will use to update the data page - typically its unmapped address within kseg0. This ensures that we don't introduce aliasing cache lines for the VDSO data page, and therefore that userland will observe updates without requiring cache invalidation. Signed-off-by: Paul Burton Reported-by: Hauke Mehrtens Reported-by: Rene Nielsen Reported-by: Alexandre Belloni Fixes: ebb5e78cc634 ("MIPS: Initial implementation of a VDSO") Patchwork: https://patchwork.linux-mips.org/patch/20344/ Tested-by: Alexandre Belloni Tested-by: Hauke Mehrtens Cc: James Hogan Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org # v4.4+ --- arch/mips/kernel/vdso.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 019035d7225c..8f845f6e5f42 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include +#include #include /* Kernel-provided data used by the VDSO. */ @@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vvar_size = gic_size + PAGE_SIZE; size = vvar_size + image->size; + /* + * Find a region that's large enough for us to perform the + * colour-matching alignment below. + */ + if (cpu_has_dc_aliases) + size += shm_align_mask + 1; + base = get_unmapped_area(NULL, 0, size, 0, 0); if (IS_ERR_VALUE(base)) { ret = base; goto out; } + /* + * If we suffer from dcache aliasing, ensure that the VDSO data page + * mapping is coloured the same as the kernel's mapping of that memory. + * This ensures that when the kernel updates the VDSO data userland + * will observe it without requiring cache invalidations. + */ + if (cpu_has_dc_aliases) { + base = __ALIGN_MASK(base, shm_align_mask); + base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask; + } + data_addr = base + gic_size; vdso_addr = data_addr + PAGE_SIZE; -- GitLab From 3fcbb8260a87efb691d837e8cd24e81f65b3eb70 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 30 Aug 2018 13:52:38 -0700 Subject: [PATCH 0804/1692] ARC: atomics: unbork atomic_fetch_##op() In 4.19-rc1, Eugeniy reported weird boot and IO errors on ARC HSDK | INFO: task syslogd:77 blocked for more than 10 seconds. | Not tainted 4.19.0-rc1-00007-gf213acea4e88 #40 | "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this | message. | syslogd D 0 77 76 0x00000000 | | Stack Trace: | __switch_to+0x0/0xac | __schedule+0x1b2/0x730 | io_schedule+0x5c/0xc0 | __lock_page+0x98/0xdc | find_lock_entry+0x38/0x100 | shmem_getpage_gfp.isra.3+0x82/0xbfc | shmem_fault+0x46/0x138 | handle_mm_fault+0x5bc/0x924 | do_page_fault+0x100/0x2b8 | ret_from_exception+0x0/0x8 He bisected to 84c6591103db ("locking/atomics, asm-generic/bitops/lock.h: Rewrite using atomic_fetch_*()") This commit however only unmasked the real issue introduced by commit 4aef66c8ae9 ("locking/atomic, arch/arc: Fix build") which missed the retry-if-scond-failed branch in atomic_fetch_##op() macros. The bisected commit started using atomic_fetch_##op() macros for building the rest of atomics. 
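This is the classic load-locked/store-conditional pitfall: when the store-conditional fails, the code must branch back and retry, otherwise the update is silently dropped, which is precisely what the missing "bnz 1b" caused. On hardware without LL/SC the same shape appears as a compare-and-swap retry loop; a small C11 sketch of a fetch-add written that way (purely illustrative, not the ARC assembly):

#include <stdatomic.h>
#include <stdio.h>

static int fetch_add_with_retry(atomic_int *v, int i)
{
	int orig = atomic_load_explicit(v, memory_order_relaxed);

	/*
	 * compare_exchange_weak() may fail, much like a store-conditional;
	 * looping back (the "bnz 1b" of the patch) is what makes the
	 * operation actually take effect. On failure 'orig' is reloaded
	 * with the current value, so the retry uses fresh data.
	 */
	while (!atomic_compare_exchange_weak(v, &orig, orig + i))
		;

	return orig;	/* old value, as the fetch_*() variants must return */
}

int main(void)
{
	atomic_int v = 10;
	int old = fetch_add_with_retry(&v, 5);

	printf("old=%d new=%d\n", old, atomic_load(&v));
	return 0;
}

Without the loop a failed store leaves the counter unchanged while the caller believes the update happened, the sort of silent corruption behind the hangs reported above.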
Fixes: 4aef66c8ae9 ("locking/atomic, arch/arc: Fix build") Reported-by: Eugeniy Paltsev Acked-by: Peter Zijlstra (Intel) Signed-off-by: Will Deacon Signed-off-by: Vineet Gupta [vgupta: wrote changelog] --- arch/arc/include/asm/atomic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 4e0072730241..158af079838d 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ "1: llock %[orig], [%[ctr]] \n" \ " " #asm_op " %[val], %[orig], %[i] \n" \ " scond %[val], [%[ctr]] \n" \ - " \n" \ + " bnz 1b \n" \ : [val] "=&r" (val), \ [orig] "=&r" (orig) \ : [ctr] "r" (&v->counter), \ -- GitLab From a8627cda7cfffe1792c199660c2b4f03ba2bd97b Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 31 Aug 2018 10:00:34 -0500 Subject: [PATCH 0805/1692] ipmi: Fix NULL pointer dereference in ssif_probe There is a potential execution path in which function ssif_info_find() returns NULL, hence there is a NULL pointer dereference when accessing pointer *addr_info* Fix this by null checking *addr_info* before dereferencing it. Addresses-Coverity-ID: 1473145 ("Explicit null dereferenced") Fixes: e333054a91d1 ("ipmi: Fix I2C client removal in the SSIF driver") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Corey Minyard --- drivers/char/ipmi/ipmi_ssif.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 265d6a6583bc..29e67a80fb20 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1643,7 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) out: if (rv) { - addr_info->client = NULL; + if (addr_info) + addr_info->client = NULL; + dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); kfree(ssif_info); } -- GitLab From 678c8110d23c0ac7d69fda558992c31be64e7507 Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Mon, 30 Jul 2018 19:26:33 +0300 Subject: [PATCH 0806/1692] ARC: dma [IOC]: mark DMA devices connected as dma-coherent Mark DMA devices on AXS103 and HSDK boards connected through IOC port as dma-coherent. Signed-off-by: Eugeniy Paltsev Signed-off-by: Vineet Gupta --- arch/arc/boot/dts/axc003.dtsi | 26 ++++++++++++++++++++++++++ arch/arc/boot/dts/axc003_idu.dtsi | 26 ++++++++++++++++++++++++++ arch/arc/boot/dts/hsdk.dts | 4 ++++ 3 files changed, 56 insertions(+) diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index dc91c663bcc0..d75d65ddf8e3 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi @@ -93,6 +93,32 @@ arcpct0: pct { }; }; + /* + * Mark DMA peripherals connected via IOC port as dma-coherent. We do + * it via overlay because peripherals defined in axs10x_mb.dtsi are + * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so + * only AXS103 board has HW-coherent DMA peripherals) + * We don't need to mark pgu@17000 as dma-coherent because it uses + * external DMA buffer located outside of IOC aperture. 
+ */ + axs10x_mb { + ethernet@0x18000 { + dma-coherent; + }; + + ehci@0x40000 { + dma-coherent; + }; + + ohci@0x60000 { + dma-coherent; + }; + + mmc@0x15000 { + dma-coherent; + }; + }; + /* * The DW APB ICTL intc on MB is connected to CPU intc via a * DT "invisible" DW APB GPIO block, configured to simply pass thru diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 69ff4895f2ba..a05bb737ea63 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi @@ -100,6 +100,32 @@ arcpct0: pct { }; }; + /* + * Mark DMA peripherals connected via IOC port as dma-coherent. We do + * it via overlay because peripherals defined in axs10x_mb.dtsi are + * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so + * only AXS103 board has HW-coherent DMA peripherals) + * We don't need to mark pgu@17000 as dma-coherent because it uses + * external DMA buffer located outside of IOC aperture. + */ + axs10x_mb { + ethernet@0x18000 { + dma-coherent; + }; + + ehci@0x40000 { + dma-coherent; + }; + + ohci@0x60000 { + dma-coherent; + }; + + mmc@0x15000 { + dma-coherent; + }; + }; + /* * This INTC is actually connected to DW APB GPIO * which acts as a wire between MB INTC and CPU INTC. diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index d00f283094d3..ef149f59929a 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -181,6 +181,7 @@ gmac: ethernet@8000 { resets = <&cgu_rst HSDK_ETH_RESET>; reset-names = "stmmaceth"; mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ + dma-coherent; mdio { #address-cells = <1>; @@ -199,12 +200,14 @@ ohci@60000 { compatible = "snps,hsdk-v1.0-ohci", "generic-ohci"; reg = <0x60000 0x100>; interrupts = <15>; + dma-coherent; }; ehci@40000 { compatible = "snps,hsdk-v1.0-ehci", "generic-ehci"; reg = <0x40000 0x100>; interrupts = <15>; + dma-coherent; }; mmc@a000 { @@ -217,6 +220,7 @@ mmc@a000 { clock-names = "biu", "ciu"; interrupts = <12>; bus-width = <4>; + dma-coherent; }; }; -- GitLab From 6b06546206868f723f2061d703a3c3c378dcbf4c Mon Sep 17 00:00:00 2001 From: "Dennis Zhou (Facebook)" Date: Fri, 31 Aug 2018 16:22:42 -0400 Subject: [PATCH 0807/1692] Revert "blk-throttle: fix race between blkcg_bio_issue_check() and cgroup_rmdir()" This reverts commit 4c6994806f708559c2812b73501406e21ae5dcd0. Destroying blkgs is tricky because of the nature of the relationship. A blkg should go away when either a blkcg or a request_queue goes away. However, blkg's pin the blkcg to ensure they remain valid. To break this cycle, when a blkcg is offlined, blkgs put back their css ref. This eventually lets css_free() get called which frees the blkcg. The above commit (4c6994806f70) breaks this order of events by trying to destroy blkgs in css_free(). As the blkgs still hold references to the blkcg, css_free() is never called. The race between blkcg_bio_issue_check() and cgroup_rmdir() will be addressed in the following patch by delaying destruction of a blkg until all writeback associated with the blkcg has been finished. 
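Stripped of the block-layer details, the problem with 4c6994806f70 is a plain reference cycle: the parent's free callback was the only place the children dropped their references, so the parent's count could never reach zero. A toy refcount model (names and numbers invented, nothing blkcg-specific) showing the count getting stuck:

#include <stdio.h>

struct parent {
	int refcnt;	/* 1 base reference + one per child (blkg) */
};

static void put_parent(struct parent *p)
{
	if (--p->refcnt == 0)
		printf("free callback runs, children put their refs here\n");
	else
		printf("refcnt stuck at %d, free callback never runs\n", p->refcnt);
}

int main(void)
{
	struct parent p = { .refcnt = 1 + 2 };	/* base ref + two children */

	put_parent(&p);	/* offline path drops only the base reference */

	/*
	 * The two child references were supposed to be dropped from the free
	 * callback, which is exactly the step that can no longer be reached.
	 */
	return 0;
}

The next patch breaks the cycle by keying blkg destruction off writeback completion instead of css_free().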
Fixes: 4c6994806f70 ("blk-throttle: fix race between blkcg_bio_issue_check() and cgroup_rmdir()") Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Cc: Jiufei Xue Cc: Joseph Qi Cc: Tejun Heo Cc: Jens Axboe Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 78 ++++++++------------------------------ include/linux/blk-cgroup.h | 1 - 2 files changed, 16 insertions(+), 63 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 694595b29b8f..2998e4f095d1 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -310,28 +310,11 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, } } -static void blkg_pd_offline(struct blkcg_gq *blkg) -{ - int i; - - lockdep_assert_held(blkg->q->queue_lock); - lockdep_assert_held(&blkg->blkcg->lock); - - for (i = 0; i < BLKCG_MAX_POLS; i++) { - struct blkcg_policy *pol = blkcg_policy[i]; - - if (blkg->pd[i] && !blkg->pd[i]->offline && - pol->pd_offline_fn) { - pol->pd_offline_fn(blkg->pd[i]); - blkg->pd[i]->offline = true; - } - } -} - static void blkg_destroy(struct blkcg_gq *blkg) { struct blkcg *blkcg = blkg->blkcg; struct blkcg_gq *parent = blkg->parent; + int i; lockdep_assert_held(blkg->q->queue_lock); lockdep_assert_held(&blkcg->lock); @@ -340,6 +323,13 @@ static void blkg_destroy(struct blkcg_gq *blkg) WARN_ON_ONCE(list_empty(&blkg->q_node)); WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + + if (blkg->pd[i] && pol->pd_offline_fn) + pol->pd_offline_fn(blkg->pd[i]); + } + if (parent) { blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes); blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios); @@ -382,7 +372,6 @@ static void blkg_destroy_all(struct request_queue *q) struct blkcg *blkcg = blkg->blkcg; spin_lock(&blkcg->lock); - blkg_pd_offline(blkg); blkg_destroy(blkg); spin_unlock(&blkcg->lock); } @@ -1058,54 +1047,21 @@ static struct cftype blkcg_legacy_files[] = { * @css: css of interest * * This function is called when @css is about to go away and responsible - * for offlining all blkgs pd and killing all wbs associated with @css. - * blkgs pd offline should be done while holding both q and blkcg locks. - * As blkcg lock is nested inside q lock, this function performs reverse - * double lock dancing. + * for shooting down all blkgs associated with @css. blkgs should be + * removed while holding both q and blkcg locks. As blkcg lock is nested + * inside q lock, this function performs reverse double lock dancing. * * This is the blkcg counterpart of ioc_release_fn(). */ static void blkcg_css_offline(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); - struct blkcg_gq *blkg; spin_lock_irq(&blkcg->lock); - hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { - struct request_queue *q = blkg->q; - - if (spin_trylock(q->queue_lock)) { - blkg_pd_offline(blkg); - spin_unlock(q->queue_lock); - } else { - spin_unlock_irq(&blkcg->lock); - cpu_relax(); - spin_lock_irq(&blkcg->lock); - } - } - - spin_unlock_irq(&blkcg->lock); - - wb_blkcg_offline(blkcg); -} - -/** - * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg - * @blkcg: blkcg of interest - * - * This function is called when blkcg css is about to free and responsible for - * destroying all blkgs associated with @blkcg. - * blkgs should be removed while holding both q and blkcg locks. As blkcg lock - * is nested inside q lock, this function performs reverse double lock dancing. 
- */ -static void blkcg_destroy_all_blkgs(struct blkcg *blkcg) -{ - spin_lock_irq(&blkcg->lock); while (!hlist_empty(&blkcg->blkg_list)) { struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, - struct blkcg_gq, - blkcg_node); + struct blkcg_gq, blkcg_node); struct request_queue *q = blkg->q; if (spin_trylock(q->queue_lock)) { @@ -1117,7 +1073,10 @@ static void blkcg_destroy_all_blkgs(struct blkcg *blkcg) spin_lock_irq(&blkcg->lock); } } + spin_unlock_irq(&blkcg->lock); + + wb_blkcg_offline(blkcg); } static void blkcg_css_free(struct cgroup_subsys_state *css) @@ -1125,8 +1084,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css) struct blkcg *blkcg = css_to_blkcg(css); int i; - blkcg_destroy_all_blkgs(blkcg); - mutex_lock(&blkcg_pol_mutex); list_del(&blkcg->all_blkcgs_node); @@ -1480,11 +1437,8 @@ void blkcg_deactivate_policy(struct request_queue *q, list_for_each_entry(blkg, &q->blkg_list, q_node) { if (blkg->pd[pol->plid]) { - if (!blkg->pd[pol->plid]->offline && - pol->pd_offline_fn) { + if (pol->pd_offline_fn) pol->pd_offline_fn(blkg->pd[pol->plid]); - blkg->pd[pol->plid]->offline = true; - } pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; } diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 34aec30e06c7..1615cdd4c797 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -89,7 +89,6 @@ struct blkg_policy_data { /* the blkg and policy id this per-policy data belongs to */ struct blkcg_gq *blkg; int plid; - bool offline; }; /* -- GitLab From 59b57717fff8b562825d9d25e0180ad7e8048ca9 Mon Sep 17 00:00:00 2001 From: "Dennis Zhou (Facebook)" Date: Fri, 31 Aug 2018 16:22:43 -0400 Subject: [PATCH 0808/1692] blkcg: delay blkg destruction until after writeback has finished Currently, blkcg destruction relies on a sequence of events: 1. Destruction starts. blkcg_css_offline() is called and blkgs release their reference to the blkcg. This immediately destroys the cgwbs (writeback). 2. With blkgs giving up their reference, the blkcg ref count should become zero and eventually call blkcg_css_free() which finally frees the blkcg. Jiufei Xue reported that there is a race between blkcg_bio_issue_check() and cgroup_rmdir(). To remedy this, blkg destruction becomes contingent on the completion of all writeback associated with the blkcg. A count of the number of cgwbs is maintained and once that goes to zero, blkg destruction can follow. This should prevent premature blkg destruction related to writeback. The new process for blkcg cleanup is as follows: 1. Destruction starts. blkcg_css_offline() is called which offlines writeback. Blkg destruction is delayed on the cgwb_refcnt count to avoid punting potentially large amounts of outstanding writeback to root while maintaining any ongoing policies. Here, the base cgwb_refcnt is put back. 2. When the cgwb_refcnt becomes zero, blkcg_destroy_blkgs() is called and handles destruction of blkgs. This is where the css reference held by each blkg is released. 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called. This finally frees the blkg. It seems in the past blk-throttle didn't do the most understandable things with taking data from a blkg while associating with current. So, the simplification and unification of what blk-throttle is doing caused this. 
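A compressed model of that ordering (a plain counter standing in for the kernel's refcount_t, all names invented): offlining drops the base reference, every finished writeback drops one more, and whichever put reaches zero performs the blkg teardown.

#include <stdio.h>

static int cgwb_refcnt = 1 + 3;	/* base reference + three active writebacks */

static void destroy_blkgs(void)
{
	printf("last writeback done -> blkgs destroyed now\n");
}

static void cgwb_put(void)
{
	if (--cgwb_refcnt == 0)
		destroy_blkgs();
}

int main(void)
{
	cgwb_put();	/* step 1: css_offline() drops the base reference */

	/* step 2: each completed writeback (cgwb_release_workfn()) puts one */
	for (int i = 0; i < 3; i++)
		cgwb_put();

	/* step 3: the real code then lets blkcg_css_free() free the blkcg */
	return 0;
}

Because destruction is tied to the last put rather than to css_free(), the cycle behind the previous revert cannot form.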
Fixes: 08e18eab0c579 ("block: add bi_blkg to the bio for cgroups") Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Cc: Jiufei Xue Cc: Joseph Qi Cc: Tejun Heo Cc: Josef Bacik Cc: Jens Axboe Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 53 ++++++++++++++++++++++++++++++++------ include/linux/blk-cgroup.h | 44 +++++++++++++++++++++++++++++++ mm/backing-dev.c | 5 ++++ 3 files changed, 94 insertions(+), 8 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 2998e4f095d1..c19f9078da1e 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1042,21 +1042,59 @@ static struct cftype blkcg_legacy_files[] = { { } /* terminate */ }; +/* + * blkcg destruction is a three-stage process. + * + * 1. Destruction starts. The blkcg_css_offline() callback is invoked + * which offlines writeback. Here we tie the next stage of blkg destruction + * to the completion of writeback associated with the blkcg. This lets us + * avoid punting potentially large amounts of outstanding writeback to root + * while maintaining any ongoing policies. The next stage is triggered when + * the nr_cgwbs count goes to zero. + * + * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called + * and handles the destruction of blkgs. Here the css reference held by + * the blkg is put back eventually allowing blkcg_css_free() to be called. + * This work may occur in cgwb_release_workfn() on the cgwb_release + * workqueue. Any submitted ios that fail to get the blkg ref will be + * punted to the root_blkg. + * + * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called. + * This finally frees the blkcg. + */ + /** * blkcg_css_offline - cgroup css_offline callback * @css: css of interest * - * This function is called when @css is about to go away and responsible - * for shooting down all blkgs associated with @css. blkgs should be - * removed while holding both q and blkcg locks. As blkcg lock is nested - * inside q lock, this function performs reverse double lock dancing. - * - * This is the blkcg counterpart of ioc_release_fn(). + * This function is called when @css is about to go away. Here the cgwbs are + * offlined first and only once writeback associated with the blkcg has + * finished do we start step 2 (see above). */ static void blkcg_css_offline(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); + /* this prevents anyone from attaching or migrating to this blkcg */ + wb_blkcg_offline(blkcg); + + /* put the base cgwb reference allowing step 2 to be triggered */ + blkcg_cgwb_put(blkcg); +} + +/** + * blkcg_destroy_blkgs - responsible for shooting down blkgs + * @blkcg: blkcg of interest + * + * blkgs should be removed while holding both q and blkcg locks. As blkcg lock + * is nested inside q lock, this function performs reverse double lock dancing. + * Destroying the blkgs releases the reference held on the blkcg's css allowing + * blkcg_css_free to eventually be called. + * + * This is the blkcg counterpart of ioc_release_fn(). 
+ */ +void blkcg_destroy_blkgs(struct blkcg *blkcg) +{ spin_lock_irq(&blkcg->lock); while (!hlist_empty(&blkcg->blkg_list)) { @@ -1075,8 +1113,6 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css) } spin_unlock_irq(&blkcg->lock); - - wb_blkcg_offline(blkcg); } static void blkcg_css_free(struct cgroup_subsys_state *css) @@ -1146,6 +1182,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css) INIT_HLIST_HEAD(&blkcg->blkg_list); #ifdef CONFIG_CGROUP_WRITEBACK INIT_LIST_HEAD(&blkcg->cgwb_list); + refcount_set(&blkcg->cgwb_refcnt, 1); #endif list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 1615cdd4c797..6d766a19f2bb 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -56,6 +56,7 @@ struct blkcg { struct list_head all_blkcgs_node; #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; + refcount_t cgwb_refcnt; #endif }; @@ -386,6 +387,49 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) return cpd ? cpd->blkcg : NULL; } +extern void blkcg_destroy_blkgs(struct blkcg *blkcg); + +#ifdef CONFIG_CGROUP_WRITEBACK + +/** + * blkcg_cgwb_get - get a reference for blkcg->cgwb_list + * @blkcg: blkcg of interest + * + * This is used to track the number of active wb's related to a blkcg. + */ +static inline void blkcg_cgwb_get(struct blkcg *blkcg) +{ + refcount_inc(&blkcg->cgwb_refcnt); +} + +/** + * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list + * @blkcg: blkcg of interest + * + * This is used to track the number of active wb's related to a blkcg. + * When this count goes to zero, all active wb has finished so the + * blkcg can continue destruction by calling blkcg_destroy_blkgs(). + * This work may occur in cgwb_release_workfn() on the cgwb_release + * workqueue. 
+ */ +static inline void blkcg_cgwb_put(struct blkcg *blkcg) +{ + if (refcount_dec_and_test(&blkcg->cgwb_refcnt)) + blkcg_destroy_blkgs(blkcg); +} + +#else + +static inline void blkcg_cgwb_get(struct blkcg *blkcg) { } + +static inline void blkcg_cgwb_put(struct blkcg *blkcg) +{ + /* wb isn't being accounted, so trigger destruction right away */ + blkcg_destroy_blkgs(blkcg); +} + +#endif + /** * blkg_path - format cgroup path of blkg * @blkg: blkg of interest diff --git a/mm/backing-dev.c b/mm/backing-dev.c index f5981e9d6ae2..8a8bb8796c6c 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -491,6 +491,7 @@ static void cgwb_release_workfn(struct work_struct *work) { struct bdi_writeback *wb = container_of(work, struct bdi_writeback, release_work); + struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css); mutex_lock(&wb->bdi->cgwb_release_mutex); wb_shutdown(wb); @@ -499,6 +500,9 @@ static void cgwb_release_workfn(struct work_struct *work) css_put(wb->blkcg_css); mutex_unlock(&wb->bdi->cgwb_release_mutex); + /* triggers blkg destruction if cgwb_refcnt becomes zero */ + blkcg_cgwb_put(blkcg); + fprop_local_destroy_percpu(&wb->memcg_completions); percpu_ref_exit(&wb->refcnt); wb_exit(wb); @@ -597,6 +601,7 @@ static int cgwb_create(struct backing_dev_info *bdi, list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); list_add(&wb->memcg_node, memcg_cgwb_list); list_add(&wb->blkcg_node, blkcg_cgwb_list); + blkcg_cgwb_get(blkcg); css_get(memcg_css); css_get(blkcg_css); } -- GitLab From 3111885015b458c97b4cf272e2a87f1d6f0ed06a Mon Sep 17 00:00:00 2001 From: "Dennis Zhou (Facebook)" Date: Fri, 31 Aug 2018 16:22:44 -0400 Subject: [PATCH 0809/1692] blkcg: use tryget logic when associating a blkg with a bio There is a very small change a bio gets caught up in a really unfortunate race between a task migration, cgroup exiting, and itself trying to associate with a blkg. This is due to css offlining being performed after the css->refcnt is killed which triggers removal of blkgs that reach their blkg->refcnt of 0. To avoid this, association with a blkg should use tryget and fallback to using the root_blkg. 
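The tryget is the usual "only take a reference while the object is still live" pattern: if the increment would race with the final put, give up and use a known-safe fallback (the root blkg here). A minimal C11 sketch with invented names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;
	const char *name;
};

/* Succeeds only while refcnt > 0, i.e. the object is not mid-teardown. */
static bool try_get(struct obj *o)
{
	int cur = atomic_load(&o->refcnt);

	while (cur > 0) {
		/* On failure 'cur' is refreshed and the loop re-checks it. */
		if (atomic_compare_exchange_weak(&o->refcnt, &cur, cur + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct obj root  = { 1, "root_blkg" };
	struct obj dying = { 0, "blkg already being torn down" };

	struct obj *use = try_get(&dying) ? &dying : &root;

	printf("associated with: %s\n", use->name);	/* falls back to root */
	return 0;
}

The fallback object has to be one whose lifetime is guaranteed by construction, which is why the root blkg is the natural choice.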
Fixes: 08e18eab0c579 ("block: add bi_blkg to the bio for cgroups") Reviewed-by: Josef Bacik Signed-off-by: Dennis Zhou Cc: Jiufei Xue Cc: Joseph Qi Cc: Tejun Heo Cc: Josef Bacik Cc: Jens Axboe Signed-off-by: Jens Axboe --- block/bio.c | 3 ++- block/blk-throttle.c | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/block/bio.c b/block/bio.c index b12966e415d3..8c680a776171 100644 --- a/block/bio.c +++ b/block/bio.c @@ -2015,7 +2015,8 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) { if (unlikely(bio->bi_blkg)) return -EBUSY; - blkg_get(blkg); + if (!blkg_try_get(blkg)) + return -ENODEV; bio->bi_blkg = blkg; return 0; } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a3eede00d302..01d0620a4e4a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2129,8 +2129,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td) static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) { #ifdef CONFIG_BLK_DEV_THROTTLING_LOW - if (bio->bi_css) - bio_associate_blkg(bio, tg_to_blkg(tg)); + /* fallback to root_blkg if we fail to get a blkg ref */ + if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV)) + bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg); bio_issue_init(&bio->bi_issue, bio_sectors(bio)); #endif } -- GitLab From e254de6bcf3f5b6e78a92ac95fb91acef8adfe1a Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 29 Aug 2018 11:05:42 -0700 Subject: [PATCH 0810/1692] md/raid5-cache: disable reshape completely We don't support reshape yet if an array supports log device. Previously we determine the fact by checking ->log. However, ->log could be NULL after a log device is removed, but the array is still marked to support log device. Don't allow reshape in this case too. User can disable log device support by setting 'consistency_policy' to 'resync' then do reshape. 
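The essence of the fix is to gate reshape on the persistent feature flag rather than on the ->log pointer, which becomes NULL as soon as the journal device is removed even though the array still advertises journal support. A stripped-down sketch (invented types, not the md code):

#include <stdbool.h>
#include <stdio.h>

struct conf_like {
	void *log;		/* NULL once the log device is removed */
	bool  has_journal;	/* sticky per-array feature flag */
};

static bool reshape_blocked_old(const struct conf_like *c)
{
	return c->log != NULL;		/* misses the removed-device case */
}

static bool reshape_blocked_new(const struct conf_like *c)
{
	return c->has_journal;		/* follows the feature, not the device */
}

int main(void)
{
	/* Journal device just pulled, feature still enabled on the array. */
	struct conf_like c = { .log = NULL, .has_journal = true };

	printf("old check blocks reshape: %d\n", reshape_blocked_old(&c));	/* 0 - the bug */
	printf("new check blocks reshape: %d\n", reshape_blocked_new(&c));	/* 1 */
	return 0;
}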
Reported-by: Xiao Ni Tested-by: Xiao Ni Signed-off-by: Shaohua Li --- drivers/md/raid5-log.h | 5 +++++ drivers/md/raid5.c | 6 +++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index a001808a2b77..bfb811407061 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h @@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); extern void ppl_quiesce(struct r5conf *conf, int quiesce); extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); +static inline bool raid5_has_log(struct r5conf *conf) +{ + return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); +} + static inline bool raid5_has_ppl(struct r5conf *conf) { return test_bit(MD_HAS_PPL, &conf->mddev->flags); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4ce0d7502fad..e4e98f47865d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; - if (conf->log || raid5_has_ppl(conf)) + if (raid5_has_log(conf) || raid5_has_ppl(conf)) return false; return test_bit(STRIPE_BATCH_READY, &sh->state) && !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && @@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) sector_t newsize; struct r5conf *conf = mddev->private; - if (conf->log || raid5_has_ppl(conf)) + if (raid5_has_log(conf) || raid5_has_ppl(conf)) return -EINVAL; sectors &= ~((sector_t)conf->chunk_sectors - 1); newsize = raid5_size(mddev, sectors, mddev->raid_disks); @@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev) { struct r5conf *conf = mddev->private; - if (conf->log || raid5_has_ppl(conf)) + if (raid5_has_log(conf) || raid5_has_ppl(conf)) return -EINVAL; if (mddev->delta_disks == 0 && mddev->new_layout == mddev->layout && -- GitLab From 1d0ffd264204eba1861865560f1f7f7a92919384 Mon Sep 17 00:00:00 2001 From: Xiao Ni Date: Thu, 30 Aug 2018 15:57:09 +0800 Subject: [PATCH 0811/1692] RAID10 BUG_ON in raise_barrier when force is true and conf->barrier is 0 In raid10 reshape_request it gets max_sectors in read_balance. If the underlayer disks have bad blocks, the max_sectors is less than last. It will call goto read_more many times. It calls raise_barrier(conf, sectors_done != 0) every time. In this condition sectors_done is not 0. So the value passed to the argument force of raise_barrier is true. In raise_barrier it checks conf->barrier when force is true. If force is true and conf->barrier is 0, it panic. In this case reshape_request submits bio to under layer disks. And in the callback function of the bio it calls lower_barrier. If the bio finishes before calling raise_barrier again, it can trigger the BUG_ON. Add one pair of raise_barrier/lower_barrier to fix this bug. 
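Put differently, the reshape loop needs its raise/lower calls to stay balanced even though each iteration's completion drops a barrier asynchronously; holding one extra barrier across the whole loop keeps the count from reaching zero between iterations. A toy single-threaded model of the counting, with an assert standing in for the BUG_ON:

#include <assert.h>
#include <stdio.h>

static int barrier_cnt;

static void raise_barrier(int force)
{
	/* Mirrors the BUG_ON(force && !conf->barrier) style of check. */
	assert(!force || barrier_cnt > 0);
	barrier_cnt++;
}

static void lower_barrier(void)
{
	barrier_cnt--;
}

int main(void)
{
	raise_barrier(0);		/* the extra pair added by the fix */

	for (int section = 0; section < 3; section++) {
		raise_barrier(1);	/* safe: the outer barrier keeps the count > 0 */
		printf("section %d, barrier_cnt=%d\n", section, barrier_cnt);
		lower_barrier();	/* in the driver this is done by the bio completion */
	}

	lower_barrier();		/* matches the initial raise */
	assert(barrier_cnt == 0);
	return 0;
}

Remove the outer raise_barrier(0)/lower_barrier() pair and a completion that fires before the next iteration leaves the count at zero, which is exactly the condition the BUG_ON trips on.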
Signed-off-by: Xiao Ni Suggested-by: Neil Brown Signed-off-by: Shaohua Li --- drivers/md/raid10.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 981898049491..d6f7978b4449 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, allow_barrier(conf); } + raise_barrier(conf, 0); read_more: /* Now schedule reads for blocks from sector_nr to last */ r10_bio = raid10_alloc_init_r10buf(conf); r10_bio->state = 0; - raise_barrier(conf, sectors_done != 0); + raise_barrier(conf, 1); atomic_set(&r10_bio->remaining, 0); r10_bio->mddev = mddev; r10_bio->sector = sector_nr; @@ -4629,6 +4630,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, if (sector_nr <= last) goto read_more; + lower_barrier(conf); + /* Now that we have done the whole section we can * update reshape_progress */ -- GitLab From 41a95041126522a921fb73df22cbdd520dfdebad Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Fri, 31 Aug 2018 10:05:57 +0800 Subject: [PATCH 0812/1692] md-cluster: release RESYNC lock after the last resync message All the RESYNC messages are sent with resync lock held, the only exception is resync_finish which releases resync_lockres before send the last resync message, this should be changed as well. Otherwise, we can see deadlock issue as follows: clustermd2-gqjiang2:~ # cat /proc/mdstat Personalities : [raid10] [raid1] md0 : active raid1 sdg[0] sdf[1] 134144 blocks super 1.2 [2/2] [UU] [===================>.] resync = 99.6% (134144/134144) finish=0.0min speed=26K/sec bitmap: 1/1 pages [4KB], 65536KB chunk unused devices: clustermd2-gqjiang2:~ # ps aux|grep md|grep D root 20497 0.0 0.0 0 0 ? D 16:00 0:00 [md0_raid1] clustermd2-gqjiang2:~ # cat /proc/20497/stack [] dlm_lock_sync+0x8e/0xc0 [md_cluster] [] __sendmsg+0x98/0x130 [md_cluster] [] sendmsg+0x20/0x30 [md_cluster] [] resync_info_update+0xb5/0xc0 [md_cluster] [] md_reap_sync_thread+0x134/0x170 [md_mod] [] md_check_recovery+0x28c/0x510 [md_mod] [] raid1d+0x42/0x800 [raid1] [] md_thread+0x121/0x150 [md_mod] [] kthread+0xff/0x140 [] ret_from_fork+0x35/0x40 [] 0xffffffffffffffff clustermd-gqjiang1:~ # ps aux|grep md|grep D root 20531 0.0 0.0 0 0 ? D 16:00 0:00 [md0_raid1] root 20537 0.0 0.0 0 0 ? D 16:00 0:00 [md0_cluster_rec] root 20676 0.0 0.0 0 0 ? D 16:01 0:00 [md0_resync] clustermd-gqjiang1:~ # cat /proc/mdstat Personalities : [raid10] [raid1] md0 : active raid1 sdf[1] sdg[0] 134144 blocks super 1.2 [2/2] [UU] [===================>.] 
resync = 97.3% (131072/134144) finish=8076.8min speed=0K/sec bitmap: 1/1 pages [4KB], 65536KB chunk unused devices: clustermd-gqjiang1:~ # cat /proc/20531/stack [] metadata_update_start+0xcd/0xd0 [md_cluster] [] md_update_sb.part.61+0x97/0x820 [md_mod] [] md_check_recovery+0x29b/0x510 [md_mod] [] raid1d+0x42/0x800 [raid1] [] md_thread+0x121/0x150 [md_mod] [] kthread+0xff/0x140 [] ret_from_fork+0x35/0x40 [] 0xffffffffffffffff clustermd-gqjiang1:~ # cat /proc/20537/stack [] freeze_array+0xf2/0x140 [raid1] [] recv_daemon+0x41e/0x580 [md_cluster] [] md_thread+0x121/0x150 [md_mod] [] kthread+0xff/0x140 [] ret_from_fork+0x35/0x40 [] 0xffffffffffffffff clustermd-gqjiang1:~ # cat /proc/20676/stack [] dlm_lock_sync+0x8e/0xc0 [md_cluster] [] lock_token+0x2f/0xa0 [md_cluster] [] lock_comm+0x32/0x90 [md_cluster] [] sendmsg+0x15/0x30 [md_cluster] [] resync_info_update+0x8a/0xc0 [md_cluster] [] raid1_sync_request+0xa9a/0xb10 [raid1] [] md_do_sync+0xbaa/0xf90 [md_mod] [] md_thread+0x121/0x150 [md_mod] [] kthread+0xff/0x140 [] ret_from_fork+0x35/0x40 [] 0xffffffffffffffff Reviewed-by: NeilBrown Signed-off-by: Guoqing Jiang Signed-off-by: Shaohua Li --- drivers/md/md-cluster.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 94329e03001e..0b2af6e74fc3 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) static int resync_finish(struct mddev *mddev) { struct md_cluster_info *cinfo = mddev->cluster_info; + int ret = 0; clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); - dlm_unlock_sync(cinfo->resync_lockres); /* * If resync thread is interrupted so we can't say resync is finished, * another node will launch resync thread to continue. */ - if (test_bit(MD_CLOSING, &mddev->flags)) - return 0; - else - return resync_info_update(mddev, 0, 0); + if (!test_bit(MD_CLOSING, &mddev->flags)) + ret = resync_info_update(mddev, 0, 0); + dlm_unlock_sync(cinfo->resync_lockres); + return ret; } static int area_resyncing(struct mddev *mddev, int direction, -- GitLab From 3a7ad0634f0986d807772ba74f66f7c3a73612e5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 29 Aug 2018 11:50:12 -0700 Subject: [PATCH 0813/1692] Revert "packet: switch kvzalloc to allocate memory" This reverts commit 71e41286203c017d24f041a7cd71abea7ca7b1e0. mmap()/munmap() can not be backed by kmalloced pages : We fault in : VM_BUG_ON_PAGE(PageSlab(page), page); unmap_single_vma+0x8a/0x110 unmap_vmas+0x4b/0x90 unmap_region+0xc9/0x140 do_munmap+0x274/0x360 vm_munmap+0x81/0xc0 SyS_munmap+0x2b/0x40 do_syscall_64+0x13e/0x1c0 entry_SYSCALL_64_after_hwframe+0x42/0xb7 Fixes: 71e41286203c ("packet: switch kvzalloc to allocate memory") Signed-off-by: Eric Dumazet Reported-by: John Sperbeck Bisected-by: John Sperbeck Cc: Zhang Yu Cc: Li RongQing Signed-off-by: David S. 
Miller --- net/packet/af_packet.c | 44 +++++++++++++++++++++++++++++------------- net/packet/internal.h | 1 + 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5610061e7f2e..75c92a87e7b2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = { .close = packet_mm_close, }; -static void free_pg_vec(struct pgv *pg_vec, unsigned int len) +static void free_pg_vec(struct pgv *pg_vec, unsigned int order, + unsigned int len) { int i; for (i = 0; i < len; i++) { if (likely(pg_vec[i].buffer)) { - kvfree(pg_vec[i].buffer); + if (is_vmalloc_addr(pg_vec[i].buffer)) + vfree(pg_vec[i].buffer); + else + free_pages((unsigned long)pg_vec[i].buffer, + order); pg_vec[i].buffer = NULL; } } kfree(pg_vec); } -static char *alloc_one_pg_vec_page(unsigned long size) +static char *alloc_one_pg_vec_page(unsigned long order) { char *buffer; + gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | + __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; - buffer = kvzalloc(size, GFP_KERNEL); + buffer = (char *) __get_free_pages(gfp_flags, order); if (buffer) return buffer; - buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); + /* __get_free_pages failed, fall back to vmalloc */ + buffer = vzalloc(array_size((1 << order), PAGE_SIZE)); + if (buffer) + return buffer; - return buffer; + /* vmalloc failed, lets dig into swap here */ + gfp_flags &= ~__GFP_NORETRY; + buffer = (char *) __get_free_pages(gfp_flags, order); + if (buffer) + return buffer; + + /* complete and utter failure */ + return NULL; } -static struct pgv *alloc_pg_vec(struct tpacket_req *req) +static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order) { unsigned int block_nr = req->tp_block_nr; - unsigned long size = req->tp_block_size; struct pgv *pg_vec; int i; @@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req) goto out; for (i = 0; i < block_nr; i++) { - pg_vec[i].buffer = alloc_one_pg_vec_page(size); + pg_vec[i].buffer = alloc_one_pg_vec_page(order); if (unlikely(!pg_vec[i].buffer)) goto out_free_pgvec; } @@ -4184,7 +4200,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req) return pg_vec; out_free_pgvec: - free_pg_vec(pg_vec, block_nr); + free_pg_vec(pg_vec, order, block_nr); pg_vec = NULL; goto out; } @@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, { struct pgv *pg_vec = NULL; struct packet_sock *po = pkt_sk(sk); + int was_running, order = 0; struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; - int was_running; __be16 num; int err = -EINVAL; /* Added to avoid minimal code churn */ @@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, goto out; err = -ENOMEM; - pg_vec = alloc_pg_vec(req); + order = get_order(req->tp_block_size); + pg_vec = alloc_pg_vec(req, order); if (unlikely(!pg_vec)) goto out; switch (po->tp_version) { @@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, rb->frame_size = req->tp_frame_size; spin_unlock_bh(&rb_queue->lock); + swap(rb->pg_vec_order, order); swap(rb->pg_vec_len, req->tp_block_nr); rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; @@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, } if (pg_vec) - free_pg_vec(pg_vec, req->tp_block_nr); + free_pg_vec(pg_vec, order, req->tp_block_nr); out: return err; } diff --git a/net/packet/internal.h 
b/net/packet/internal.h index 8f50036f62f0..3bb7c5fb3bff 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -64,6 +64,7 @@ struct packet_ring_buffer { unsigned int frame_size; unsigned int frame_max; + unsigned int pg_vec_order; unsigned int pg_vec_pages; unsigned int pg_vec_len; -- GitLab From 9ad716b95fd6c6be46a4f2d5936e514b5bcd744d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 29 Aug 2018 12:46:08 -0700 Subject: [PATCH 0814/1692] nfp: wait for posted reconfigs when disabling the device To avoid leaking a running timer we need to wait for the posted reconfigs after netdev is unregistered. In common case the process of deinitializing the device will perform synchronous reconfigs which wait for posted requests, but especially with VXLAN ports being actively added and removed there can be a race condition leaving a timer running after adapter structure is freed leading to a crash. Add an explicit flush after deregistering and for a good measure a warning to check if timer is running just before structures are freed. Fixes: 3d780b926a12 ("nfp: add async reconfiguration mechanism") Signed-off-by: Jakub Kicinski Reviewed-by: Dirk van der Merwe Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfp_net_common.c | 48 +++++++++++++------ 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index a8b9fbab5f73..253bdaef1505 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -229,29 +229,16 @@ static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update) spin_unlock_bh(&nn->reconfig_lock); } -/** - * nfp_net_reconfig() - Reconfigure the firmware - * @nn: NFP Net device to reconfigure - * @update: The value for the update field in the BAR config - * - * Write the update word to the BAR and ping the reconfig queue. The - * poll until the firmware has acknowledged the update by zeroing the - * update word. - * - * Return: Negative errno on error, 0 on success - */ -int nfp_net_reconfig(struct nfp_net *nn, u32 update) +static void nfp_net_reconfig_sync_enter(struct nfp_net *nn) { bool cancelled_timer = false; u32 pre_posted_requests; - int ret; spin_lock_bh(&nn->reconfig_lock); nn->reconfig_sync_present = true; if (nn->reconfig_timer_active) { - del_timer(&nn->reconfig_timer); nn->reconfig_timer_active = false; cancelled_timer = true; } @@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) spin_unlock_bh(&nn->reconfig_lock); - if (cancelled_timer) + if (cancelled_timer) { + del_timer_sync(&nn->reconfig_timer); nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); + } /* Run the posted reconfigs which were issued before we started */ if (pre_posted_requests) { nfp_net_reconfig_start(nn, pre_posted_requests); nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); } +} + +static void nfp_net_reconfig_wait_posted(struct nfp_net *nn) +{ + nfp_net_reconfig_sync_enter(nn); + + spin_lock_bh(&nn->reconfig_lock); + nn->reconfig_sync_present = false; + spin_unlock_bh(&nn->reconfig_lock); +} + +/** + * nfp_net_reconfig() - Reconfigure the firmware + * @nn: NFP Net device to reconfigure + * @update: The value for the update field in the BAR config + * + * Write the update word to the BAR and ping the reconfig queue. The + * poll until the firmware has acknowledged the update by zeroing the + * update word. 
+ * + * Return: Negative errno on error, 0 on success + */ +int nfp_net_reconfig(struct nfp_net *nn, u32 update) +{ + int ret; + + nfp_net_reconfig_sync_enter(nn); nfp_net_reconfig_start(nn, update); ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); @@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, */ void nfp_net_free(struct nfp_net *nn) { + WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted); if (nn->dp.netdev) free_netdev(nn->dp.netdev); else @@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn) return; unregister_netdev(nn->dp.netdev); + nfp_net_reconfig_wait_posted(nn); } -- GitLab From e04e7a7bbd4bbabef4e1a58367e5fc9b2edc3b10 Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Thu, 30 Aug 2018 05:42:13 +0000 Subject: [PATCH 0815/1692] hv_netvsc: Fix a deadlock by getting rtnl lock earlier in netvsc_probe() This patch fixes the race between netvsc_probe() and rndis_set_subchannel(), which can cause a deadlock. These are the related 3 paths which show the deadlock: path #1: Workqueue: hv_vmbus_con vmbus_onmessage_work [hv_vmbus] Call Trace: schedule schedule_preempt_disabled __mutex_lock __device_attach bus_probe_device device_add vmbus_device_register vmbus_onoffer vmbus_onmessage_work process_one_work worker_thread kthread ret_from_fork path #2: schedule schedule_preempt_disabled __mutex_lock netvsc_probe vmbus_probe really_probe __driver_attach bus_for_each_dev driver_attach_async async_run_entry_fn process_one_work worker_thread kthread ret_from_fork path #3: Workqueue: events netvsc_subchan_work [hv_netvsc] Call Trace: schedule rndis_set_subchannel netvsc_subchan_work process_one_work worker_thread kthread ret_from_fork Before path #1 finishes, path #2 can start to run, because just before the "bus_probe_device(dev);" in device_add() in path #1, there is a line "object_uevent(&dev->kobj, KOBJ_ADD);", so systemd-udevd can immediately try to load hv_netvsc and hence path #2 can start to run. Next, path #2 offloads the subchannal's initialization to a workqueue, i.e. path #3, so we can end up in a deadlock situation like this: Path #2 gets the device lock, and is trying to get the rtnl lock; Path #3 gets the rtnl lock and is waiting for all the subchannel messages to be processed; Path #1 is trying to get the device lock, but since #2 is not releasing the device lock, path #1 has to sleep; since the VMBus messages are processed one by one, this means the sub-channel messages can't be procedded, so #3 has to sleep with the rtnl lock held, and finally #2 has to sleep... Now all the 3 paths are sleeping and we hit the deadlock. With the patch, we can make sure #2 gets both the device lock and the rtnl lock together, gets its job done, and releases the locks, so #1 and #3 will not be blocked for ever. Fixes: 8195b1396ec8 ("hv_netvsc: fix deadlock on hotplug") Signed-off-by: Dexuan Cui Cc: Stephen Hemminger Cc: K. Y. Srinivasan Cc: Haiyang Zhang Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1121a1ec407c..70921bbe0e28 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2206,6 +2206,16 @@ static int netvsc_probe(struct hv_device *dev, memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); + /* We must get rtnl lock before scheduling nvdev->subchan_work, + * otherwise netvsc_subchan_work() can get rtnl lock first and wait + * all subchannels to show up, but that may not happen because + * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() + * -> ... -> device_add() -> ... -> __device_attach() can't get + * the device lock, so all the subchannels can't be processed -- + * finally netvsc_subchan_work() hangs for ever. + */ + rtnl_lock(); + if (nvdev->num_chn > 1) schedule_work(&nvdev->subchan_work); @@ -2224,7 +2234,6 @@ static int netvsc_probe(struct hv_device *dev, else net->max_mtu = ETH_DATA_LEN; - rtnl_lock(); ret = register_netdevice(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); -- GitLab From b0e0b0abbd5e52568e3848b0ba54a9efa3a11547 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Thu, 30 Aug 2018 13:30:03 +0200 Subject: [PATCH 0816/1692] net/rds: RDS is not Radio Data System Getting prompt "The RDS Protocol" (RDS) is not too helpful, and it is easily confused with Radio Data System (which we may want to support in kernel, too). Signed-off-by: Pavel Machek Acked-by: Sowmini Varadhan Acked-by: Santosh Shilimkar Acked-by: Sowmini Varadhan Signed-off-by: David S. Miller --- net/rds/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rds/Kconfig b/net/rds/Kconfig index 01b3bd6a3708..b9092111bc45 100644 --- a/net/rds/Kconfig +++ b/net/rds/Kconfig @@ -1,6 +1,6 @@ config RDS - tristate "The RDS Protocol" + tristate "The Reliable Datagram Sockets Protocol" depends on INET ---help--- The RDS (Reliable Datagram Sockets) protocol provides reliable, -- GitLab From 63cc357f7bba6729869565a12df08441a5995d9a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Aug 2018 14:24:29 +0200 Subject: [PATCH 0817/1692] tcp: do not restart timewait timer on rst reception RFC 1337 says: ''Ignore RST segments in TIME-WAIT state. If the 2 minute MSL is enforced, this fix avoids all three hazards.'' So with net.ipv4.tcp_rfc1337=1, expected behaviour is to have TIME-WAIT sk expire rather than removing it instantly when a reset is received. However, Linux will also re-start the TIME-WAIT timer. This causes connect to fail when tying to re-use ports or very long delays (until syn retry interval exceeds MSL). packetdrill test case: // Demonstrate bogus rearming of TIME-WAIT timer in rfc1337 mode. `sysctl net.ipv4.tcp_rfc1337=1` 0.000 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3 0.000 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0 0.000 bind(3, ..., ...) = 0 0.000 listen(3, 1) = 0 0.100 < S 0:0(0) win 29200 0.100 > S. 0:0(0) ack 1 0.200 < . 1:1(0) ack 1 win 257 0.200 accept(3, ..., ...) = 4 // Receive first segment 0.310 < P. 1:1001(1000) ack 1 win 46 // Send one ACK 0.310 > . 1:1(0) ack 1001 // read 1000 byte 0.310 read(4, ..., 1000) = 1000 // Application writes 100 bytes 0.350 write(4, ..., 100) = 100 0.350 > P. 1:101(100) ack 1001 // ACK 0.500 < . 1001:1001(0) ack 101 win 257 // close the connection 0.600 close(4) = 0 0.600 > F. 
101:101(0) ack 1001 win 244 // Our side is in FIN_WAIT_1 & waits for ack to fin 0.7 < . 1001:1001(0) ack 102 win 244 // Our side is in FIN_WAIT_2 with no outstanding data. 0.8 < F. 1001:1001(0) ack 102 win 244 0.8 > . 102:102(0) ack 1002 win 244 // Our side is now in TIME_WAIT state, send ack for fin. 0.9 < F. 1002:1002(0) ack 102 win 244 0.9 > . 102:102(0) ack 1002 win 244 // Peer reopens with in-window SYN: 1.000 < S 1000:1000(0) win 9200 // Therefore, reply with ACK. 1.000 > . 102:102(0) ack 1002 win 244 // Peer sends RST for this ACK. Normally this RST results // in tw socket removal, but rfc1337=1 setting prevents this. 1.100 < R 1002:1002(0) win 244 // second syn. Due to rfc1337=1 expect another pure ACK. 31.0 < S 1000:1000(0) win 9200 31.0 > . 102:102(0) ack 1002 win 244 // .. and another RST from peer. 31.1 < R 1002:1002(0) win 244 31.2 `echo no timer restart;ss -m -e -a -i -n -t -o state TIME-WAIT` // third syn after one minute. Time-Wait socket should have expired by now. 63.0 < S 1000:1000(0) win 9200 // so we expect a syn-ack & 3whs to proceed from here on. 63.0 > S. 0:0(0) ack 1 Without this patch, 'ss' shows restarts of tw timer and last packet is thus just another pure ack, more than one minute later. This restores the original code from commit 283fd6cf0be690a83 ("Merge in ANK networking jumbo patch") in netdev-vger-cvs.git . For some reason the else branch was removed/lost in 1f28b683339f7 ("Merge in TCP/UDP optimizations and [..]") and timer restart became unconditional. Reported-by: Michal Tesar Signed-off-by: Florian Westphal Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_minisocks.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 75ef332a7caf..12affb7864d9 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -184,8 +184,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, inet_twsk_deschedule_put(tw); return TCP_TW_SUCCESS; } + } else { + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); } - inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); if (tmp_opt.saw_tstamp) { tcptw->tw_ts_recent = tmp_opt.rcv_tsval; -- GitLab From 2b5cf4ef541f1b2facaca58cae5e8e0b5f19ad4c Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 31 Aug 2018 20:47:39 +0300 Subject: [PATCH 0818/1692] drm/i915/dp_mst: Fix enabling pipe clock for all streams MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit afb2c4437dae ("drm/i915/ddi: Push pipe clock enabling to encoders") inadvertently stopped enabling the pipe clock for any DP-MST stream after the first one. It also rearranged the pipe clock enabling wrt. initial MST payload allocation step (which may or may not be a problem, but it's contrary to the spec.). Fix things by making the above commit truly a non-functional change. 
Fixes: afb2c4437dae ("drm/i915/ddi: Push pipe clock enabling to encoders") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107365 Reported-by: Lyude Paul Reported-by: dmummenschanz@web.de Tested-by: dmummenschanz@web.de Tested-by: Lyude Paul Cc: Lyude Paul Cc: dmummenschanz@web.de Cc: Ville Syrjälä Cc: Rodrigo Vivi Cc: Chris Wilson Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Reviewed-by: Lyude Paul Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180831174739.30387-1-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_ddi.c | 17 +++++++++-------- drivers/gpu/drm/i915/intel_dp_mst.c | 4 ++++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f3b115ce4029..dcb1a98d624d 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2912,7 +2912,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, icl_enable_phy_clock_gating(dig_port); - intel_ddi_enable_pipe_clock(crtc_state); + if (!is_mst) + intel_ddi_enable_pipe_clock(crtc_state); } static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, @@ -3015,14 +3016,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); - intel_ddi_disable_pipe_clock(old_crtc_state); - - /* - * Power down sink before disabling the port, otherwise we end - * up getting interrupts from the sink on detecting link loss. - */ - if (!is_mst) + if (!is_mst) { + intel_ddi_disable_pipe_clock(old_crtc_state); + /* + * Power down sink before disabling the port, otherwise we end + * up getting interrupts from the sink on detecting link loss. + */ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + } intel_disable_ddi_buf(encoder); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 352e5216cc65..77920f1a3da1 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + intel_ddi_disable_pipe_clock(old_crtc_state); + /* this can fail */ drm_dp_check_act_status(&intel_dp->mst_mgr); /* and this can also fail */ @@ -249,6 +251,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, I915_WRITE(DP_TP_STATUS(port), temp); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + + intel_ddi_enable_pipe_clock(pipe_config); } static void intel_mst_enable_dp(struct intel_encoder *encoder, -- GitLab From 902b5417f28d955cdb4898df6ffaab15f56c5cff Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Thu, 30 Aug 2018 16:01:17 +0200 Subject: [PATCH 0819/1692] selftests: pmtu: maximum MTU for vti4 is 2^16-1-20 Since commit 82612de1c98e ("ip_tunnel: restore binding to ifaces with a large mtu"), the maximum MTU for vti4 is based on IP_MAX_MTU instead of the mysterious constant 0xFFF8. This makes this selftest fail. Fixes: 82612de1c98e ("ip_tunnel: restore binding to ifaces with a large mtu") Signed-off-by: Sabrina Dubroca Acked-by: Stefano Brivio Acked-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/pmtu.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index f8cc38afffa2..0ecf2609b9a4 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -334,7 +334,7 @@ test_pmtu_vti4_link_add_mtu() { fail=0 min=68 - max=$((65528 - 20)) + max=$((65535 - 20)) # Check invalid values first for v in $((min - 1)) $((max + 1)); do ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null -- GitLab From c81c7012e0c769b5704c2b07bd5224965e76fb70 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Thu, 30 Aug 2018 16:01:18 +0200 Subject: [PATCH 0820/1692] selftests: pmtu: detect correct binary to ping ipv6 addresses Some systems don't have the ping6 binary anymore, and use ping for everything. Detect the absence of ping6 and try to use ping instead. Fixes: d1f1b9cbf34c ("selftests: net: Introduce first PMTU test") Signed-off-by: Sabrina Dubroca Acked-by: Stefano Brivio Signed-off-by: David S. Miller --- tools/testing/selftests/net/pmtu.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 0ecf2609b9a4..32a194e3e07a 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -46,6 +46,9 @@ # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 +# Some systems don't have a ping6 binary anymore +which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping) + tests=" pmtu_vti6_exception vti6: PMTU exceptions pmtu_vti4_exception vti4: PMTU exceptions @@ -274,7 +277,7 @@ test_pmtu_vti6_exception() { mtu "${ns_b}" veth_b 4000 mtu "${ns_a}" vti6_a 5000 mtu "${ns_b}" vti6_b 5000 - ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null + ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null # Check that exception was created if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then -- GitLab From f611a5b4a51fa36a0aa792be474f5d6aacaef7e3 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Thu, 30 Aug 2018 13:19:53 -0500 Subject: [PATCH 0821/1692] ibmvnic: Include missing return code checks in reset function Check the return codes of these functions and halt reset in case of failure. The driver will remain in a dormant state until the next reset event, when device initialization will be re-attempted. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index dafdd4ade705..4f0daf67b18d 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter, adapter->map_id = 1; release_rx_pools(adapter); release_tx_pools(adapter); - init_rx_pools(netdev); - init_tx_pools(netdev); + rc = init_rx_pools(netdev); + if (rc) + return rc; + rc = init_tx_pools(netdev); + if (rc) + return rc; release_napi(adapter); - init_napi(adapter); + rc = init_napi(adapter); + if (rc) + return rc; } else { rc = reset_tx_pools(adapter); if (rc) -- GitLab From 48e905048f39ae97bd08dbbbc78a848d1d555d80 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 31 Aug 2018 15:36:43 +0100 Subject: [PATCH 0822/1692] drm/i915: Explicitly mark Global GTT address spaces So far we have been relying on vm->file pointer being NULL to declare something GGTT. This has the unfortunate consequence that the default kernel context is also declared GGTT and interferes with the following patch which wants to instantiate VMA's and execute requests against the kernel context. Change the is_ggtt test to use an explicit flag in struct address_space to solve this issue. Note that the bit used is free since there is an alignment hole in the struct. v2: * Mark mock ggtt. Signed-off-by: Tvrtko Ursulin Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180831143643.12366-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 ++ drivers/gpu/drm/i915/i915_gem_gtt.h | 5 ++++- drivers/gpu/drm/i915/selftests/mock_gtt.c | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d9d44639ba26..eb0e446d6482 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3604,6 +3604,8 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); i915_address_space_init(&ggtt->vm, dev_priv); + ggtt->vm.is_ggtt = true; + /* Only VLV supports read-only GGTT mappings */ ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 01d83a943142..7e2af5f4f39b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -324,6 +324,9 @@ struct i915_address_space { struct pagestash free_pages; + /* Global GTT */ + bool is_ggtt:1; + /* Some systems require uncached updates of the page directories */ bool pt_kmap_wc:1; @@ -357,7 +360,7 @@ struct i915_address_space { I915_SELFTEST_DECLARE(bool scrub_64K); }; -#define i915_is_ggtt(V) (!(V)->file) +#define i915_is_ggtt(vm) ((vm)->is_ggtt) static inline bool i915_vm_is_48bit(const struct i915_address_space *vm) diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index a140ea5c3a7c..6ae418c76015 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -118,6 +118,8 @@ void mock_init_ggtt(struct drm_i915_private *i915) ggtt->vm.vma_ops.clear_pages = clear_pages; i915_address_space_init(&ggtt->vm, i915); + + ggtt->vm.is_ggtt = true; } void mock_fini_ggtt(struct drm_i915_private *i915) -- GitLab From 
c7486104a5ce7e8763e3cb5157bba8d0f1468d87 Mon Sep 17 00:00:00 2001 From: LuckTony Date: Fri, 31 Aug 2018 09:55:06 -0700 Subject: [PATCH 0823/1692] x86/mce: Fix set_mce_nospec() to avoid #GP fault The trick with flipping bit 63 to avoid loading the address of the 1:1 mapping of the poisoned page while the 1:1 map is updated used to work when unmapping the page. But it falls down horribly when attempting to directly set the page as uncacheable. The problem is that when the cache mode is changed to uncachable, the pages needs to be flushed from the cache first. But the decoy address is non-canonical due to bit 63 flipped, and the CLFLUSH instruction throws a #GP fault. Add code to change_page_attr_set_clr() to fix the address before calling flush. Fixes: 284ce4011ba6 ("x86/memory_failure: Introduce {set, clear}_mce_nospec()") Suggested-by: Linus Torvalds Signed-off-by: Tony Luck Signed-off-by: Thomas Gleixner Acked-by: Linus Torvalds Cc: Peter Anvin Cc: Borislav Petkov Cc: linux-edac Cc: Dan Williams Cc: Dave Jiang Link: https://lkml.kernel.org/r/20180831165506.GA9605@agluck-desk --- arch/x86/mm/pageattr.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 8d6c34fe49be..51a5a69ecac9 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) return 0; } +/* + * Machine check recovery code needs to change cache mode of poisoned + * pages to UC to avoid speculative access logging another error. But + * passing the address of the 1:1 mapping to set_memory_uc() is a fine + * way to encourage a speculative access. So we cheat and flip the top + * bit of the address. This works fine for the code that updates the + * page tables. But at the end of the process we need to flush the cache + * and the non-canonical address causes a #GP fault when used by the + * CLFLUSH instruction. + * + * But in the common case we already have a canonical address. This code + * will fix the top bit if needed and is a no-op otherwise. + */ +static inline unsigned long make_addr_canonical_again(unsigned long addr) +{ +#ifdef CONFIG_X86_64 + return (long)(addr << 1) >> 1; +#else + return addr; +#endif +} + + static int change_page_attr_set_clr(unsigned long *addr, int numpages, pgprot_t mask_set, pgprot_t mask_clr, int force_split, int in_flag, @@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, * Save address for cache flush. *addr is modified in the call * to __change_page_attr_set_clr() below. */ - baddr = *addr; + baddr = make_addr_canonical_again(*addr); } /* Must avoid aliasing mappings in the highmem code */ -- GitLab From c1d0af1a1d5dfde880f588eceb4c00710e0f60ff Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Aug 2018 08:07:52 +0200 Subject: [PATCH 0824/1692] kernel/dma/direct: take DMA offset into account in dma_direct_supported When a device has a DMA offset the dma capable result will change due to the difference between the physical and DMA address. Take that into account. 
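To illustrate the arithmetic this change accounts for, here is a minimal
userspace sketch; toy_phys_to_dma() is only a stand-in for the kernel
helper, and the 2 GiB bus offset and 31-bit device mask are invented
values, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for phys_to_dma(): with a constant bus offset the
 * address the device must reach is lower than the CPU physical one. */
static uint64_t toy_phys_to_dma(uint64_t bus_offset, uint64_t paddr)
{
        return paddr - bus_offset;
}

int main(void)
{
        uint64_t zone_limit = 0xffffffffULL;  /* DMA_BIT_MASK(32) */
        uint64_t bus_offset = 0x80000000ULL;  /* hypothetical 2 GiB offset */
        uint64_t dev_mask   = 0x7fffffffULL;  /* device addresses 31 bits */

        /* Comparing against the raw CPU physical limit wrongly rejects
         * the device... */
        printf("ignoring offset:  %s\n",
               dev_mask < zone_limit ? "unsupported" : "supported");

        /* ...while translating the limit into bus address space first,
         * as the patched check does, accepts it. */
        printf("honouring offset: %s\n",
               dev_mask < toy_phys_to_dma(bus_offset, zone_limit) ?
               "unsupported" : "supported");
        return 0;
}
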
Signed-off-by: Christoph Hellwig Reviewed-by: Benjamin Herrenschmidt Reviewed-by: Robin Murphy --- kernel/dma/direct.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 1c35b7b945d0..de87b0282e74 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, int dma_direct_supported(struct device *dev, u64 mask) { #ifdef CONFIG_ZONE_DMA - if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) + if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))) return 0; #else /* @@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask) * memory, or by providing a ZONE_DMA32. If neither is the case, the * architecture needs to use an IOMMU instead of the direct mapping. */ - if (mask < DMA_BIT_MASK(32)) + if (mask < phys_to_dma(dev, DMA_BIT_MASK(32))) return 0; #endif /* -- GitLab From bcd8e91f98c156f4b1ebcfacae675f9cfd962441 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 1 Sep 2018 12:45:04 -0400 Subject: [PATCH 0825/1692] ext4: avoid arithemetic overflow that can trigger a BUG A maliciously crafted file system can cause an overflow when the results of a 64-bit calculation is stored into a 32-bit length parameter. https://bugzilla.kernel.org/show_bug.cgi?id=200623 Signed-off-by: Theodore Ts'o Reported-by: Wen Xu Cc: stable@vger.kernel.org --- fs/ext4/ext4.h | 3 +++ fs/ext4/inode.c | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 249bcee4d7b2..ac05bd86643a 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -686,6 +686,9 @@ enum { /* Max physical block we can address w/o extents */ #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF +/* Max logical block we can support */ +#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF + /* * Structure of an inode on the disk */ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 8f6ad7667974..694f31364206 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3412,12 +3412,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned int blkbits = inode->i_blkbits; - unsigned long first_block = offset >> blkbits; - unsigned long last_block = (offset + length - 1) >> blkbits; + unsigned long first_block, last_block; struct ext4_map_blocks map; bool delalloc = false; int ret; + if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) + return -EINVAL; + first_block = offset >> blkbits; + last_block = min_t(loff_t, (offset + length - 1) >> blkbits, + EXT4_MAX_LOGICAL_BLOCK); if (flags & IOMAP_REPORT) { if (ext4_has_inline_data(inode)) { -- GitLab From 9b25436662d5fb4c66eb527ead53cab15f596ee0 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 27 Aug 2018 14:51:54 -0700 Subject: [PATCH 0826/1692] random: make CPU trust a boot parameter Instead of forcing a distro or other system builder to choose at build time whether the CPU is trusted for CRNG seeding via CONFIG_RANDOM_TRUST_CPU, provide a boot-time parameter for end users to control the choice. The CONFIG will set the default state instead. 
Signed-off-by: Kees Cook Signed-off-by: Theodore Ts'o --- Documentation/admin-guide/kernel-parameters.txt | 6 ++++++ drivers/char/Kconfig | 4 ++-- drivers/char/random.c | 11 ++++++++--- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 0c8f7889efa1..227c5c6fa4c1 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3390,6 +3390,12 @@ ramdisk_size= [RAM] Sizes of RAM disks in kilobytes See Documentation/blockdev/ramdisk.txt. + random.trust_cpu={on,off} + [KNL] Enable or disable trusting the use of the + CPU's random number generator (if available) to + fully seed the kernel's CRNG. Default is controlled + by CONFIG_RANDOM_TRUST_CPU. + ras=option[,option,...] [KNL] RAS-specific options cec_disable [X86] diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ce277ee0a28a..40728491f37b 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU that CPU manufacturer (perhaps with the insistence or mandate of a Nation State's intelligence or law enforcement agencies) has not installed a hidden back door to compromise the CPU's - random number generation facilities. - + random number generation facilities. This can also be configured + at boot with "random.trust_cpu=on/off". diff --git a/drivers/char/random.c b/drivers/char/random.c index bf5f99fc36f1..c75b6cdf0053 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly; static void invalidate_batched_entropy(void); +static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); +static int __init parse_trust_cpu(char *arg) +{ + return kstrtobool(arg, &trust_cpu); +} +early_param("random.trust_cpu", parse_trust_cpu); + static void crng_initialize(struct crng_state *crng) { int i; @@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng) } crng->state[i] ^= rv; } -#ifdef CONFIG_RANDOM_TRUST_CPU - if (arch_init) { + if (trust_cpu && arch_init) { crng_init = 2; pr_notice("random: crng done (trusting CPU's manufacturer)\n"); } -#endif crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } -- GitLab From 4274f516d4bc50648a4d97e4f67ecbd7b65cde4a Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 1 Sep 2018 14:42:14 -0400 Subject: [PATCH 0827/1692] ext4: recalucate superblock checksum after updating free blocks/inodes When mounting the superblock, ext4_fill_super() calculates the free blocks and free inodes and stores them in the superblock. It's not strictly necessary, since we don't use them any more, but it's nice to keep them roughly aligned to reality. Since it's not critical for file system correctness, the code doesn't call ext4_commit_super(). The problem is that it's in ext4_commit_super() that we recalculate the superblock checksum. So if we're not going to call ext4_commit_super(), we need to call ext4_superblock_csum_set() to make sure the superblock checksum is consistent. Most of the time, this doesn't matter, since we end up calling ext4_commit_super() very soon thereafter, and definitely by the time the file system is unmounted. However, it doesn't work in this sequence: mke2fs -Fq -t ext4 /dev/vdc 128M mount /dev/vdc /vdc cp xfstests/git-versions /vdc godown /vdc umount /vdc mount /dev/vdc tune2fs -l /dev/vdc With this commit, the "tune2fs -l" no longer fails. 
Reported-by: Chengguang Xu Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org --- fs/ext4/super.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f7750bc5b85a..e41da553b430 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4378,11 +4378,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) block = ext4_count_free_clusters(sb); ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block)); + ext4_superblock_csum_set(sb); err = percpu_counter_init(&sbi->s_freeclusters_counter, block, GFP_KERNEL); if (!err) { unsigned long freei = ext4_count_free_inodes(sb); sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); + ext4_superblock_csum_set(sb); err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, GFP_KERNEL); } -- GitLab From e78e5a91456fcecaa2efbb3706572fe043766f4d Mon Sep 17 00:00:00 2001 From: Samuel Neves Date: Sat, 1 Sep 2018 21:14:52 +0100 Subject: [PATCH 0828/1692] x86/vdso: Fix lsl operand order In the __getcpu function, lsl is using the wrong target and destination registers. Luckily, the compiler tends to choose %eax for both variables, so it has been working so far. Fixes: a582c540ac1b ("x86/vdso: Use RDPID in preference to LSL when available") Signed-off-by: Samuel Neves Signed-off-by: Thomas Gleixner Acked-by: Andy Lutomirski Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180901201452.27828-1-sneves@dei.uc.pt --- arch/x86/include/asm/vgtod.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index fb856c9f0449..53748541c487 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void) * * If RDPID is available, use it. */ - alternative_io ("lsl %[p],%[seg]", + alternative_io ("lsl %[seg],%[p]", ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ X86_FEATURE_RDPID, [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); -- GitLab From 93bbadd6e0a2a58e49d265b9b1aa58e621b60a26 Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Thu, 30 Aug 2018 19:11:24 +0300 Subject: [PATCH 0829/1692] ipv6: don't get lwtstate twice in ip6_rt_copy_init() Commit 80f1a0f4e0cd ("net/ipv6: Put lwtstate when destroying fib6_info") partially fixed the kmemleak [1], lwtstate can be copied from fib6_info, with ip6_rt_copy_init(), and it should be done only once there. rt->dst.lwtstate is set by ip6_rt_init_dst(), at the start of the function ip6_rt_copy_init(), so there is no need to get it again at the end. With this patch, lwtstate also isn't copied from RTF_REJECT routes. [1]: unreferenced object 0xffff880b6aaa14e0 (size 64): comm "ip", pid 10577, jiffies 4295149341 (age 1273.903s) hex dump (first 32 bytes): 01 00 04 00 04 00 00 00 10 00 00 00 00 00 00 00 ................ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 
backtrace: [<0000000018664623>] lwtunnel_build_state+0x1bc/0x420 [<00000000b73aa29a>] ip6_route_info_create+0x9f7/0x1fd0 [<00000000ee2c5d1f>] ip6_route_add+0x14/0x70 [<000000008537b55c>] inet6_rtm_newroute+0xd9/0xe0 [<000000002acc50f5>] rtnetlink_rcv_msg+0x66f/0x8e0 [<000000008d9cd381>] netlink_rcv_skb+0x268/0x3b0 [<000000004c893c76>] netlink_unicast+0x417/0x5a0 [<00000000f2ab1afb>] netlink_sendmsg+0x70b/0xc30 [<00000000890ff0aa>] sock_sendmsg+0xb1/0xf0 [<00000000a2e7b66f>] ___sys_sendmsg+0x659/0x950 [<000000001e7426c8>] __sys_sendmsg+0xde/0x170 [<00000000fe411443>] do_syscall_64+0x9f/0x4a0 [<000000001be7b28b>] entry_SYSCALL_64_after_hwframe+0x49/0xbe [<000000006d21f353>] 0xffffffffffffffff Fixes: 6edb3c96a5f0 ("net/ipv6: Defer initialization of dst to data path") Signed-off-by: Alexey Kodanev Reviewed-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/route.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c4ea13e8360b..18e00ce1719a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) rt->rt6i_src = ort->fib6_src; #endif rt->rt6i_prefsrc = ort->fib6_prefsrc; - rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate); } static struct fib6_node* fib6_backtrack(struct fib6_node *fn, -- GitLab From 5a7faef72eb9d51487feac467c8c68afa459534c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Aug 2018 11:25:51 +0200 Subject: [PATCH 0830/1692] sparc: set a default 32-bit dma mask for OF devices This keeps the historic default behavior for devices without a DMA mask, but removes the warning about a lacking DMA mask for doing DMA without a mask. Reported-by: Meelis Roos Signed-off-by: Christoph Hellwig Tested-by: Guenter Roeck --- arch/sparc/kernel/of_device_32.c | 4 ++++ arch/sparc/kernel/of_device_64.c | 3 +++ 2 files changed, 7 insertions(+) diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 3641a294ed54..e4abe9b8f97a 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, else dev_set_name(&op->dev, "%08x", dp->phandle); + op->dev.coherent_dma_mask = DMA_BIT_MASK(32); + op->dev.dma_mask = &op->dev.coherent_dma_mask; + if (of_device_register(op)) { printk("%s: Could not register of device.\n", dp->full_name); diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 44e4d4435bed..6df6086968c6 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, dev_set_name(&op->dev, "root"); else dev_set_name(&op->dev, "%08x", dp->phandle); + op->dev.coherent_dma_mask = DMA_BIT_MASK(32); + op->dev.dma_mask = &op->dev.coherent_dma_mask; if (of_device_register(op)) { printk("%s: Could not register of device.\n", -- GitLab From 8c89ef7b6b64ba093239305f77a485905d03f7bf Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 31 Aug 2018 16:13:07 +0200 Subject: [PATCH 0831/1692] of/platform: initialise AMBA default DMA masks This addresses a v4.19-rc1 regression in the PL111 DRM driver in drivers/gpu/pl111/* The driver uses the CMA KMS helpers and will thus at some point call down to 
dma_alloc_attrs() to allocate a chunk of contigous DMA memory for the framebuffer. It appears that in v4.18, it was OK that this (and other DMA mastering AMBA devices) left dev->coherent_dma_mask blank (zero). In v4.19-rc1 the WARN_ON_ONCE(dev && !dev->coherent_dma_mask) in dma_alloc_attrs() in include/linux/dma-mapping.h is triggered. The allocation later fails when get_coherent_dma_mask() is called from __dma_alloc() and __dma_alloc() returns NULL: drm-clcd-pl111 dev:20: coherent DMA mask is unset drm-clcd-pl111 dev:20: [drm:drm_fb_helper_fbdev_setup] *ERROR* Failed to set fbdev configuration It turns out that in commit 4d8bde883bfb ("OF: Don't set default coherent DMA mask") the OF core stops setting the default DMA mask on new devices, especially those lines of the patch: - if (!dev->coherent_dma_mask) - dev->coherent_dma_mask = DMA_BIT_MASK(32); Robin Murphy solved a similar problem in a5516219b102 ("of/platform: Initialise default DMA masks") by simply assigning dev.coherent_dma_mask and the dev.dma_mask to point to the same when creating devices from the device tree, and introducing the same code into the code path creating AMBA/PrimeCell devices solved my problem, graphics now come up. The code simply assumes that the device can access all of the system memory by setting the coherent DMA mask to 0xffffffff when creating a device from the device tree, which is crude, but seems to be what kernel v4.18 assumed. The AMBA PrimeCells do not differ between coherent and streaming DMA so we can just assign the same to any DMA mask. Possibly drivers should augment their coherent DMA mask in accordance with "dma-ranges" from the device tree if more finegranular masking is needed. Reported-by: Russell King Fixes: 4d8bde883bfb ("OF: Don't set default coherent DMA mask") Cc: Russell King Cc: Robin Murphy Signed-off-by: Linus Walleij Signed-off-by: Christoph Hellwig --- drivers/of/platform.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 7ba90c290a42..6c59673933e9 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node, if (!dev) goto err_clear_flag; + /* AMBA devices only support a single DMA mask */ + dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + dev->dev.dma_mask = &dev->dev.coherent_dma_mask; + /* setup generic device info */ dev->dev.of_node = of_node_get(node); dev->dev.fwnode = &node->fwnode; -- GitLab From 65099ea85e885c3ea1272eca8774b771419d8ce8 Mon Sep 17 00:00:00 2001 From: Matt Ranostay Date: Sat, 25 Aug 2018 02:00:48 -0700 Subject: [PATCH 0832/1692] Revert "iio: temperature: maxim_thermocouple: add MAX31856 part" This reverts commit 535fba29b3e1afef4ba201b3c69a6992583ec0bd. Seems the submitter (er me, hang head in shame) didn't look at the datasheet enough to see that the registers are quite different. 
This needs to be reverted because a) would never work b) to open it be added to a Maxim RTDs (Resistance Temperature Detectors) under development by author Signed-off-by: Matt Ranostay Signed-off-by: Jonathan Cameron --- drivers/iio/temperature/maxim_thermocouple.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index 54e383231d1e..c31b9633f32d 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi) static const struct spi_device_id maxim_thermocouple_id[] = { {"max6675", MAX6675}, {"max31855", MAX31855}, - {"max31856", MAX31855}, {}, }; MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id); -- GitLab From ff924c5a1ec7548825cc2d07980b03be4224ffac Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 1 Sep 2018 21:01:28 -0700 Subject: [PATCH 0833/1692] x86/pti: Fix section mismatch warning/error Fix the section mismatch warning in arch/x86/mm/pti.c: WARNING: vmlinux.o(.text+0x6972a): Section mismatch in reference from the function pti_clone_pgtable() to the function .init.text:pti_user_pagetable_walk_pte() The function pti_clone_pgtable() references the function __init pti_user_pagetable_walk_pte(). This is often because pti_clone_pgtable lacks a __init annotation or the annotation of pti_user_pagetable_walk_pte is wrong. FATAL: modpost: Section mismatches detected. Fixes: 85900ea51577 ("x86/pti: Map the vsyscall page if needed") Reported-by: kbuild test robot Signed-off-by: Randy Dunlap Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/43a6d6a3-d69d-5eda-da09-0b1c88215a2a@infradead.org --- arch/x86/mm/pti.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 31341ae7309f..c1fc1ae6b429 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) * * Returns a pointer to a PTE on success, or NULL on failure. */ -static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) +static pte_t *pti_user_pagetable_walk_pte(unsigned long address) { gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); pmd_t *pmd; -- GitLab From 370a132bb2227ff76278f98370e0e701d86ff752 Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Tue, 31 Jul 2018 07:27:39 -0400 Subject: [PATCH 0834/1692] x86/microcode: Make sure boot_cpu_data.microcode is up-to-date When preparing an MCE record for logging, boot_cpu_data.microcode is used to read out the microcode revision on the box. However, on systems where late microcode update has happened, the microcode revision output in a MCE log record is wrong because boot_cpu_data.microcode is not updated when the microcode gets updated. But, the microcode revision saved in boot_cpu_data's microcode member should be kept up-to-date, regardless, for consistency. Make it so. 
Fixes: fa94d0c6e0f3 ("x86/MCE: Save microcode revision in machine check records") Signed-off-by: Prarit Bhargava Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Tony Luck Cc: sironi@amazon.de Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20180731112739.32338-1-prarit@redhat.com --- arch/x86/kernel/cpu/microcode/amd.c | 4 ++++ arch/x86/kernel/cpu/microcode/intel.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 0624957aa068..602f17134103 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -537,6 +537,10 @@ static enum ucode_state apply_microcode_amd(int cpu) uci->cpu_sig.rev = mc_amd->hdr.patch_id; c->microcode = mc_amd->hdr.patch_id; + /* Update boot_cpu_data's revision too, if we're on the BSP: */ + if (c->cpu_index == boot_cpu_data.cpu_index) + boot_cpu_data.microcode = mc_amd->hdr.patch_id; + return UCODE_UPDATED; } diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 97ccf4c3b45b..256d336cbc04 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -851,6 +851,10 @@ static enum ucode_state apply_microcode_intel(int cpu) uci->cpu_sig.rev = rev; c->microcode = rev; + /* Update boot_cpu_data's revision too, if we're on the BSP: */ + if (c->cpu_index == boot_cpu_data.cpu_index) + boot_cpu_data.microcode = rev; + return UCODE_UPDATED; } -- GitLab From 8da38ebaad23fe1b0c4a205438676f6356607cfc Mon Sep 17 00:00:00 2001 From: Filippo Sironi Date: Tue, 31 Jul 2018 17:29:30 +0200 Subject: [PATCH 0835/1692] x86/microcode: Update the new microcode revision unconditionally Handle the case where microcode gets loaded on the BSP's hyperthread sibling first and the boot_cpu_data's microcode revision doesn't get updated because of early exit due to the siblings sharing a microcode engine. For that, simply write the updated revision on all CPUs unconditionally. Signed-off-by: Filippo Sironi Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: prarit@redhat.com Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1533050970-14385-1-git-send-email-sironi@amazon.de --- arch/x86/kernel/cpu/microcode/amd.c | 22 +++++++++++++--------- arch/x86/kernel/cpu/microcode/intel.c | 13 ++++++++----- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 602f17134103..07b5fc00b188 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu) struct microcode_amd *mc_amd; struct ucode_cpu_info *uci; struct ucode_patch *p; + enum ucode_state ret; u32 rev, dummy; BUG_ON(raw_smp_processor_id() != cpu); @@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu) /* need to apply patch? 
*/ if (rev >= mc_amd->hdr.patch_id) { - c->microcode = rev; - uci->cpu_sig.rev = rev; - return UCODE_OK; + ret = UCODE_OK; + goto out; } if (__apply_microcode_amd(mc_amd)) { @@ -531,17 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu) cpu, mc_amd->hdr.patch_id); return UCODE_ERROR; } - pr_info("CPU%d: new patch_level=0x%08x\n", cpu, - mc_amd->hdr.patch_id); - uci->cpu_sig.rev = mc_amd->hdr.patch_id; - c->microcode = mc_amd->hdr.patch_id; + rev = mc_amd->hdr.patch_id; + ret = UCODE_UPDATED; + + pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); + +out: + uci->cpu_sig.rev = rev; + c->microcode = rev; /* Update boot_cpu_data's revision too, if we're on the BSP: */ if (c->cpu_index == boot_cpu_data.cpu_index) - boot_cpu_data.microcode = mc_amd->hdr.patch_id; + boot_cpu_data.microcode = rev; - return UCODE_UPDATED; + return ret; } static int install_equiv_cpu_table(const u8 *buf) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 256d336cbc04..16936a24795c 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu) struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_intel *mc; + enum ucode_state ret; static int prev_rev; u32 rev; @@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu) */ rev = intel_get_microcode_revision(); if (rev >= mc->hdr.rev) { - uci->cpu_sig.rev = rev; - c->microcode = rev; - return UCODE_OK; + ret = UCODE_OK; + goto out; } /* @@ -848,14 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu) prev_rev = rev; } + ret = UCODE_UPDATED; + +out: uci->cpu_sig.rev = rev; - c->microcode = rev; + c->microcode = rev; /* Update boot_cpu_data's revision too, if we're on the BSP: */ if (c->cpu_index == boot_cpu_data.cpu_index) boot_cpu_data.microcode = rev; - return UCODE_UPDATED; + return ret; } static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, -- GitLab From 88b35d83a79c19e0d817f500f9306fe3eef43057 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Thu, 30 Aug 2018 11:24:17 -0400 Subject: [PATCH 0836/1692] drm/amdgpu: Use drm_dev_unplug in PCI .remove MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This at least allows to fail any subsequent IOCTLs with -ENODEV after the device is gone. Still this operation is not supported yet in graphic mode and will lead at least to page faults and other issues. 
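The intended behaviour is easier to see in a toy model: once the device
is flagged as unplugged, later entry points bail out with -ENODEV instead
of touching hardware that is gone. The sketch below only illustrates that
pattern and is not the DRM implementation:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
        bool unplugged;  /* what drm_dev_unplug() conceptually sets */
};

static int toy_ioctl(struct toy_dev *dev)
{
        if (dev->unplugged)
                return -ENODEV;  /* fail fast, the hardware is gone */
        /* ... normal ioctl work would go here ... */
        return 0;
}

int main(void)
{
        struct toy_dev dev = { .unplugged = false };

        printf("before removal: %d\n", toy_ioctl(&dev));
        dev.unplugged = true;  /* the PCI .remove() path triggers this */
        printf("after removal:  %d\n", toy_ioctl(&dev));
        return 0;
}
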
Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index d7d9a9d32381..a96ceff8abe3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -978,8 +978,8 @@ amdgpu_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - drm_dev_unregister(dev); - drm_dev_put(dev); + DRM_ERROR("Device removal is currently not supported outside of fbcon\n"); + drm_dev_unplug(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } -- GitLab From 972a21f94631642d6714bb2a1983b7b15a77526d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 31 Aug 2018 15:06:35 +0200 Subject: [PATCH 0837/1692] drm/ttm: fix ttm_bo_bulk_move_helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Staring at the function for six hours, just to essentially move one line of code. The problem was that the first list_cut_position call could result in list2 pointing to la-la-land. Signed-off-by: Christian König Tested-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 35d53d81f486..138c98902033 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -250,15 +250,18 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); static void ttm_bo_bulk_move_helper(struct ttm_lru_bulk_move_pos *pos, struct list_head *lru, bool is_swap) { + struct list_head *list; LIST_HEAD(entries); LIST_HEAD(before); - struct list_head *list1, *list2; - list1 = is_swap ? &pos->last->swap : &pos->last->lru; - list2 = is_swap ? pos->first->swap.prev : pos->first->lru.prev; + reservation_object_assert_held(pos->last->resv); + list = is_swap ? &pos->last->swap : &pos->last->lru; + list_cut_position(&entries, lru, list); + + reservation_object_assert_held(pos->first->resv); + list = is_swap ? pos->first->swap.prev : pos->first->lru.prev; + list_cut_position(&before, &entries, list); - list_cut_position(&entries, lru, list1); - list_cut_position(&before, &entries, list2); list_splice(&before, lru); list_splice_tail(&entries, lru); } -- GitLab From b995795bf09b6bb7847a2a9fc8e6b5b4ab0ce20c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 30 Aug 2018 10:04:53 +0200 Subject: [PATCH 0838/1692] drm/amdgpu: fix "use bulk moves for efficient VM LRU handling" v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit First step to fix the LRU corruption, we accidentially tried to move things on the LRU after dropping the lock. 
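The general ordering the fix restores can be sketched in a few lines of
userspace code: the shared LRU state is only updated while the lock
protecting it is still held, never after it has been dropped. The names
and the integer standing in for the LRU are invented for the illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static int lru_generation;  /* toy stand-in for the shared LRU */

static void submit_job(void)
{
        pthread_mutex_lock(&lru_lock);

        /* ... validate buffers and emit the job ... */

        lru_generation++;  /* "move to LRU tail" while still locked */

        pthread_mutex_unlock(&lru_lock);

        /* Updating lru_generation down here instead would race with
         * concurrent submitters, which is what the broken ordering did. */
}

int main(void)
{
        submit_job();
        printf("generation %d\n", lru_generation);
        return 0;
}
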
Signed-off-by: Christian König Tested-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dd734970e167..349dcc37ee64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1237,6 +1237,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, ring = to_amdgpu_ring(entity->rq->sched); amdgpu_ring_priority_get(ring, priority); + amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); + ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); amdgpu_mn_unlock(p->mn); @@ -1258,7 +1260,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_cs *cs = data; struct amdgpu_cs_parser parser = {}; bool reserved_buffers = false; - struct amdgpu_fpriv *fpriv; int i, r; if (!adev->accel_working) @@ -1303,8 +1304,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) r = amdgpu_cs_submit(&parser, cs); - fpriv = filp->driver_priv; - amdgpu_vm_move_to_lru_tail(adev, &fpriv->vm); out: amdgpu_cs_parser_fini(&parser, r, reserved_buffers); return r; -- GitLab From 5d35ed4832dab334e076a24c18a52776c2f24911 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 31 Aug 2018 11:08:06 +0200 Subject: [PATCH 0839/1692] drm/amdgpu: fix idle state and bulk_moveable flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add BOs to the idle state again and correctly clear the flag when new BOs are added. Signed-off-by: Christian König Tested-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f31fa351caba..d59222fb5931 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -156,12 +156,15 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, return; list_add_tail(&base->bo_list, &bo->va); - if (bo->tbo.type == ttm_bo_type_kernel) - list_move(&base->vm_status, &vm->relocated); - if (bo->tbo.resv != vm->root.base.bo->tbo.resv) return; + vm->bulk_moveable = false; + if (bo->tbo.type == ttm_bo_type_kernel) + list_move(&base->vm_status, &vm->relocated); + else + list_move(&base->vm_status, &vm->idle); + if (bo->preferred_domains & amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) return; @@ -1121,7 +1124,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm_bo_base, vm_status); bo_base->moved = false; - list_del_init(&bo_base->vm_status); + list_move(&bo_base->vm_status, &vm->idle); bo = bo_base->bo->parent; if (!bo) @@ -2646,7 +2649,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; vm->pte_support_ats = false; - vm->bulk_moveable = true; if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & -- GitLab From cd2b56238ef7173e1e0363c9e6d035cfd4f47140 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 30 Aug 2018 09:31:56 -0500 Subject: [PATCH 0840/1692] drm/amdgpu/gmc9: rework stolen vga memory handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No functional change, just rework it in order to adjust the behavior on a per asic level. 
The problem is that on vega10, something corrupts the lower 8 MB of vram on the second resume from S3. This does not seem to affect Raven, other gmc9 based asics need testing. Reviewed-by: Junwei Zhang Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 48 ++++++++++++++++----------- 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 04d50893a6f2..46cff7d8b375 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -692,6 +692,28 @@ static int gmc_v9_0_ecc_available(struct amdgpu_device *adev) return lost_sheep == 0; } +static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) +{ + + /* + * TODO: + * Currently there is a bug where some memory client outside + * of the driver writes to first 8M of VRAM on S3 resume, + * this overrides GART which by default gets placed in first 8M and + * causes VM_FAULTS once GTT is accessed. + * Keep the stolen memory reservation until the while this is not solved. + * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init + */ + switch (adev->asic_type) { + case CHIP_RAVEN: + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_VEGA20: + default: + return true; + } +} + static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -708,10 +730,8 @@ static int gmc_v9_0_late_init(void *handle) unsigned i; int r; - /* - * TODO - Uncomment once GART corruption issue is fixed. - */ - /* amdgpu_bo_late_init(adev); */ + if (!gmc_v9_0_keep_stolen_memory(adev)) + amdgpu_bo_late_init(adev); for(i = 0; i < adev->num_rings; ++i) { struct amdgpu_ring *ring = adev->rings[i]; @@ -848,18 +868,16 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) { -#if 0 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); -#endif unsigned size; /* * TODO Remove once GART corruption is resolved * Check related code in gmc_v9_0_sw_fini * */ - size = 9 * 1024 * 1024; + if (gmc_v9_0_keep_stolen_memory(adev)) + return 9 * 1024 * 1024; -#if 0 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ } else { @@ -876,6 +894,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) break; case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: default: viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * @@ -888,7 +907,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) return 0; -#endif return size; } @@ -1000,16 +1018,8 @@ static int gmc_v9_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - /* - * TODO: - * Currently there is a bug where some memory client outside - * of the driver writes to first 8M of VRAM on S3 resume, - * this overrides GART which by default gets placed in first 8M and - * causes VM_FAULTS once GTT is accessed. - * Keep the stolen memory reservation until the while this is not solved. 
- * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init - */ - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); + if (gmc_v9_0_keep_stolen_memory(adev)) + amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); -- GitLab From 6fb81375db0f959ea7fda02ecc5388f65ab4ac7c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 30 Aug 2018 09:41:12 -0500 Subject: [PATCH 0841/1692] drm/amdgpu/gmc9: don't keep stolen memory on Raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Raven does not appear to be affected by the same issue as vega10. Enable the full stolen memory handling on Raven. Reserve the appropriate size at init time to avoid display artifacts and then free it at the end of init once the new FB is up and running. Bug: https://bugs.freedesktop.org/show_bug.cgi?id=106639 Reviewed-by: Junwei Zhang Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 46cff7d8b375..938d03593713 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -706,6 +706,7 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) */ switch (adev->asic_type) { case CHIP_RAVEN: + return false; case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: -- GitLab From 95010ba79baae078c2bcabda49cf564261d0b796 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 30 Aug 2018 09:44:31 -0500 Subject: [PATCH 0842/1692] drm/amdgpu/gmc9: don't keep stolen memory on vega12 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit vega12 does not appear to be affected by the same issue as vega10. Enable the full stolen memory handling on vega12. Reserve the appropriate size at init time to avoid display artifacts and then free it at the end of init once the new FB is up and running. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 938d03593713..3180113f49ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -707,8 +707,9 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: return false; - case CHIP_VEGA10: case CHIP_VEGA12: + return false; + case CHIP_VEGA10: case CHIP_VEGA20: default: return true; -- GitLab From 6abc0c8f8cf3e0c47707b01f027f9f9b9aa75646 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 30 Aug 2018 09:46:27 -0500 Subject: [PATCH 0843/1692] drm/amdgpu/gmc9: don't keep stolen memory on vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Vega20 does not appear to be affected by the same issue as vega10. Enable the full stolen memory handling on vega20. Reserve the appropriate size at init time to avoid display artifacts and then free it at the end of init once the new FB is up and running. 
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3180113f49ae..f467638eb49d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -705,14 +705,13 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init */ switch (adev->asic_type) { + case CHIP_VEGA10: + return true; case CHIP_RAVEN: - return false; case CHIP_VEGA12: - return false; - case CHIP_VEGA10: case CHIP_VEGA20: default: - return true; + return false; } } -- GitLab From fd65465b7016dc6d9fa5c2f39cc706c231c9a089 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 31 Aug 2018 18:34:55 +0900 Subject: [PATCH 0844/1692] kconfig: do not require pkg-config on make {menu,n}config Meelis Roos reported a {menu,n}config regression: "I have libncurses devel package installed in the default system location (as do 99%+ on actual developers probably) and in this case, pkg-config is useless. pkg-config is needed only when libraries and headers are installed in non-default locations but it is bad to require installation of pkg-config on all the machines where make menuconfig would be possibly run." For {menu,n}config, do not use pkg-config if it is not installed. For {g,x}config, keep checking pkg-config since we really rely on it for finding the installation paths of the required packages. Fixes: 4ab3b80159d4 ("kconfig: check for pkg-config on make {menu,n,g,x}config") Reported-by: Meelis Roos Signed-off-by: Masahiro Yamada Tested-by: Meelis Roos Tested-by: Randy Dunlap --- Documentation/process/changes.rst | 2 +- scripts/kconfig/Makefile | 1 - scripts/kconfig/check-pkgconfig.sh | 8 -------- scripts/kconfig/gconf-cfg.sh | 7 +++++++ scripts/kconfig/mconf-cfg.sh | 25 ++++++++++++++----------- scripts/kconfig/nconf-cfg.sh | 25 ++++++++++++++----------- scripts/kconfig/qconf-cfg.sh | 7 +++++++ 7 files changed, 43 insertions(+), 32 deletions(-) delete mode 100644 scripts/kconfig/check-pkgconfig.sh diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 61f918b10a0c..d1bf143b446f 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -86,7 +86,7 @@ pkg-config The build system, as of 4.18, requires pkg-config to check for installed kconfig tools and to determine flags settings for use in -'make {menu,n,g,x}config'. Previously pkg-config was being used but not +'make {g,x}config'. Previously pkg-config was being used but not verified or documented. Flex diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 4a7bd2192073..67ed9f6ccdf8 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -221,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c # check if necessary packages are available, and configure build flags define filechk_conf_cfg - $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \ $(CONFIG_SHELL) $< endef diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh deleted file mode 100644 index 7a1c40bfb58c..000000000000 --- a/scripts/kconfig/check-pkgconfig.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 -# Check for pkg-config presence - -if [ -z $(command -v pkg-config) ]; then - echo "'make *config' requires 'pkg-config'. Please install it." 
1>&2 - exit 1 -fi diff --git a/scripts/kconfig/gconf-cfg.sh b/scripts/kconfig/gconf-cfg.sh index 533b3d8f8f08..480ecd8b9f41 100755 --- a/scripts/kconfig/gconf-cfg.sh +++ b/scripts/kconfig/gconf-cfg.sh @@ -3,6 +3,13 @@ PKG="gtk+-2.0 gmodule-2.0 libglade-2.0" +if [ -z "$(command -v pkg-config)" ]; then + echo >&2 "*" + echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it." + echo >&2 "*" + exit 1 +fi + if ! pkg-config --exists $PKG; then echo >&2 "*" echo >&2 "* Unable to find the GTK+ installation. Please make sure that" diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh index e6f9facd0077..c812872d7f9d 100755 --- a/scripts/kconfig/mconf-cfg.sh +++ b/scripts/kconfig/mconf-cfg.sh @@ -4,20 +4,23 @@ PKG="ncursesw" PKG2="ncurses" -if pkg-config --exists $PKG; then - echo cflags=\"$(pkg-config --cflags $PKG)\" - echo libs=\"$(pkg-config --libs $PKG)\" - exit 0 -fi +if [ -n "$(command -v pkg-config)" ]; then + if pkg-config --exists $PKG; then + echo cflags=\"$(pkg-config --cflags $PKG)\" + echo libs=\"$(pkg-config --libs $PKG)\" + exit 0 + fi -if pkg-config --exists $PKG2; then - echo cflags=\"$(pkg-config --cflags $PKG2)\" - echo libs=\"$(pkg-config --libs $PKG2)\" - exit 0 + if pkg-config --exists $PKG2; then + echo cflags=\"$(pkg-config --cflags $PKG2)\" + echo libs=\"$(pkg-config --libs $PKG2)\" + exit 0 + fi fi -# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses -# by pkg-config. +# Check the default paths in case pkg-config is not installed. +# (Even if it is installed, some distributions such as openSUSE cannot +# find ncurses by pkg-config.) if [ -f /usr/include/ncursesw/ncurses.h ]; then echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" echo libs=\"-lncursesw\" diff --git a/scripts/kconfig/nconf-cfg.sh b/scripts/kconfig/nconf-cfg.sh index 42f5ac73548e..001559ef0a60 100644 --- a/scripts/kconfig/nconf-cfg.sh +++ b/scripts/kconfig/nconf-cfg.sh @@ -4,20 +4,23 @@ PKG="ncursesw menuw panelw" PKG2="ncurses menu panel" -if pkg-config --exists $PKG; then - echo cflags=\"$(pkg-config --cflags $PKG)\" - echo libs=\"$(pkg-config --libs $PKG)\" - exit 0 -fi +if [ -n "$(command -v pkg-config)" ]; then + if pkg-config --exists $PKG; then + echo cflags=\"$(pkg-config --cflags $PKG)\" + echo libs=\"$(pkg-config --libs $PKG)\" + exit 0 + fi -if pkg-config --exists $PKG2; then - echo cflags=\"$(pkg-config --cflags $PKG2)\" - echo libs=\"$(pkg-config --libs $PKG2)\" - exit 0 + if pkg-config --exists $PKG2; then + echo cflags=\"$(pkg-config --cflags $PKG2)\" + echo libs=\"$(pkg-config --libs $PKG2)\" + exit 0 + fi fi -# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses -# by pkg-config. +# Check the default paths in case pkg-config is not installed. +# (Even if it is installed, some distributions such as openSUSE cannot +# find ncurses by pkg-config.) if [ -f /usr/include/ncursesw/ncurses.h ]; then echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" echo libs=\"-lncursesw -lmenuw -lpanelw\" diff --git a/scripts/kconfig/qconf-cfg.sh b/scripts/kconfig/qconf-cfg.sh index 0862e1562536..02ccc0ae1031 100755 --- a/scripts/kconfig/qconf-cfg.sh +++ b/scripts/kconfig/qconf-cfg.sh @@ -4,6 +4,13 @@ PKG="Qt5Core Qt5Gui Qt5Widgets" PKG2="QtCore QtGui" +if [ -z "$(command -v pkg-config)" ]; then + echo >&2 "*" + echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it." 
+ echo >&2 "*" + exit 1 +fi + if pkg-config --exists $PKG; then echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\" echo libs=\"$(pkg-config --libs $PKG)\" -- GitLab From 914b087ff9e0e9a399a4927fa30793064afc0178 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 28 Aug 2018 12:59:10 -0700 Subject: [PATCH 0845/1692] kbuild: make missing $DEPMOD a Warning instead of an Error When $DEPMOD is not found, only print a warning instead of exiting with an error message and error status: Warning: 'make modules_install' requires /sbin/depmod. Please install it. This is probably in the kmod package. Change the Error to a Warning because "not all build hosts for cross compiling Linux are Linux systems and are able to provide a working port of depmod, especially at the file patch /sbin/depmod." I.e., "make modules_install" may be used to copy/install the loadable modules files to a target directory on a build system and then transferred to an embedded device where /sbin/depmod is run instead of it being run on the build system. Fixes: 934193a654c1 ("kbuild: verify that $DEPMOD is installed") Signed-off-by: Randy Dunlap Reported-by: H. Nikolaus Schaller Cc: stable@vger.kernel.org Cc: Lucas De Marchi Cc: Lucas De Marchi Cc: Michal Marek Cc: Jessica Yu Cc: Chih-Wei Huang Signed-off-by: Masahiro Yamada --- scripts/depmod.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/depmod.sh b/scripts/depmod.sh index 999d585eaa73..e5f0aad75b96 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh @@ -15,9 +15,9 @@ if ! test -r System.map ; then fi if [ -z $(command -v $DEPMOD) ]; then - echo "'make modules_install' requires $DEPMOD. Please install it." >&2 + echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 echo "This is probably in the kmod package." >&2 - exit 1 + exit 0 fi # older versions of depmod require the version string to start with three -- GitLab From a13bf65f3f2e36008ea60b49d3bda2527e09fd9c Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 31 Aug 2018 10:51:14 +0200 Subject: [PATCH 0846/1692] iio: imu: st_lsm6dsx: take into account ts samples in wm configuration Take into account hw timer samples in pattern length computation done in st_lsm6dsx_update_watermark routine for watermark configuration. 
Moreover use samples in pattern (sip) already computed in st_lsm6dsx_update_decimators routine Fixes: 213451076bd3 ("iio: imu: st_lsm6dsx: add hw timestamp support") Signed-off-by: Lorenzo Bianconi Signed-off-by: Jonathan Cameron --- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 7589f2ad1dae..631360b14ca7 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor, int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) { - u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask; + u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask; struct st_lsm6dsx_hw *hw = sensor->hw; struct st_lsm6dsx_sensor *cur_sensor; int i, err, data; __le16 wdata; + if (!hw->sip) + return 0; + for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) { cur_sensor = iio_priv(hw->iio_devs[i]); @@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) : cur_sensor->watermark; fifo_watermark = min_t(u16, fifo_watermark, cur_watermark); - sip += cur_sensor->sip; } - if (!sip) - return 0; - - fifo_watermark = max_t(u16, fifo_watermark, sip); - fifo_watermark = (fifo_watermark / sip) * sip; + fifo_watermark = max_t(u16, fifo_watermark, hw->sip); + fifo_watermark = (fifo_watermark / hw->sip) * hw->sip; fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl; err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1, -- GitLab From 9db39f4d4f94b61e4b64b077f6ddb2bdfb533a88 Mon Sep 17 00:00:00 2001 From: Tushar Dave Date: Fri, 31 Aug 2018 23:45:16 +0200 Subject: [PATCH 0847/1692] bpf: Fix bpf_msg_pull_data() Helper bpf_msg_pull_data() mistakenly reuses variable 'offset' while linearizing multiple scatterlist elements. Variable 'offset' is used to find first starting scatterlist element i.e. msg->data = sg_virt(&sg[first_sg]) + start - offset" Use different variable name while linearizing multiple scatterlist elements so that value contained in variable 'offset' won't get overwritten. 
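The bug pattern here is independent of BPF; a minimal stand-alone sketch (buffer contents and variable names are illustrative, not the helper's own): reusing the variable that records where the data starts as the running copy cursor destroys the start offset, while a separate cursor such as 'poffset' preserves it.

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      const char *chunks[] = { "abc", "defg", "hi" };
      char linear[32];
      size_t offset = 1;   /* start offset into the first chunk; must survive */
      size_t poffset = 0;  /* separate write cursor, as the fix introduces */
      int i;

      for (i = 0; i < 3; i++) {
          size_t len = strlen(chunks[i]);

          memcpy(linear + poffset, chunks[i], len);
          poffset += len;  /* advancing 'offset' here would lose the start */
      }
      linear[poffset] = '\0';
      printf("linearized \"%s\", data begins at \"%s\" (offset %zu intact)\n",
             linear, linear + offset, offset);
      return 0;
  }
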
Fixes: 015632bb30da ("bpf: sk_msg program helper bpf_sk_msg_pull_data") Signed-off-by: Tushar Dave Signed-off-by: Daniel Borkmann --- net/core/filter.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 2c7801f6737a..aecdeba052d3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2292,7 +2292,7 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { BPF_CALL_4(bpf_msg_pull_data, struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) { - unsigned int len = 0, offset = 0, copy = 0; + unsigned int len = 0, offset = 0, copy = 0, poffset = 0; int bytes = end - start, bytes_sg_total; struct scatterlist *sg = msg->sg_data; int first_sg, last_sg, i, shift; @@ -2348,16 +2348,15 @@ BPF_CALL_4(bpf_msg_pull_data, if (unlikely(!page)) return -ENOMEM; p = page_address(page); - offset = 0; i = first_sg; do { from = sg_virt(&sg[i]); len = sg[i].length; - to = p + offset; + to = p + poffset; memcpy(to, from, len); - offset += len; + poffset += len; sg[i].length = 0; put_page(sg_page(&sg[i])); -- GitLab From 97911e0ccb54c7478b9dac77e6c54c01b449e3a7 Mon Sep 17 00:00:00 2001 From: Prashant Bhole Date: Fri, 31 Aug 2018 15:32:42 +0900 Subject: [PATCH 0848/1692] tools/bpf: bpftool, add xskmap in map types When listed all maps, bpftool currently shows (null) for xskmap. Added xskmap type in map_type_name[] to show correct type. Signed-off-by: Prashant Bhole Acked-by: Jakub Kicinski Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/map.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index b2ec20e562bd..b455930a3eaf 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -68,6 +68,7 @@ static const char * const map_type_name[] = { [BPF_MAP_TYPE_DEVMAP] = "devmap", [BPF_MAP_TYPE_SOCKMAP] = "sockmap", [BPF_MAP_TYPE_CPUMAP] = "cpumap", + [BPF_MAP_TYPE_XSKMAP] = "xskmap", [BPF_MAP_TYPE_SOCKHASH] = "sockhash", [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", }; -- GitLab From 597222f72a94118f593e4f32bf58ae7e049a0df1 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 30 Aug 2018 21:25:02 -0700 Subject: [PATCH 0849/1692] bpf: avoid misuse of psock when TCP_ULP_BPF collides with another ULP Currently we check sk_user_data is non NULL to determine if the sk exists in a map. However, this is not sufficient to ensure the psock or the ULP ops are not in use by another user, such as kcm or TLS. To avoid this when adding a sock to a map also verify it is of the correct ULP type. Additionally, when releasing a psock verify that it is the TCP_ULP_BPF type before releasing the ULP. The error case where we abort an update due to ULP collision can cause this error path. For example, __sock_map_ctx_update_elem() [...] err = tcp_set_ulp_id(sock, TCP_ULP_BPF) <- collides with TLS if (err) <- so err out here goto out_free [...] out_free: smap_release_sock() <- calling tcp_cleanup_ulp releases the TLS ULP incorrectly. 
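The same idea in stand-alone form, with the kernel specifics stripped away (the struct, the "bpf"/"tls" owner tags and the ownership test are all illustrative): on release, only the user that actually installed the shared state is allowed to tear it down.

  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  struct sock_like {
      const char *ulp_owner;  /* who installed the ULP-like state */
      int refcnt;
  };

  static bool is_ours(const struct sock_like *sk)
  {
      return sk->ulp_owner && strcmp(sk->ulp_owner, "bpf") == 0;
  }

  static void release(struct sock_like *sk)
  {
      if (--sk->refcnt == 0) {
          if (is_ours(sk)) {
              printf("cleaning up our state\n");
              sk->ulp_owner = NULL;
          } else {
              /* another user (kcm, TLS, ...) still owns it */
              printf("state owned by %s, leaving it alone\n",
                     sk->ulp_owner ? sk->ulp_owner : "nobody");
          }
      }
  }

  int main(void)
  {
      struct sock_like tls_sk = { "tls", 1 };  /* collided update: not ours */
      struct sock_like bpf_sk = { "bpf", 1 };

      release(&tls_sk);  /* must not destroy the other user's state */
      release(&bpf_sk);  /* ours, safe to clean up */
      return 0;
  }
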
Fixes: 2f857d04601a ("bpf: sockmap, remove STRPARSER map_flags and add multi-map support") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index ce63e5801746..488ef9663c01 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -1462,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu) schedule_work(&psock->gc_work); } +static bool psock_is_smap_sk(struct sock *sk) +{ + return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops; +} + static void smap_release_sock(struct smap_psock *psock, struct sock *sock) { if (refcount_dec_and_test(&psock->refcnt)) { - tcp_cleanup_ulp(sock); + if (psock_is_smap_sk(sock)) + tcp_cleanup_ulp(sock); write_lock_bh(&sock->sk_callback_lock); smap_stop_sock(psock, sock); write_unlock_bh(&sock->sk_callback_lock); @@ -1892,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map, * doesn't update user data. */ if (psock) { + if (!psock_is_smap_sk(sock)) { + err = -EBUSY; + goto out_progs; + } if (READ_ONCE(psock->bpf_parse) && parse) { err = -EBUSY; goto out_progs; -- GitLab From 4fb7253e4f9a8f06a986a3b317e2f79d9b43d552 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 29 Aug 2018 18:06:08 +0800 Subject: [PATCH 0850/1692] igmp: fix incorrect unsolicit report count when join group We should not start timer if im->unsolicit_count equal to 0 after decrease. Or we will send one more unsolicit report message. i.e. 3 instead of 2 by default. Fixes: 1da177e4c3f41 ("Linux-2.6.12-rc2") Signed-off-by: Hangbin Liu Signed-off-by: David S. Miller --- net/ipv4/igmp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index cf75f8944b05..deb1f8274d71 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t) spin_lock(&im->lock); im->tm_running = 0; - if (im->unsolicit_count) { - im->unsolicit_count--; + if (im->unsolicit_count && --im->unsolicit_count) igmp_start_timer(im, unsolicited_report_interval(in_dev)); - } + im->reporter = 1; spin_unlock(&im->lock); -- GitLab From ff06525fcb8ae3c302ac1319bf6c07c026dea964 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 29 Aug 2018 18:06:10 +0800 Subject: [PATCH 0851/1692] igmp: fix incorrect unsolicit report count after link down and up After link down and up, i.e. when call ip_mc_up(), we doesn't init im->unsolicit_count. So after igmp_timer_expire(), we will not start timer again and only send one unsolicit report at last. Fix it by initializing im->unsolicit_count in igmp_group_added(), so we can respect igmp robustness value. Fixes: 24803f38a5c0b ("igmp: do not remove igmp souce list info when set link down") Signed-off-by: Hangbin Liu Signed-off-by: David S. 
Miller --- net/ipv4/igmp.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index deb1f8274d71..4da39446da2d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1307,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im) if (in_dev->dead) return; + + im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { spin_lock_bh(&im->lock); igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); @@ -1390,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) { struct ip_mc_list *im; -#ifdef CONFIG_IP_MULTICAST - struct net *net = dev_net(in_dev->dev); -#endif ASSERT_RTNL(); @@ -1419,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, spin_lock_init(&im->lock); #ifdef CONFIG_IP_MULTICAST timer_setup(&im->timer, igmp_timer_expire, 0); - im->unsolicit_count = net->ipv4.sysctl_igmp_qrv; #endif im->next_rcu = in_dev->mc_list; -- GitLab From 10d7fac4c52618d94a42d701d28f114147291ecc Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 29 Aug 2018 08:00:23 -0700 Subject: [PATCH 0852/1692] dt-bindings: net: cpsw: Document cpsw-phy-sel usage but prefer phandle The current cpsw usage for cpsw-phy-sel is undocumented but is used for all the boards using cpsw. And cpsw-phy-sel is not really a child of the cpsw device, it lives in the system control module instead. Let's document the existing usage, and improve it a bit where we prefer to use a phandle instead of a child device for it. That way we can properly describe the hardware in dts files for things like genpd. Cc: devicetree@vger.kernel.org Cc: Andrew Lunn Cc: Grygorii Strashko Cc: Ivan Khoronzhuk Cc: Mark Rutland Cc: Murali Karicheri Cc: Rob Herring Signed-off-by: Tony Lindgren Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/cpsw.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt index 41089369f891..b3acebe08eb0 100644 --- a/Documentation/devicetree/bindings/net/cpsw.txt +++ b/Documentation/devicetree/bindings/net/cpsw.txt @@ -19,6 +19,10 @@ Required properties: - slaves : Specifies number for slaves - active_slave : Specifies the slave to use for time stamping, ethtool and SIOCGMIIPHY +- cpsw-phy-sel : Specifies the phandle to the CPSW phy mode selection + device. See also cpsw-phy-sel.txt for it's binding. + Note that in legacy cases cpsw-phy-sel may be + a child device instead of a phandle. Optional properties: - ti,hwmods : Must be "cpgmac0" @@ -75,6 +79,7 @@ Examples: cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; syscon = <&cm>; + cpsw-phy-sel = <&phy_sel>; cpsw_emac0: slave@0 { phy_id = <&davinci_mdio>, <0>; phy-mode = "rgmii-txid"; @@ -103,6 +108,7 @@ Examples: cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; syscon = <&cm>; + cpsw-phy-sel = <&phy_sel>; cpsw_emac0: slave@0 { phy_id = <&davinci_mdio>, <0>; phy-mode = "rgmii-txid"; -- GitLab From 18eb8aea7fb2fb4490e578b1b8a1096c34b2fc48 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 29 Aug 2018 08:00:24 -0700 Subject: [PATCH 0853/1692] net: ethernet: cpsw-phy-sel: prefer phandle for phy sel The cpsw-phy-sel device is not a child of the cpsw interconnect target module. It lives in the system control module. Let's fix this issue by trying to use cpsw-phy-sel phandle first if it exists and if not fall back to current usage of trying to find the cpsw-phy-sel child. 
That way the phy sel driver can be a child of the system control module where it belongs in the device tree. Without this fix, we cannot have a proper interconnect target module hierarchy in device tree for things like genpd. Note that deferred probe is mostly not supported by cpsw and this patch does not attempt to fix that. In case deferred probe support is needed, this could be added to cpsw_slave_open() and phy_connect() so they start handling and returning errors. For documenting it, looks like the cpsw-phy-sel is used for all cpsw device tree nodes. It's missing the related binding documentation, so let's also update the binding documentation accordingly. Cc: devicetree@vger.kernel.org Cc: Andrew Lunn Cc: Grygorii Strashko Cc: Ivan Khoronzhuk Cc: Mark Rutland Cc: Murali Karicheri Cc: Rob Herring Signed-off-by: Tony Lindgren Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw-phy-sel.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 0c1adad7415d..396e1cd10667 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) struct device_node *node; struct cpsw_phy_sel_priv *priv; - node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); + node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0); if (!node) { - dev_err(dev, "Phy mode driver DT not found\n"); - return; + node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); + if (!node) { + dev_err(dev, "Phy mode driver DT not found\n"); + return; + } } dev = bus_find_device(&platform_bus_type, NULL, node, match); -- GitLab From 15a81b418e22a9aa4a0504471fdcb0f4ebf69b96 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 30 Aug 2018 14:15:43 -0700 Subject: [PATCH 0854/1692] net/ipv6: Only update MTU metric if it set Jan reported a regression after an update to 4.18.5. In this case ipv6 default route is setup by systemd-networkd based on data from an RA. The RA contains an MTU of 1492 which is used when the route is first inserted but then systemd-networkd pushes down updates to the default route without the mtu set. Prior to the change to fib6_info, metrics such as MTU were held in the dst_entry and rt6i_pmtu in rt6_info contained an update to the mtu if any. ip6_mtu would look at rt6i_pmtu first and use it if set. If not, the value from the metrics is used if it is set and finally falling back to the idev value. After the fib6_info change metrics are contained in the fib6_info struct and there is no equivalent to rt6i_pmtu. To maintain consistency with the old behavior the new code should only reset the MTU in the metrics if the route update has it set. Fixes: d4ead6b34b67 ("net/ipv6: move metrics from dst to rt6_info") Reported-by: Jan Janssen Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/ip6_fib.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index c861a6d4671d..5516f55e214b 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -989,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt, fib6_clean_expires(iter); else fib6_set_expires(iter, rt->expires); - fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); + + if (rt->fib6_pmtu) + fib6_metric_set(iter, RTAX_MTU, + rt->fib6_pmtu); return -EEXIST; } /* If we have the same destination and the same metric, -- GitLab From 57361846b52bc686112da6ca5368d11210796804 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 2 Sep 2018 14:37:30 -0700 Subject: [PATCH 0855/1692] Linux 4.19-rc2 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a34a9283ee90..19948e556941 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Merciless Moray # *DOCUMENTATION* -- GitLab From 38f5d8d8cbb2ffa2b54315118185332329ec891c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 31 Aug 2018 23:30:47 +0900 Subject: [PATCH 0856/1692] i2c: uniphier: issue STOP only for last message or I2C_M_STOP This driver currently emits a STOP if the next message is not I2C_MD_RD. It should not do it because it disturbs the I2C_RDWR ioctl, where read/write transactions are combined without STOP between. Issue STOP only when the message is the last one _or_ flagged with I2C_M_STOP. Fixes: dd6fd4a32793 ("i2c: uniphier: add UniPhier FIFO-less I2C driver") Signed-off-by: Masahiro Yamada Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-uniphier.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index bb181b088291..454f914ae66d 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c @@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap, return ret; for (msg = msgs; msg < emsg; msg++) { - /* If next message is read, skip the stop condition */ - bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); - /* but, force it if I2C_M_STOP is set */ - if (msg->flags & I2C_M_STOP) - stop = true; + /* Emit STOP if it is the last message or I2C_M_STOP is set. */ + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); ret = uniphier_i2c_master_xfer_one(adap, msg, stop); if (ret) -- GitLab From 4c85609b08c4761eca0a40fd7beb06bc650f252d Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 31 Aug 2018 23:30:48 +0900 Subject: [PATCH 0857/1692] i2c: uniphier-f: issue STOP only for last message or I2C_M_STOP This driver currently emits a STOP if the next message is not I2C_MD_RD. It should not do it because it disturbs the I2C_RDWR ioctl, where read/write transactions are combined without STOP between. Issue STOP only when the message is the last one _or_ flagged with I2C_M_STOP. 
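The old and new STOP rules can be compared in isolation; a small stand-alone sketch (the flag values and struct are stand-ins, not the kernel's i2c_msg): for a combined write+write+read transfer issued via I2C_RDWR, the old rule inserts a spurious STOP after the first write, while the new rule stops only at the end.

  #include <stdbool.h>
  #include <stdio.h>

  #define M_RD   0x0001  /* illustrative stand-ins for I2C_M_RD / I2C_M_STOP */
  #define M_STOP 0x8000

  struct msg { unsigned flags; };

  /* Old rule: skip STOP only when the next message is a read. */
  static bool stop_old(const struct msg *msg, const struct msg *emsg)
  {
      bool stop = !(msg + 1 < emsg && (msg[1].flags & M_RD));

      if (msg->flags & M_STOP)
          stop = true;
      return stop;
  }

  /* New rule: STOP only on the last message or when M_STOP is set. */
  static bool stop_new(const struct msg *msg, const struct msg *emsg)
  {
      return (msg + 1 == emsg) || (msg->flags & M_STOP);
  }

  int main(void)
  {
      /* write + write + read, as combined by an I2C_RDWR ioctl */
      struct msg msgs[] = { { 0 }, { 0 }, { M_RD } };
      struct msg *emsg = msgs + 3;

      for (struct msg *m = msgs; m < emsg; m++)
          printf("msg %ld: old stop=%d new stop=%d\n",
                 (long)(m - msgs), stop_old(m, emsg), stop_new(m, emsg));
      /* The old rule stops after msg 0 (next is not a read), which is the
       * spurious STOP in the middle of the combined transaction. */
      return 0;
  }
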
Fixes: 6a62974b667f ("i2c: uniphier_f: add UniPhier FIFO-builtin I2C driver") Signed-off-by: Masahiro Yamada Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-uniphier-f.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index 9918bdd81619..a403e8579b65 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c @@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap, return ret; for (msg = msgs; msg < emsg; msg++) { - /* If next message is read, skip the stop condition */ - bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); - /* but, force it if I2C_M_STOP is set */ - if (msg->flags & I2C_M_STOP) - stop = true; + /* Emit STOP if it is the last message or I2C_M_STOP is set. */ + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP); ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); if (ret) -- GitLab From f6eb89349078b2ca3e42dbb272ab444bab1b9f42 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 31 Aug 2018 10:24:14 -0300 Subject: [PATCH 0858/1692] dt-bindings: imx-lpi2c: Remove mx8dv compatible entry mx8dv never entered into production and there is no other place in the kernel referring to this SoC, so remove it from the dt bindings documentation. Signed-off-by: Fabio Estevam Signed-off-by: Wolfram Sang --- Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt index 00e4365d7206..091c8dfd3229 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt @@ -3,7 +3,6 @@ Required properties: - compatible : - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc - - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc - reg : address and length of the lpi2c master registers - interrupts : lpi2c interrupt - clocks : lpi2c clock specifier @@ -11,7 +10,7 @@ Required properties: Examples: lpi2c7: lpi2c7@40a50000 { - compatible = "fsl,imx8dv-lpi2c"; + compatible = "fsl,imx7ulp-lpi2c"; reg = <0x40A50000 0x10000>; interrupt-parent = <&intc>; interrupts = ; -- GitLab From 20fdcd760a63ea66335c58bceb175e7712ab18ff Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 31 Aug 2018 10:24:15 -0300 Subject: [PATCH 0859/1692] i2c: imx-lpi2c: Remove mx8dv compatible entry mx8dv never entered into production and there is no other place in the kernel referring to this SoC, so remove it from the driver's compatible entry. 
Signed-off-by: Fabio Estevam Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-imx-lpi2c.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 6d975f5221ca..06c4c767af32 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = { static const struct of_device_id lpi2c_imx_of_match[] = { { .compatible = "fsl,imx7ulp-lpi2c" }, - { .compatible = "fsl,imx8dv-lpi2c" }, { }, }; MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); -- GitLab From 16fe10cf92783ed9ceb182d6ea2b8adf5e8ec1b8 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 1 Sep 2018 20:11:05 +0800 Subject: [PATCH 0860/1692] net: cadence: Fix a sleep-in-atomic-context bug in macb_halt_tx() The kernel module may sleep with holding a spinlock. The function call paths (from bottom to top) in Linux-4.16 are: [FUNC] usleep_range drivers/net/ethernet/cadence/macb_main.c, 648: usleep_range in macb_halt_tx drivers/net/ethernet/cadence/macb_main.c, 730: macb_halt_tx in macb_tx_error_task drivers/net/ethernet/cadence/macb_main.c, 721: _raw_spin_lock_irqsave in macb_tx_error_task To fix this bug, usleep_range() is replaced with udelay(). This bug is found by my static analysis tool DSAC. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index c6707ea2d751..16e4ef7d7185 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -649,7 +649,7 @@ static int macb_halt_tx(struct macb *bp) if (!(status & MACB_BIT(TGO))) return 0; - usleep_range(10, 250); + udelay(250); } while (time_before(halt_time, timeout)); return -ETIMEDOUT; -- GitLab From 59a03fea131d671a57b8ed3dc446264c61d4b75f Mon Sep 17 00:00:00 2001 From: Vinson Lee Date: Sat, 1 Sep 2018 21:20:27 +0000 Subject: [PATCH 0861/1692] uapi: Fix linux/rds.h userspace compilation errors. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Include linux/in6.h for struct in6_addr. /usr/include/linux/rds.h:156:18: error: field ‘laddr’ has incomplete type struct in6_addr laddr; ^~~~~ /usr/include/linux/rds.h:157:18: error: field ‘faddr’ has incomplete type struct in6_addr faddr; ^~~~~ /usr/include/linux/rds.h:178:18: error: field ‘laddr’ has incomplete type struct in6_addr laddr; ^~~~~ /usr/include/linux/rds.h:179:18: error: field ‘faddr’ has incomplete type struct in6_addr faddr; ^~~~~ /usr/include/linux/rds.h:198:18: error: field ‘bound_addr’ has incomplete type struct in6_addr bound_addr; ^~~~~~~~~~ /usr/include/linux/rds.h:199:18: error: field ‘connected_addr’ has incomplete type struct in6_addr connected_addr; ^~~~~~~~~~~~~~ /usr/include/linux/rds.h:219:18: error: field ‘local_addr’ has incomplete type struct in6_addr local_addr; ^~~~~~~~~~ /usr/include/linux/rds.h:221:18: error: field ‘peer_addr’ has incomplete type struct in6_addr peer_addr; ^~~~~~~~~ /usr/include/linux/rds.h:245:18: error: field ‘src_addr’ has incomplete type struct in6_addr src_addr; ^~~~~~~~ /usr/include/linux/rds.h:246:18: error: field ‘dst_addr’ has incomplete type struct in6_addr dst_addr; ^~~~~~~~ Fixes: b7ff8b1036f0 ("rds: Extend RDS API for IPv6 support") Signed-off-by: Vinson Lee Acked-by: Santosh Shilimkar Signed-off-by: David S. 
Miller --- include/uapi/linux/rds.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index dc520e1a4123..8b73cb603c5f 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -37,6 +37,7 @@ #include #include /* For __kernel_sockaddr_storage. */ +#include /* For struct in6_addr. */ #define RDS_IB_ABI_VERSION 0x301 -- GitLab From c90bbce9eedd14f9428388d9442b5b3e98a2a6dd Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 20 Aug 2018 08:25:37 +0300 Subject: [PATCH 0862/1692] m68k: fix early memory reservation for ColdFire MMU systems The bootmem to memblock conversion introduced by the commit 1008a11590b9 ("m68k: switch to MEMBLOCK + NO_BOOTMEM") made reservation of kernel code and data to start from a wrong address. Fix it. Signed-off-by: Mike Rapoport Tested-by: Angelo Dureghello Signed-off-by: Greg Ungerer --- arch/m68k/mm/mcfmmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 70dde040779b..f5453d944ff5 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -172,7 +172,7 @@ void __init cf_bootmem_alloc(void) high_memory = (void *)_ramend; /* Reserve kernel text/data/bss */ - memblock_reserve(memstart, memstart - _rambase); + memblock_reserve(_rambase, memstart - _rambase); m68k_virt_to_node_shift = fls(_ramend - 1) - 6; module_fixup(NULL, __start_fixup, __stop_fixup); -- GitLab From c15e3f19a6d5c89b1209dc94b40e568177cb0921 Mon Sep 17 00:00:00 2001 From: Jon Kuhn Date: Mon, 9 Jul 2018 14:33:14 +0000 Subject: [PATCH 0863/1692] fs/cifs: don't translate SFM_SLASH (U+F026) to backslash When a Mac client saves an item containing a backslash to a file server the backslash is represented in the CIFS/SMB protocol as as U+F026. Before this change, listing a directory containing an item with a backslash in its name will return that item with the backslash represented with a true backslash character (U+005C) because convert_sfm_character mapped U+F026 to U+005C when interpretting the CIFS/SMB protocol response. However, attempting to open or stat the path using a true backslash will result in an error because convert_to_sfm_char does not map U+005C back to U+F026 causing the CIFS/SMB request to be made with the backslash represented as U+005C. This change simply prevents the U+F026 to U+005C conversion from happenning. This is analogous to how the code does not do any translation of UNI_SLASH (U+F000). Signed-off-by: Jon Kuhn Signed-off-by: Steve French --- fs/cifs/cifs_unicode.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index b380e0871372..a2b2355e7f01 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target) case SFM_LESSTHAN: *target = '<'; break; - case SFM_SLASH: - *target = '\\'; - break; case SFM_SPACE: *target = ' '; break; -- GitLab From 5e19697b56a64004e2d0ff1bb952ea05493c088f Mon Sep 17 00:00:00 2001 From: Steve French Date: Mon, 27 Aug 2018 17:04:13 -0500 Subject: [PATCH 0864/1692] SMB3: Backup intent flag missing for directory opens with backupuid mounts When "backup intent" is requested on the mount (e.g. backupuid or backupgid mount options), the corresponding flag needs to be set on opens of directories (and files) but was missing in some places causing access denied trying to enumerate and backup servers. 
Fixes kernel bugzilla #200953 https://bugzilla.kernel.org/show_bug.cgi?id=200953 Reported-and-tested-by: Signed-off-by: Steve French CC: Stable Reviewed-by: Pavel Shilovsky --- fs/cifs/inode.c | 2 ++ fs/cifs/smb2ops.c | 25 ++++++++++++++++++++----- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index d32eaa4b2437..6e8765f44508 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path, oparms.cifs_sb = cifs_sb; oparms.desired_access = GENERIC_READ; oparms.create_options = CREATE_NOT_DIR; + if (backup_cred(cifs_sb)) + oparms.create_options |= CREATE_OPEN_BACKUP_INTENT; oparms.disposition = FILE_OPEN; oparms.path = path; oparms.fid = &fid; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 247a98e6c856..b6fada244527 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -630,7 +630,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_READ_ATTRIBUTES; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = &fid; oparms.reconnect = false; @@ -779,7 +782,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_READ_EA; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = &fid; oparms.reconnect = false; @@ -858,7 +864,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_WRITE_EA; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = &fid; oparms.reconnect = false; @@ -1453,7 +1462,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = fid; oparms.reconnect = false; @@ -1857,7 +1869,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, oparms.tcon = tcon; oparms.desired_access = FILE_READ_ATTRIBUTES; oparms.disposition = FILE_OPEN; - oparms.create_options = 0; + if (backup_cred(cifs_sb)) + oparms.create_options = CREATE_OPEN_BACKUP_INTENT; + else + oparms.create_options = 0; oparms.fid = &fid; oparms.reconnect = false; -- GitLab From 25f2573512d7b38bca4c0878109db9600b8b711f Mon Sep 17 00:00:00 2001 From: Steve French Date: Wed, 29 Aug 2018 09:22:22 -0500 Subject: [PATCH 0865/1692] smb3: minor debugging clarifications in rfc1001 len processing I ran into some cases where server was returning the wrong length on frames but I couldn't easily match them to the command in the network trace (or server logs) since I need the command and/or multiplex id to find the offending SMB2/SMB3 command. Add these two fields to the log message. In the case of padding too much it may not be a problem in all cases but might have correlated to a network disconnect case in some problems we have been looking at. In the case of frame too short is even more important. 
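The check being made more verbose can be sketched on its own (function name, return values and the sample numbers are illustrative): a declared RFC1001 length larger than the calculated one is tolerated as server padding but logged together with the command and mid, while a shorter one is a hard error.

  #include <stdio.h>

  /* 'len' is the length claimed by the RFC1001 header, 'clc_len' the
   * length calculated from the parsed SMB2/SMB3 response. */
  static int check_rsp_len(unsigned int len, unsigned int clc_len,
                           int cmd, unsigned long long mid)
  {
      if (clc_len == len)
          return 0;
      if (clc_len < len) {
          /* Server padded the frame: tolerated, but logged so the frame
           * can be matched against a network trace by cmd and mid. */
          fprintf(stderr, "rsp padded: len %u not %u, cmd %d mid %llu\n",
                  len, clc_len, cmd, mid);
          return 0;
      }
      /* Frame shorter than the parsed contents require: a real error. */
      fprintf(stderr, "rsp too short: len %u not %u, cmd %d mid %llu\n",
              len, clc_len, cmd, mid);
      return 1;
  }

  int main(void)
  {
      return check_rsp_len(103, 100, 9 /* illustrative command */, 42);
  }
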
Signed-off-by: Steve French Reviewed-by: Ronnie Sahlberg --- fs/cifs/smb2misc.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index db0453660ff6..6a9c47541c53 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -248,16 +248,20 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) * MacOS server pads after SMB2.1 write response with 3 bytes * of junk. Other servers match RFC1001 len to actual * SMB2/SMB3 frame length (header + smb2 response specific data) - * Some windows servers do too when compounding is used. - * Log the server error (once), but allow it and continue + * Some windows servers also pad up to 8 bytes when compounding. + * If pad is longer than eight bytes, log the server behavior + * (once), since may indicate a problem but allow it and continue * since the frame is parseable. */ if (clc_len < len) { - printk_once(KERN_WARNING - "SMB2 server sent bad RFC1001 len %d not %d\n", - len, clc_len); + pr_warn_once( + "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n", + len, clc_len, command, mid); return 0; } + pr_warn_once( + "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n", + len, clc_len, command, mid); return 1; } -- GitLab From f801568332321e2b1e7a8bd26c3e4913a312a2ec Mon Sep 17 00:00:00 2001 From: Steve French Date: Fri, 31 Aug 2018 15:12:10 -0500 Subject: [PATCH 0866/1692] smb3: check for and properly advertise directory lease support Although servers will typically ignore unsupported features, we should advertise the support for directory leases (as Windows e.g. does) in the negotiate protocol capabilities we pass to the server, and should check for the server capability (CAP_DIRECTORY_LEASING) before sending a lease request for an open of a directory. This will prevent us from accidentally sending directory leases to SMB2.1 or SMB2 server for example. 
Signed-off-by: Steve French CC: Stable Reviewed-by: Ronnie Sahlberg --- fs/cifs/smb2ops.c | 10 +++++----- fs/cifs/smb2pdu.c | 3 +++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index b6fada244527..d954ce36b473 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -3654,7 +3654,7 @@ struct smb_version_values smb21_values = { struct smb_version_values smb3any_values = { .version_string = SMB3ANY_VERSION_STRING, .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, @@ -3675,7 +3675,7 @@ struct smb_version_values smb3any_values = { struct smb_version_values smbdefault_values = { .version_string = SMBDEFAULT_VERSION_STRING, .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, @@ -3696,7 +3696,7 @@ struct smb_version_values smbdefault_values = { struct smb_version_values smb30_values = { .version_string = SMB30_VERSION_STRING, .protocol_id = SMB30_PROT_ID, - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, @@ -3717,7 +3717,7 @@ struct smb_version_values smb30_values = { struct smb_version_values smb302_values = { .version_string = SMB302_VERSION_STRING, .protocol_id = SMB302_PROT_ID, - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, @@ -3738,7 +3738,7 @@ struct smb_version_values smb302_values = { struct smb_version_values smb311_values = { .version_string = SMB311_VERSION_STRING, .protocol_id = SMB311_PROT_ID, - .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | 
SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING, .large_lock_type = 0, .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 5740aa809be6..c08acfc77abc 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2178,6 +2178,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock, if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || *oplock == SMB2_OPLOCK_LEVEL_NONE) req->RequestedOplockLevel = *oplock; + else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) && + (oparms->create_options & CREATE_NOT_FILE)) + req->RequestedOplockLevel = *oplock; /* no srv lease support */ else { rc = add_lease_context(server, iov, &n_iov, oparms->fid->lease_key, oplock); -- GitLab From 395a2076b4064f97d3fce03af15210ff2a7bb7f9 Mon Sep 17 00:00:00 2001 From: Thomas Werschlein Date: Thu, 30 Aug 2018 18:29:20 +0200 Subject: [PATCH 0867/1692] cifs: connect to servername instead of IP for IPC$ share MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch is required allows access to a Microsoft fileserver failover cluster behind a 1:1 NAT firewall. The change also provides stronger context for authentication and share connection (see MS-SMB2 3.3.5.7 and MS-SRVS 3.1.6.8) as noted by Tom Talpey, and addresses comments about the buffer size for the UNC made by Aurélien Aptel. Signed-off-by: Thomas Werschlein Signed-off-by: Steve French CC: Tom Talpey Reviewed-by: Aurelien Aptel CC: Stable --- fs/cifs/connect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index c832a8a1970a..7aa08dba4719 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2547,7 +2547,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info) if (tcon == NULL) return -ENOMEM; - snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName); + snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname); /* cannot fail */ nls_codepage = load_nls_default(); -- GitLab From 54ff01fd0d44b9681615f77c15fe9ea6dfadb501 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Thu, 30 Aug 2018 11:33:43 +0800 Subject: [PATCH 0868/1692] drm/i915/gvt: Give new born vGPU higher scheduling chance This trys to give new born vGPU with higher scheduling chance not only with adding to sched list head and also have higher priority for workload sched for 2 seconds after starting to schedule it. In order for fast GPU execution during VM boot, and ensure guest driver setup with required state given in time. This fixes recent failure seen on one VM with multiple linux VMs running on kernel with commit 2621cefaa42b3("drm/i915: Provide a timeout to i915_gem_wait_for_idle() on setup"), which had shorter setup timeout that caused context state init failed. 
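A stand-alone sketch of the scheduling idea with the GVT specifics removed (the struct, the 2-second window and the queue ordering are illustrative): a newly started entity is placed at the head of the run queue and is chosen ahead of the usual time-slice accounting until its priority window expires.

  #include <stdbool.h>
  #include <stdio.h>
  #include <time.h>

  struct entity {
      const char *name;
      bool pri_sched;       /* inside its start-up boost window? */
      time_t pri_deadline;  /* when the boost window ends */
      int left_ts;          /* remaining time slice */
  };

  static struct entity *pick(struct entity *list, int n)
  {
      time_t now = time(NULL);

      for (int i = 0; i < n; i++) {
          struct entity *e = &list[i];

          if (e->pri_sched) {
              if (now < e->pri_deadline)
                  return e;          /* still inside its boost window */
              e->pri_sched = false;  /* window over, back to normal */
          }
          if (e->left_ts > 0)
              return e;
      }
      return NULL;
  }

  int main(void)
  {
      /* The new entity sits first, mirroring the switch from adding at
       * the tail of the run queue to adding at the head. */
      struct entity vgpus[] = {
          { "vgpu-new", true,  time(NULL) + 2, 5 },
          { "vgpu-old", false, 0,              5 },
      };
      struct entity *e = pick(vgpus, 2);

      printf("picked %s\n", e ? e->name : "none");
      return 0;
  }
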
v2: change to 2s for higher scheduling period Cc: Yuan Hang Reviewed-by: Hang Yuan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/sched_policy.c | 34 ++++++++++++++++++++----- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 985fe81794dd..c32e7d5e8629 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) return false; } +/* We give 2 seconds higher prio for vGPU during start */ +#define GVT_SCHED_VGPU_PRI_TIME 2 + struct vgpu_sched_data { struct list_head lru_list; struct intel_vgpu *vgpu; bool active; - + bool pri_sched; + ktime_t pri_time; ktime_t sched_in_time; ktime_t sched_time; ktime_t left_ts; @@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) if (!vgpu_has_pending_workload(vgpu_data->vgpu)) continue; + if (vgpu_data->pri_sched) { + if (ktime_before(ktime_get(), vgpu_data->pri_time)) { + vgpu = vgpu_data->vgpu; + break; + } else + vgpu_data->pri_sched = false; + } + /* Return the vGPU only if it has time slice left */ if (vgpu_data->left_ts > 0) { vgpu = vgpu_data->vgpu; @@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; + /* no active vgpu or has already had a target */ if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) goto out; @@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) vgpu = find_busy_vgpu(sched_data); if (vgpu) { scheduler->next_vgpu = vgpu; - - /* Move the last used vGPU to the tail of lru_list */ vgpu_data = vgpu->sched_data; - list_del_init(&vgpu_data->lru_list); - list_add_tail(&vgpu_data->lru_list, - &sched_data->lru_runq_head); + if (!vgpu_data->pri_sched) { + /* Move the last used vGPU to the tail of lru_list */ + list_del_init(&vgpu_data->lru_list); + list_add_tail(&vgpu_data->lru_list, + &sched_data->lru_runq_head); + } } else { scheduler->next_vgpu = gvt->idle_vgpu; } @@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) { struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + ktime_t now; if (!list_empty(&vgpu_data->lru_list)) return; - list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); + now = ktime_get(); + vgpu_data->pri_time = ktime_add(now, + ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0)); + vgpu_data->pri_sched = true; + + list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head); if (!hrtimer_active(&sched_data->timer)) hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), -- GitLab From b833a3660394876541d2513ce2736debc7c6797a Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Tue, 28 Aug 2018 10:58:41 +0300 Subject: [PATCH 0869/1692] ovl: add ovl_fadvise() Implement stacked fadvise to fix syscalls readahead(2) and fadvise64(2) on an overlayfs file. 
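From user space the change is visible through the two syscalls named above; a minimal test program (the overlayfs mount path is only an example) exercising both on a file opened through an overlayfs mount:

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      int fd = open("/mnt/overlay/some/file", O_RDONLY);
      int err;

      if (fd < 0) {
          perror("open");
          return 1;
      }

      /* fadvise64(2) via its POSIX wrapper: hint sequential access. */
      err = posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
      if (err)
          fprintf(stderr, "posix_fadvise: error %d\n", err);

      /* readahead(2): pre-populate the page cache for the first 1 MiB. */
      if (readahead(fd, 0, 1024 * 1024) < 0)
          perror("readahead");

      close(fd);
      return 0;
  }
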
Suggested-by: Miklos Szeredi Fixes: d1d04ef8572b ("ovl: stack file ops") Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- fs/overlayfs/file.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index a4acd84591d4..aeaefd2a551b 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -331,6 +331,25 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len return ret; } +static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice) +{ + struct fd real; + const struct cred *old_cred; + int ret; + + ret = ovl_real_fdget(file, &real); + if (ret) + return ret; + + old_cred = ovl_override_creds(file_inode(file)->i_sb); + ret = vfs_fadvise(real.file, offset, len, advice); + revert_creds(old_cred); + + fdput(real); + + return ret; +} + static long ovl_real_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -499,6 +518,7 @@ const struct file_operations ovl_file_operations = { .fsync = ovl_fsync, .mmap = ovl_mmap, .fallocate = ovl_fallocate, + .fadvise = ovl_fadvise, .unlocked_ioctl = ovl_ioctl, .compat_ioctl = ovl_compat_ioctl, -- GitLab From 66eb02d839e8495ae6b612e2d09ff599374b80e2 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 31 Aug 2018 01:04:13 +0200 Subject: [PATCH 0870/1692] mac80211: fix an off-by-one issue in A-MSDU max_subframe computation Initialize 'n' to 2 in order to take into account also the first packet in the estimation of max_subframe limit for a given A-MSDU since frag_tail pointer is NULL when ieee80211_amsdu_aggregate routine analyzes the second frame. Fixes: 6e0456b54545 ("mac80211: add A-MSDU tx support") Signed-off-by: Lorenzo Bianconi Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 6ca0865de945..9b3b069e418a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3174,7 +3174,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, void *data; bool ret = false; unsigned int orig_len; - int n = 1, nfrags, pad = 0; + int n = 2, nfrags, pad = 0; u16 hdrlen; if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) -- GitLab From 8442938c3a2177ba16043b3a935f2c78266ad399 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 31 Aug 2018 11:10:55 +0300 Subject: [PATCH 0871/1692] cfg80211: fix a type issue in ieee80211_chandef_to_operating_class() The "chandef->center_freq1" variable is a u32 but "freq" is a u16 so we are truncating away the high bits. I noticed this bug because in commit 9cf0a0b4b64a ("cfg80211: Add support for 60GHz band channels 5 and 6") we made "freq <= 56160 + 2160 * 6" a valid requency when before it was only "freq <= 56160 + 2160 * 4" that was valid. It introduces a static checker warning: net/wireless/util.c:1571 ieee80211_chandef_to_operating_class() warn: always true condition '(freq <= 56160 + 2160 * 6) => (0-u16max <= 69120)' But really we probably shouldn't have been truncating the high bits away to begin with. 
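The truncation is easy to demonstrate in isolation (a throwaway program, not driver code): channel 6 of the 60 GHz band sits at 56160 + 2160 * 6 = 69120 MHz, which does not fit in 16 bits.

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      uint32_t freq32 = 56160 + 2160 * 6;
      uint16_t freq16 = freq32;  /* silently truncates to 3584 */

      printf("u32: %u MHz, u16: %u MHz\n",
             (unsigned)freq32, (unsigned)freq16);
      /* Any "freq >= ..." comparison against a value above 65535 would
       * therefore have misfired with the old u16 type. */
      return 0;
  }
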
Signed-off-by: Dan Carpenter Signed-off-by: Johannes Berg --- net/wireless/util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/wireless/util.c b/net/wireless/util.c index 3c654cd7ba56..908bf5b6d89e 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1374,7 +1374,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, u8 *op_class) { u8 vht_opclass; - u16 freq = chandef->center_freq1; + u32 freq = chandef->center_freq1; if (freq >= 2412 && freq <= 2472) { if (chandef->width > NL80211_CHAN_WIDTH_40) -- GitLab From abd76d255d69d70206c01b9cb19ba36a9c1df6a1 Mon Sep 17 00:00:00 2001 From: "Dreyfuss, Haim" Date: Fri, 31 Aug 2018 11:31:04 +0300 Subject: [PATCH 0872/1692] mac80211: fix WMM TXOP calculation In commit 9236c4523e5b ("mac80211: limit wmm params to comply with ETSI requirements"), we have limited the WMM parameters to comply with 802.11 and ETSI standard. Mistakenly the TXOP value was caluclated wrong. Fix it by taking the minimum between 802.11 to ETSI to make sure we are not violating both. Fixes: e552af058148 ("mac80211: limit wmm params to comply with ETSI requirements") Signed-off-by: Haim Dreyfuss Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/mac80211/util.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index c80187d6e6bb..93b5bb849ad7 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1151,8 +1151,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); - qparam->txop = !qparam->txop ? wmm_ac->cot / 32 : - min_t(u16, qparam->txop, wmm_ac->cot / 32); + qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32); rcu_read_unlock(); } -- GitLab From f3ffb6c3a28963657eb8b02a795d75f2ebbd5ef4 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Fri, 31 Aug 2018 11:31:06 +0300 Subject: [PATCH 0873/1692] mac80211: fix a race between restart and CSA flows We hit a problem with iwlwifi that was caused by a bug in mac80211. A bug in iwlwifi caused the firwmare to crash in certain cases in channel switch. Because of that bug, drv_pre_channel_switch would fail and trigger the restart flow. Now we had the hw restart worker which runs on the system's workqueue and the csa_connection_drop_work worker that runs on mac80211's workqueue that can run together. This is obviously problematic since the restart work wants to reconfigure the connection, while the csa_connection_drop_work worker does the exact opposite: it tries to disconnect. Fix this by cancelling the csa_connection_drop_work worker in the restart worker. Note that this can sound racy: we could have: driver iface_work CSA_work restart_work +++++++++++++++++++++++++++++++++++++++++++++ | <--drv_cs ---| -CS FAILED--> | | | cancel_work(CSA) schedule | CSA work | | | Race between those 2 But this is not possible because we flush the workqueue in the restart worker before we cancel the CSA worker. That would be bullet proof if we could guarantee that we schedule the CSA worker only from the iface_work which runs on the workqueue (and not on the system's workqueue), but unfortunately we do have an instance in which we schedule the CSA work outside the context of the workqueue (ieee80211_chswitch_done). 
Note also that we should probably cancel other workers like beacon_connection_loss_work and possibly others for different types of interfaces, at the very least, IBSS should suffer from the exact same problem, but for now, do the minimum to fix the actual bug that was actually experienced and reproduced. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/mac80211/main.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 27cd64acaf00..66cbddd65b47 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -255,8 +255,27 @@ static void ieee80211_restart_work(struct work_struct *work) flush_work(&local->radar_detected_work); rtnl_lock(); - list_for_each_entry(sdata, &local->interfaces, list) + list_for_each_entry(sdata, &local->interfaces, list) { + /* + * XXX: there may be more work for other vif types and even + * for station mode: a good thing would be to run most of + * the iface type's dependent _stop (ieee80211_mg_stop, + * ieee80211_ibss_stop) etc... + * For now, fix only the specific bug that was seen: race + * between csa_connection_drop_work and us. + */ + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* + * This worker is scheduled from the iface worker that + * runs on mac80211's workqueue, so we can't be + * scheduling this worker after the cancel right here. + * The exception is ieee80211_chswitch_done. + * Then we can have a race... + */ + cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work); + } flush_delayed_work(&sdata->dec_tailroom_needed_wk); + } ieee80211_scan_cancel(local); /* make sure any new ROC will consider local->in_reconfig */ -- GitLab From 0007e94355fdb71a1cf5dba0754155cba08f0666 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Fri, 31 Aug 2018 11:31:10 +0300 Subject: [PATCH 0874/1692] mac80211: Fix station bandwidth setting after channel switch When performing a channel switch flow for a managed interface, the flow did not update the bandwidth of the AP station and the rate scale algorithm. In case of a channel width downgrade, this would result with the rate scale algorithm using a bandwidth that does not match the interface channel configuration. Fix this by updating the AP station bandwidth and rate scaling algorithm before the actual channel change in case of a bandwidth downgrade, or after the actual channel change in case of a bandwidth upgrade. Signed-off-by: Ilan Peer Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 53 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a59187c016e0..22b699460176 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -978,6 +978,10 @@ static void ieee80211_chswitch_work(struct work_struct *work) */ if (sdata->reserved_chanctx) { + struct ieee80211_supported_band *sband = NULL; + struct sta_info *mgd_sta = NULL; + enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20; + /* * with multi-vif csa driver may call ieee80211_csa_finish() * many times while waiting for other interfaces to use their @@ -986,6 +990,48 @@ static void ieee80211_chswitch_work(struct work_struct *work) if (sdata->reserved_ready) goto out; + if (sdata->vif.bss_conf.chandef.width != + sdata->csa_chandef.width) { + /* + * For managed interface, we need to also update the AP + * station bandwidth and align the rate scale algorithm + * on the bandwidth change. 
Here we only consider the + * bandwidth of the new channel definition (as channel + * switch flow does not have the full HT/VHT/HE + * information), assuming that if additional changes are + * required they would be done as part of the processing + * of the next beacon from the AP. + */ + switch (sdata->csa_chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + default: + bw = IEEE80211_STA_RX_BW_20; + break; + case NL80211_CHAN_WIDTH_40: + bw = IEEE80211_STA_RX_BW_40; + break; + case NL80211_CHAN_WIDTH_80: + bw = IEEE80211_STA_RX_BW_80; + break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + bw = IEEE80211_STA_RX_BW_160; + break; + } + + mgd_sta = sta_info_get(sdata, ifmgd->bssid); + sband = + local->hw.wiphy->bands[sdata->csa_chandef.chan->band]; + } + + if (sdata->vif.bss_conf.chandef.width > + sdata->csa_chandef.width) { + mgd_sta->sta.bandwidth = bw; + rate_control_rate_update(local, sband, mgd_sta, + IEEE80211_RC_BW_CHANGED); + } + ret = ieee80211_vif_use_reserved_context(sdata); if (ret) { sdata_info(sdata, @@ -996,6 +1042,13 @@ static void ieee80211_chswitch_work(struct work_struct *work) goto out; } + if (sdata->vif.bss_conf.chandef.width < + sdata->csa_chandef.width) { + mgd_sta->sta.bandwidth = bw; + rate_control_rate_update(local, sband, mgd_sta, + IEEE80211_RC_BW_CHANGED); + } + goto out; } -- GitLab From 6c18b27d6e5c6a7206364eae2b47bc8d8b2fa68f Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Fri, 31 Aug 2018 11:31:12 +0300 Subject: [PATCH 0875/1692] mac80211: don't Tx a deauth frame if the AP forbade Tx If the driver fails to properly prepare for the channel switch, mac80211 will disconnect. If the CSA IE had mode set to 1, it means that the clients are not allowed to send any Tx on the current channel, and that includes the deauthentication frame. Make sure that we don't send the deauthentication frame in this case. In iwlwifi, this caused a failure to flush queues since the firmware already closed the queues after having parsed the CSA IE. Then mac80211 would wait until the deauthentication frame would go out (drv_flush(drop=false)) and that would never happen. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 22b699460176..b046bf95eb3c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1270,6 +1270,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, cbss->beacon_interval)); return; drop_connection: + /* + * This is just so that the disconnect flow will know that + * we were trying to switch channel and failed. In case the + * mode is 1 (we are not allowed to Tx), we will know not to + * send a deauthentication frame. Those two fields will be + * reset when the disconnection worker runs. 
+ */ + sdata->vif.csa_active = true; + sdata->csa_block_tx = csa_ie.mode; + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); mutex_unlock(&local->chanctx_mtx); mutex_unlock(&local->mtx); @@ -2453,6 +2463,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + bool tx; sdata_lock(sdata); if (!ifmgd->associated) { @@ -2460,6 +2471,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) return; } + tx = !sdata->csa_block_tx; + /* AP is probably out of range (or not reachable for another reason) so * remove the bss struct for that AP. */ @@ -2467,7 +2480,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, - true, frame_buf); + tx, frame_buf); mutex_lock(&local->mtx); sdata->vif.csa_active = false; ifmgd->csa_waiting_bcn = false; @@ -2478,7 +2491,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) } mutex_unlock(&local->mtx); - ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, + ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); sdata_unlock(sdata); -- GitLab From c6e57b3896fc76299913b8cfd82d853bee8a2c84 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Fri, 31 Aug 2018 11:31:13 +0300 Subject: [PATCH 0876/1692] mac80211: shorten the IBSS debug messages When tracing is enabled, all the debug messages are recorded and must not exceed MAX_MSG_LEN (100) columns. Longer debug messages grant the user with: WARNING: CPU: 3 PID: 32642 at /tmp/wifi-core-20180806094828/src/iwlwifi-stack-dev/net/mac80211/./trace_msg.h:32 trace_event_raw_event_mac80211_msg_event+0xab/0xc0 [mac80211] Workqueue: phy1 ieee80211_iface_work [mac80211] RIP: 0010:trace_event_raw_event_mac80211_msg_event+0xab/0xc0 [mac80211] Call Trace: __sdata_dbg+0xbd/0x120 [mac80211] ieee80211_ibss_rx_queued_mgmt+0x15f/0x510 [mac80211] ieee80211_iface_work+0x21d/0x320 [mac80211] Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg --- net/mac80211/ibss.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 6449a1c2283b..f0f5fedb8caa 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, if (len < IEEE80211_DEAUTH_FRAME_LEN) return; - ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n", - mgmt->sa, mgmt->da, mgmt->bssid, reason); + ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason); sta_info_destroy_addr(sdata, mgmt->sa); } @@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); - ibss_dbg(sdata, - "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", - mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); + ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n", + mgmt->bssid, auth_transaction); if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) return; @@ -1175,10 +1175,10 @@ 
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, rx_timestamp = drv_get_tsf(local, sdata); } - ibss_dbg(sdata, - "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n", + ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n", mgmt->sa, mgmt->bssid, - (unsigned long long)rx_timestamp, + (unsigned long long)rx_timestamp); + ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n", (unsigned long long)beacon_timestamp, (unsigned long long)(rx_timestamp - beacon_timestamp), jiffies); @@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, tx_last_beacon = drv_tx_last_beacon(local); - ibss_dbg(sdata, - "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n", - mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon); + ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); + ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n", + mgmt->bssid, tx_last_beacon); if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) return; -- GitLab From 4f2c7337af638bd73fd1f247f84a85521a34b74c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 1 Sep 2018 10:24:51 +0100 Subject: [PATCH 0877/1692] drm/i915: Determine uses-full-ppgtt from context for execbuf Rather than inspect the global module parameter for whether full-ppgtt maybe enabled, we can inspect the context directly as to whether it has its own vm. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Bob Paauwe Cc: Rodrigo Vivi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180901092451.7233-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a926d7d47183..020a2394fc85 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -735,7 +735,12 @@ static int eb_select_context(struct i915_execbuffer *eb) return -ENOENT; eb->ctx = ctx; - eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm; + if (ctx->ppgtt) { + eb->vm = &ctx->ppgtt->vm; + eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; + } else { + eb->vm = &eb->i915->ggtt.vm; + } eb->context_flags = 0; if (ctx->flags & CONTEXT_NO_ZEROMAP) @@ -2201,8 +2206,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1); eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; - if (USES_FULL_PPGTT(eb.i915)) - eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT; reloc_cache_init(&eb.reloc_cache, eb.i915); eb.buffer_count = args->buffer_count; -- GitLab From 4331f4d5ada5684fc77fa16e3f6177f077c9e6ec Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 2 Sep 2018 19:30:53 -0700 Subject: [PATCH 0878/1692] x86: Fix kernel-doc atomic.h warnings Fix kernel-doc warnings in arch/x86/include/asm/atomic.h that are caused by having a #define macro between the kernel-doc notation and the function name. Fixed by moving the #define macro to after the function implementation. Make the same change for atomic64_{32,64}.h for consistency even though there were no kernel-doc warnings found in these header files, but there would be if they were used in generation of documentation. 
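For illustration, here is a minimal sketch of the pattern being fixed, using made-up my_inc()/my_dec() helpers rather than the real atomic.h code: kernel-doc binds a /** ... */ comment to whatever definition follows it, so a #define sitting between the comment and the function makes the parser document the (parameterless) macro and then complain about "excess" function parameters. Moving the #define below the body keeps the comment attached to the function.

/* Hypothetical helpers, only to illustrate the kernel-doc placement rule. */

/**
 * my_inc - increment a counter
 * @v: pointer to the counter
 *
 * With the #define placed here, kernel-doc attaches this comment to the
 * macro on the next line, which takes no parameters, and emits an
 * "Excess function parameter 'v'" warning.
 */
#define my_inc my_inc
static inline void my_inc(int *v)
{
	(*v)++;
}

/**
 * my_dec - decrement a counter
 * @v: pointer to the counter
 *
 * With the #define moved below the body, the comment documents the
 * function itself, the @v description matches, and no warning is emitted.
 */
static inline void my_dec(int *v)
{
	(*v)--;
}
#define my_dec my_dec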
Fixes these kernel-doc warnings: ../arch/x86/include/asm/atomic.h:84: warning: Excess function parameter 'i' description in 'arch_atomic_sub_and_test' ../arch/x86/include/asm/atomic.h:84: warning: Excess function parameter 'v' description in 'arch_atomic_sub_and_test' ../arch/x86/include/asm/atomic.h:96: warning: Excess function parameter 'v' description in 'arch_atomic_inc' ../arch/x86/include/asm/atomic.h:109: warning: Excess function parameter 'v' description in 'arch_atomic_dec' ../arch/x86/include/asm/atomic.h:124: warning: Excess function parameter 'v' description in 'arch_atomic_dec_and_test' ../arch/x86/include/asm/atomic.h:138: warning: Excess function parameter 'v' description in 'arch_atomic_inc_and_test' ../arch/x86/include/asm/atomic.h:153: warning: Excess function parameter 'i' description in 'arch_atomic_add_negative' ../arch/x86/include/asm/atomic.h:153: warning: Excess function parameter 'v' description in 'arch_atomic_add_negative' Fixes: 18cc1814d4e7 ("atomics/treewide: Make test ops optional") Signed-off-by: Randy Dunlap Signed-off-by: Thomas Gleixner Cc: Mark Rutland Link: https://lkml.kernel.org/r/0a1e678d-c8c5-b32c-2640-ed4e94d399d2@infradead.org --- arch/x86/include/asm/atomic.h | 12 ++++++------ arch/x86/include/asm/atomic64_32.h | 8 ++++---- arch/x86/include/asm/atomic64_64.h | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index b143717b92b3..ce84388e540c 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v) * true if the result is zero, or false for all * other cases. */ -#define arch_atomic_sub_and_test arch_atomic_sub_and_test static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); } +#define arch_atomic_sub_and_test arch_atomic_sub_and_test /** * arch_atomic_inc - increment atomic variable @@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) * * Atomically increments @v by 1. */ -#define arch_atomic_inc arch_atomic_inc static __always_inline void arch_atomic_inc(atomic_t *v) { asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter)); } +#define arch_atomic_inc arch_atomic_inc /** * arch_atomic_dec - decrement atomic variable @@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v) * * Atomically decrements @v by 1. */ -#define arch_atomic_dec arch_atomic_dec static __always_inline void arch_atomic_dec(atomic_t *v) { asm volatile(LOCK_PREFIX "decl %0" : "+m" (v->counter)); } +#define arch_atomic_dec arch_atomic_dec /** * arch_atomic_dec_and_test - decrement and test @@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v) * returns true if the result is 0, or false for all other * cases. */ -#define arch_atomic_dec_and_test arch_atomic_dec_and_test static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); } +#define arch_atomic_dec_and_test arch_atomic_dec_and_test /** * arch_atomic_inc_and_test - increment and test @@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) * and returns true if the result is zero, or false for all * other cases. 
*/ -#define arch_atomic_inc_and_test arch_atomic_inc_and_test static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); } +#define arch_atomic_inc_and_test arch_atomic_inc_and_test /** * arch_atomic_add_negative - add and test if negative @@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -#define arch_atomic_add_negative arch_atomic_add_negative static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); } +#define arch_atomic_add_negative arch_atomic_add_negative /** * arch_atomic_add_return - add integer and return diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index ef959f02d070..6a5b0ec460da 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v) * * Atomically increments @v by 1. */ -#define arch_atomic64_inc arch_atomic64_inc static inline void arch_atomic64_inc(atomic64_t *v) { __alternative_atomic64(inc, inc_return, /* no output */, "S" (v) : "memory", "eax", "ecx", "edx"); } +#define arch_atomic64_inc arch_atomic64_inc /** * arch_atomic64_dec - decrement atomic64 variable @@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v) * * Atomically decrements @v by 1. */ -#define arch_atomic64_dec arch_atomic64_dec static inline void arch_atomic64_dec(atomic64_t *v) { __alternative_atomic64(dec, dec_return, /* no output */, "S" (v) : "memory", "eax", "ecx", "edx"); } +#define arch_atomic64_dec arch_atomic64_dec /** * arch_atomic64_add_unless - add unless the number is a given value @@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a, return (int)a; } -#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero static inline int arch_atomic64_inc_not_zero(atomic64_t *v) { int r; @@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v) "S" (v) : "ecx", "edx", "memory"); return r; } +#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero -#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) { long long r; @@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) "S" (v) : "ecx", "memory"); return r; } +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive #undef alternative_atomic64 #undef __alternative_atomic64 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 4343d9b4f30e..5f851d92eecd 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v) * true if the result is zero, or false for all * other cases. */ -#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); } +#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test /** * arch_atomic64_inc - increment atomic64 variable @@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) * * Atomically increments @v by 1. 
*/ -#define arch_atomic64_inc arch_atomic64_inc static __always_inline void arch_atomic64_inc(atomic64_t *v) { asm volatile(LOCK_PREFIX "incq %0" : "=m" (v->counter) : "m" (v->counter)); } +#define arch_atomic64_inc arch_atomic64_inc /** * arch_atomic64_dec - decrement atomic64 variable @@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v) * * Atomically decrements @v by 1. */ -#define arch_atomic64_dec arch_atomic64_dec static __always_inline void arch_atomic64_dec(atomic64_t *v) { asm volatile(LOCK_PREFIX "decq %0" : "=m" (v->counter) : "m" (v->counter)); } +#define arch_atomic64_dec arch_atomic64_dec /** * arch_atomic64_dec_and_test - decrement and test @@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v) * returns true if the result is 0, or false for all other * cases. */ -#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test static inline bool arch_atomic64_dec_and_test(atomic64_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); } +#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test /** * arch_atomic64_inc_and_test - increment and test @@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test static inline bool arch_atomic64_inc_and_test(atomic64_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); } +#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test /** * arch_atomic64_add_negative - add and test if negative @@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -#define arch_atomic64_add_negative arch_atomic64_add_negative static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); } +#define arch_atomic64_add_negative arch_atomic64_add_negative /** * arch_atomic64_add_return - add and return -- GitLab From 2bfbf6fed1a0f672726b0637b89b44d94dedd2b6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 3 Sep 2018 09:33:33 +0100 Subject: [PATCH 0879/1692] drm/i915: Do a full device reset after being wedged We only call unset_wedged on the global reset path (since it's a global operation), so if we are terminally wedged and wish to reset, take the full device reset path rather than the quicker individual engine resets. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180903083337.13134-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_irq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e31093ce871c..10f28a2ee2e6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3309,7 +3309,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv, * Try engine reset when available. We fall back to full reset if * single reset fails. 
*/ - if (intel_has_reset_engine(dev_priv)) { + if (intel_has_reset_engine(dev_priv) && + !i915_terminally_wedged(&dev_priv->gpu_error)) { for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); if (test_and_set_bit(I915_RESET_ENGINE + engine->id, -- GitLab From aae7c06b34e4a351c8dab28f3cda6b1ba0637bf9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 3 Sep 2018 09:33:34 +0100 Subject: [PATCH 0880/1692] drm/i915: Flag any possible writes for a GTT fault We do not explicitly mark the PTE for the user's GTT mmap as being wrprotect, so we don't get a refault when we would need to change a read-only mmapping into read-write. As such, we must presume that if the vma has PROT_WRITE it may be written to, although this is supposed to be indicated by set-domain there are cases (e.g. after swap) where userspace may not be aware of the implicit domain change. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180903083337.13134-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7b7bbfe59697..625e07c56fe2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2018,7 +2018,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - bool write = !!(vmf->flags & FAULT_FLAG_WRITE); + bool write = area->vm_flags & VM_WRITE; struct i915_vma *vma; pgoff_t page_offset; int ret; -- GitLab From fddcd00a49e9122a3579247151e9cb3ce5a1a36e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 3 Sep 2018 09:33:35 +0100 Subject: [PATCH 0881/1692] drm/i915: Force the slow path after a user-write error If we fail to write the user relocation back when it is changed, force ourselves to take the slow relocation path where we can handle faults in the write path. There is still an element of dubiousness as having patched up the batch to use the correct offset, it no longer matches the presumed_offset in the relocation, so a second pass may miss any changes in layout. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180903083337.13134-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 020a2394fc85..43706c1db31a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1491,8 +1491,10 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * can read from this userspace address. 
*/ offset = gen8_canonical_addr(offset & ~UPDATE); - __put_user(offset, - &urelocs[r-stack].presumed_offset); + if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) { + remain = -EFAULT; + goto out; + } } } while (r++, --count); urelocs += ARRAY_SIZE(stack); @@ -1577,7 +1579,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) relocs = kvmalloc_array(size, 1, GFP_KERNEL); if (!relocs) { - kvfree(relocs); err = -ENOMEM; goto err; } @@ -1591,6 +1592,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) if (__copy_from_user((char *)relocs + copied, (char __user *)urelocs + copied, len)) { +end_user: kvfree(relocs); err = -EFAULT; goto err; @@ -1614,7 +1616,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) unsafe_put_user(-1, &urelocs[copied].presumed_offset, end_user); -end_user: user_access_end(); eb->exec[i].relocs_ptr = (uintptr_t)relocs; -- GitLab From e0ff7a7cddeff8ab2445f9d33f2416f8f89f9bca Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 3 Sep 2018 09:33:36 +0100 Subject: [PATCH 0882/1692] drm/i915: Early rejection of buffer allocations larger than RAM We currently try to pin and allocate the whole buffer at a time. If that object is larger than RAM, we will try to pin the whole of physical memory, force the machine into oom, and then still fail the allocation. If the request is obviously too large, error out early. We opt to do this in the backend to make it easy to use alternate paths that do not require the entire object pinned, or may easily handle proxy objects that are larger than physical memory. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180903083337.13134-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 625e07c56fe2..89834ce19acd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2533,13 +2533,21 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) gfp_t noreclaim; int ret; - /* Assert that the object is not currently in any GPU domain. As it + /* + * Assert that the object is not currently in any GPU domain. As it * wasn't in the GTT, there shouldn't be any way it could have been in * a GPU cache */ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + /* + * If there's no chance of allocating enough pages for the whole + * object, bail early. + */ + if (page_count > totalram_pages) + return -ENOMEM; + st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) return -ENOMEM; @@ -2550,7 +2558,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) return -ENOMEM; } - /* Get the list of pages out of our struct file. They'll be pinned + /* + * Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. * * Fail silently without starting the shrinker @@ -2582,7 +2591,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); cond_resched(); - /* We've tried hard to allocate the memory by reaping + /* + * We've tried hard to allocate the memory by reaping * our own buffer, now let the real VM do its job and * go down in flames if truly OOM. 
* @@ -2594,7 +2604,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) /* reclaim and warn, but no oom */ gfp = mapping_gfp_mask(mapping); - /* Our bo are always dirty and so we require + /* + * Our bo are always dirty and so we require * kswapd to reclaim our pages (direct reclaim * does not effectively begin pageout of our * buffers on its own). However, direct reclaim @@ -2638,7 +2649,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { - /* DMA remapping failed? One possible cause is that + /* + * DMA remapping failed? One possible cause is that * it could not reserve enough large entries, asking * for PAGE_SIZE chunks instead may be helpful. */ @@ -2672,7 +2684,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) sg_free_table(st); kfree(st); - /* shmemfs first checks if there is enough memory to allocate the page + /* + * shmemfs first checks if there is enough memory to allocate the page * and reports ENOSPC should there be insufficient, along with the usual * ENOMEM for a genuine allocation failure. * -- GitLab From 6b048706f407f138293f06db31f88370f970db3a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 3 Sep 2018 09:33:37 +0100 Subject: [PATCH 0883/1692] drm/i915: Forcibly flush unwanted requests in drop-caches Add a mode to debugfs/drop-caches to flush unwanted requests off the GPU (by wedging the device and resetting). This is very useful if a test terminated leaving a long queue of hanging batches that would ordinarily require a round trip through hangcheck for each. It reduces the inter-test operation to just a write into drop-caches to reset driver/GPU state between tests. Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20180903083337.13134-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 52 ++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a5265c236a33..4ad0e2ed8610 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4131,13 +4131,17 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, #define DROP_FREED BIT(4) #define DROP_SHRINK_ALL BIT(5) #define DROP_IDLE BIT(6) +#define DROP_RESET_ACTIVE BIT(7) +#define DROP_RESET_SEQNO BIT(8) #define DROP_ALL (DROP_UNBOUND | \ DROP_BOUND | \ DROP_RETIRE | \ DROP_ACTIVE | \ DROP_FREED | \ DROP_SHRINK_ALL |\ - DROP_IDLE) + DROP_IDLE | \ + DROP_RESET_ACTIVE | \ + DROP_RESET_SEQNO) static int i915_drop_caches_get(void *data, u64 *val) { @@ -4149,53 +4153,69 @@ i915_drop_caches_get(void *data, u64 *val) static int i915_drop_caches_set(void *data, u64 val) { - struct drm_i915_private *dev_priv = data; - struct drm_device *dev = &dev_priv->drm; + struct drm_i915_private *i915 = data; int ret = 0; DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", val, val & DROP_ALL); + if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915)) + i915_gem_set_wedged(i915); + /* No need to check and wait for gpu resets, only libdrm auto-restarts * on ioctls on -EAGAIN. 
*/ - if (val & (DROP_ACTIVE | DROP_RETIRE)) { - ret = mutex_lock_interruptible(&dev->struct_mutex); + if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) { + ret = mutex_lock_interruptible(&i915->drm.struct_mutex); if (ret) return ret; if (val & DROP_ACTIVE) - ret = i915_gem_wait_for_idle(dev_priv, + ret = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); + if (val & DROP_RESET_SEQNO) { + intel_runtime_pm_get(i915); + ret = i915_gem_set_global_seqno(&i915->drm, 1); + intel_runtime_pm_put(i915); + } + if (val & DROP_RETIRE) - i915_retire_requests(dev_priv); + i915_retire_requests(i915); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&i915->drm.struct_mutex); + } + + if (val & DROP_RESET_ACTIVE && + i915_terminally_wedged(&i915->gpu_error)) { + i915_handle_error(i915, ALL_ENGINES, 0, NULL); + wait_on_bit(&i915->gpu_error.flags, + I915_RESET_HANDOFF, + TASK_UNINTERRUPTIBLE); } fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) - i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND); + i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND); if (val & DROP_UNBOUND) - i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND); + i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND); if (val & DROP_SHRINK_ALL) - i915_gem_shrink_all(dev_priv); + i915_gem_shrink_all(i915); fs_reclaim_release(GFP_KERNEL); if (val & DROP_IDLE) { do { - if (READ_ONCE(dev_priv->gt.active_requests)) - flush_delayed_work(&dev_priv->gt.retire_work); - drain_delayed_work(&dev_priv->gt.idle_work); - } while (READ_ONCE(dev_priv->gt.awake)); + if (READ_ONCE(i915->gt.active_requests)) + flush_delayed_work(&i915->gt.retire_work); + drain_delayed_work(&i915->gt.idle_work); + } while (READ_ONCE(i915->gt.awake)); } if (val & DROP_FREED) - i915_gem_drain_freed_objects(dev_priv); + i915_gem_drain_freed_objects(i915); return ret; } -- GitLab From 7aa09ff24301535491cd4de1b93107ee91449a12 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Mon, 3 Sep 2018 12:07:47 +0100 Subject: [PATCH 0884/1692] ASoC: q6routing: initialize data correctly Some of the router data fields are left as default zeros which are valid dai ids, so initialize these to invalid value of -1. Without intializing these correctly get_session_from_id() can return incorrect session resulting in not closing the opened copp and messing up with the copp ref count. 
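As a minimal, self-contained sketch of why the zero default is unsafe here (hypothetical types, not the actual q6routing structures): 0 is a valid dai id, so a zero-initialized field doubles as a false match in the lookup, and only an explicit -1 sentinel makes unused slots distinguishable.

#define MAX_SESSIONS 8

/* Hypothetical stand-in for the routing session bookkeeping. */
struct session {
	int port_id;	/* -1 means "unused" */
	int fedai_id;	/* -1 means "unused" */
};

static struct session sessions[MAX_SESSIONS];	/* static storage: all zero */

static struct session *get_session_from_id(int fedai_id)
{
	int i;

	for (i = 0; i < MAX_SESSIONS; i++)
		if (sessions[i].fedai_id == fedai_id)
			return &sessions[i];
	return NULL;
}

static void init_sessions(void)
{
	int i;

	/*
	 * Without this loop every fedai_id is 0, so a lookup for the
	 * perfectly valid dai id 0 returns a slot that was never opened,
	 * and the wrong copp gets its reference count manipulated.
	 */
	for (i = 0; i < MAX_SESSIONS; i++) {
		sessions[i].port_id = -1;
		sessions[i].fedai_id = -1;
	}
}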
Fixes: e3a33673e845 ("ASoC: qdsp6: q6routing: Add q6routing driver") Signed-off-by: Srinivas Kandagatla Signed-off-by: Mark Brown --- sound/soc/qcom/qdsp6/q6routing.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index dc94c5c53788..c6b51571be94 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c) { int i; - for (i = 0; i < MAX_SESSIONS; i++) + for (i = 0; i < MAX_SESSIONS; i++) { routing_data->sessions[i].port_id = -1; + routing_data->sessions[i].fedai_id = -1; + } return 0; } -- GitLab From c43c5e9f524ec914e7e202f9c5ab91779629ccc6 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 3 Sep 2018 10:15:33 +0200 Subject: [PATCH 0885/1692] timekeeping: Fix declaration of read_persistent_wall_and_boot_offset() It is read_persistent_wall_and_boot_offset() and not read_persistent_clock_and_boot_offset() Fixes: 3eca993740b8eb40f51 ("timekeeping: Replace read_boot_clock64() with read_persistent_wall_and_boot_offset()") Signed-off-by: Christian Borntraeger Signed-off-by: Thomas Gleixner Cc: Pavel Tatashin Link: https://lkml.kernel.org/r/20180903081533.34366-1-borntraeger@de.ibm.com --- include/linux/timekeeping.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 5d738804e3d6..a5a3cfc3c2fa 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); extern int persistent_clock_is_local; extern void read_persistent_clock64(struct timespec64 *ts); -void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock, - struct timespec64 *boot_offset); +void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock, + struct timespec64 *boot_offset); extern int update_persistent_clock64(struct timespec64 now); /* -- GitLab From 34f89904b01e6d0cc7425d040ffa38f37d218725 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Mon, 3 Sep 2018 14:57:55 +0300 Subject: [PATCH 0886/1692] drm/i915: Update DRIVER_DATE to 20180903 Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 14e562887307..9771f39d99b3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -86,8 +86,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20180830" -#define DRIVER_TIMESTAMP 1535639183 +#define DRIVER_DATE "20180903" +#define DRIVER_TIMESTAMP 1535975875 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions -- GitLab From 9bdda4e9cf2dcecb60a0683b10ffb8cd7e5f2f45 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sat, 1 Sep 2018 09:40:01 +0300 Subject: [PATCH 0887/1692] fsnotify: fix ignore mask logic in fsnotify() Commit 92183a42898d ("fsnotify: fix ignore mask logic in send_to_group()") acknoledges the use case of ignoring an event on an inode mark, because of an ignore mask on a mount mark of the same group (i.e. I want to get all events on this file, except for the events that came from that mount). 
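From userspace, that use case corresponds roughly to the fanotify sketch below (the paths and the event mask are placeholders, not taken from the commit): an inode mark requests the events, and an ignore mask on a mount mark of the same group filters out the ones arriving through that mount. The fix ensures the mount mark's ignore mask is actually consulted for such events.

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* "I want to get all modify events on this file..." */
	fanotify_mark(fd, FAN_MARK_ADD, FAN_MODIFY,
		      AT_FDCWD, "/data/file-of-interest");

	/* "...except for the events that came from that mount." */
	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT | FAN_MARK_IGNORED_MASK,
		      FAN_MODIFY, AT_FDCWD, "/mnt/that-mount");

	/* A read() loop consuming struct fanotify_event_metadata would follow. */
	return 0;
}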
This change depends on correctly merging the inode marks and mount marks group lists, so that the mount mark ignore mask would be tested in send_to_group(). Alas, the merging of the lists did not take into account the case where event in question is not in the mask of any of the mount marks. To fix this, completely remove the tests for inode and mount event masks from the lists merging code. Fixes: 92183a42898d ("fsnotify: fix ignore mask logic in send_to_group") Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara --- fs/notify/fsnotify.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index f174397b63a0..ababdbfab537 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -351,16 +351,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); - if ((mask & FS_MODIFY) || - (test_mask & to_tell->i_fsnotify_mask)) { - iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = - fsnotify_first_mark(&to_tell->i_fsnotify_marks); - } - - if (mnt && ((mask & FS_MODIFY) || - (test_mask & mnt->mnt_fsnotify_mask))) { - iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = - fsnotify_first_mark(&to_tell->i_fsnotify_marks); + iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = + fsnotify_first_mark(&to_tell->i_fsnotify_marks); + if (mnt) { iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] = fsnotify_first_mark(&mnt->mnt_fsnotify_marks); } -- GitLab From fb96b67c8ae0c91e17f0f9fe88cfce406ace6a94 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 12 Feb 2018 11:44:36 +0200 Subject: [PATCH 0888/1692] drm/omap: Allocate drm_device earlier and unref it as last step If we allocate the drm_device earlier we can just return the error code without the need to use goto. Do the unref of the drm_device as a last step when cleaning up. This will make the drm_device available longer for us and makes sure that we only free up the memory when all other cleanups have been already done. Signed-off-by: Peter Ujfalusi Reviewed-by: Laurent Pinchart Signed-off-by: Tomi Valkeinen Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_drv.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 1b6601e9b107..e5afecb4fd45 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -525,6 +525,14 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) DBG("%s", dev_name(dev)); + /* Allocate and initialize the DRM device. */ + ddev = drm_dev_alloc(&omap_drm_driver, dev); + if (IS_ERR(ddev)) + return PTR_ERR(ddev); + + priv->ddev = ddev; + ddev->dev_private = priv; + priv->dev = dev; priv->dss = omapdss_get_dss(); priv->dispc = dispc_get_dispc(priv->dss); @@ -543,16 +551,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) mutex_init(&priv->list_lock); INIT_LIST_HEAD(&priv->obj_list); - /* Allocate and initialize the DRM device. 
*/ - ddev = drm_dev_alloc(&omap_drm_driver, priv->dev); - if (IS_ERR(ddev)) { - ret = PTR_ERR(ddev); - goto err_destroy_wq; - } - - priv->ddev = ddev; - ddev->dev_private = priv; - /* Get memory bandwidth limits */ if (priv->dispc_ops->get_memory_bandwidth_limit) priv->max_bandwidth = @@ -563,7 +561,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) ret = omap_modeset_init(ddev); if (ret) { dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret); - goto err_free_drm_dev; + goto err_gem_deinit; } /* Initialize vblank handling, start with all CRTCs disabled. */ @@ -599,14 +597,13 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) err_cleanup_modeset: drm_mode_config_cleanup(ddev); omap_drm_irq_uninstall(ddev); -err_free_drm_dev: +err_gem_deinit: omap_gem_deinit(ddev); - drm_dev_unref(ddev); -err_destroy_wq: destroy_workqueue(priv->wq); omap_disconnect_dssdevs(); err_crtc_uninit: omap_crtc_pre_uninit(); + drm_dev_unref(ddev); return ret; } @@ -630,12 +627,12 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) omap_drm_irq_uninstall(ddev); omap_gem_deinit(ddev); - drm_dev_unref(ddev); - destroy_workqueue(priv->wq); omap_disconnect_dssdevs(); omap_crtc_pre_uninit(); + + drm_dev_unref(ddev); } static int pdev_probe(struct platform_device *pdev) -- GitLab From 52b9ef246d6a8667c87771d1b0fdb982afc88c7f Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 12 Feb 2018 11:44:37 +0200 Subject: [PATCH 0889/1692] drm/omap: Manage the usable omap_dss_device list within omap_drm_private Instead of reaching back to DSS to iterate through the dss_devices every time, use an internal array where we store the available and usable dss_devices. At the same time remove the omapdss_device_is_connected() check from omap_modeset_init() as it became irrelevant: We are not adding dssdevs if their connect failed. 
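Reduced to a hedged sketch with made-up types (not the real omap_dss_device/omap_drm_private definitions), the pattern introduced here is: at connect time take a reference and stash the device in a bounded private array, then have every later loop walk that array instead of re-walking the DSS core's device list.

#define MAX_DSSDEVS 8

/* Hypothetical stand-ins for the omapdrm/omapdss types. */
struct dss_dev {
	int refcount;
	void (*enable_hpd)(struct dss_dev *dev);
};

struct drm_priv {
	unsigned int num_dssdevs;
	struct dss_dev *dssdevs[MAX_DSSDEVS];
};

/* Connect time: remember the device and pin it with a reference. */
static void remember_dssdev(struct drm_priv *priv, struct dss_dev *dev)
{
	if (priv->num_dssdevs >= MAX_DSSDEVS)
		return;

	dev->refcount++;			/* stands in for omap_dss_get_device() */
	priv->dssdevs[priv->num_dssdevs++] = dev;
}

/* Every later user iterates the cached array, not the global DSS list. */
static void enable_all_hpd(struct drm_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->num_dssdevs; i++)
		if (priv->dssdevs[i]->enable_hpd)
			priv->dssdevs[i]->enable_hpd(priv->dssdevs[i]);
}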
Signed-off-by: Peter Ujfalusi Signed-off-by: Tomi Valkeinen Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_drv.c | 94 +++++++++++++++++++----------- drivers/gpu/drm/omapdrm/omap_drv.h | 3 + 2 files changed, 62 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index e5afecb4fd45..d1a1129a3f5d 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -149,18 +149,27 @@ static int get_connector_type(struct omap_dss_device *dssdev) } } -static void omap_disconnect_dssdevs(void) +static void omap_disconnect_dssdevs(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + unsigned int i; + + for (i = 0; i < priv->num_dssdevs; i++) { + struct omap_dss_device *dssdev = priv->dssdevs[i]; - for_each_dss_dev(dssdev) dssdev->driver->disconnect(dssdev); + priv->dssdevs[i] = NULL; + omap_dss_put_device(dssdev); + } + + priv->num_dssdevs = 0; } -static int omap_connect_dssdevs(void) +static int omap_connect_dssdevs(struct drm_device *ddev) { - int r; + struct omap_drm_private *priv = ddev->dev_private; struct omap_dss_device *dssdev = NULL; + int r; if (!omapdss_stack_is_ready()) return -EPROBE_DEFER; @@ -173,6 +182,14 @@ static int omap_connect_dssdevs(void) } else if (r) { dev_warn(dssdev->dev, "could not connect display: %s\n", dssdev->name); + } else { + omap_dss_get_device(dssdev); + priv->dssdevs[priv->num_dssdevs++] = dssdev; + if (priv->num_dssdevs == ARRAY_SIZE(priv->dssdevs)) { + /* To balance the 'for_each_dss_dev' loop */ + omap_dss_put_device(dssdev); + break; + } } } @@ -183,7 +200,7 @@ static int omap_connect_dssdevs(void) * if we are deferring probe, we disconnect the devices we previously * connected */ - omap_disconnect_dssdevs(); + omap_disconnect_dssdevs(ddev); return r; } @@ -208,6 +225,7 @@ static int omap_modeset_init(struct drm_device *dev) int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc); int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc); int num_crtcs, crtc_idx, plane_idx; + unsigned int i; int ret; u32 plane_crtc_mask; @@ -225,11 +243,7 @@ static int omap_modeset_init(struct drm_device *dev) * configuration does not match the expectations or exceeds * the available resources, the configuration is rejected. 
*/ - num_crtcs = 0; - for_each_dss_dev(dssdev) - if (omapdss_device_is_connected(dssdev)) - num_crtcs++; - + num_crtcs = priv->num_dssdevs; if (num_crtcs > num_mgrs || num_crtcs > num_ovls || num_crtcs > ARRAY_SIZE(priv->crtcs) || num_crtcs > ARRAY_SIZE(priv->planes) || @@ -247,15 +261,13 @@ static int omap_modeset_init(struct drm_device *dev) crtc_idx = 0; plane_idx = 0; - for_each_dss_dev(dssdev) { + for (i = 0; i < priv->num_dssdevs; i++) { + struct omap_dss_device *dssdev = priv->dssdevs[i]; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_plane *plane; struct drm_crtc *crtc; - if (!omapdss_device_is_connected(dssdev)) - continue; - encoder = omap_encoder_init(dev, dssdev); if (!encoder) return -ENOMEM; @@ -335,11 +347,14 @@ static int omap_modeset_init(struct drm_device *dev) /* * Enable the HPD in external components if supported */ -static void omap_modeset_enable_external_hpd(void) +static void omap_modeset_enable_external_hpd(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + int i; + + for (i = 0; i < priv->num_dssdevs; i++) { + struct omap_dss_device *dssdev = priv->dssdevs[i]; - for_each_dss_dev(dssdev) { if (dssdev->driver->enable_hpd) dssdev->driver->enable_hpd(dssdev); } @@ -348,11 +363,14 @@ static void omap_modeset_enable_external_hpd(void) /* * Disable the HPD in external components if supported */ -static void omap_modeset_disable_external_hpd(void) +static void omap_modeset_disable_external_hpd(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + int i; + + for (i = 0; i < priv->num_dssdevs; i++) { + struct omap_dss_device *dssdev = priv->dssdevs[i]; - for_each_dss_dev(dssdev) { if (dssdev->driver->disable_hpd) dssdev->driver->disable_hpd(dssdev); } @@ -540,7 +558,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) omap_crtc_pre_init(priv); - ret = omap_connect_dssdevs(); + ret = omap_connect_dssdevs(ddev); if (ret) goto err_crtc_uninit; @@ -577,7 +595,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) omap_fbdev_init(ddev); drm_kms_helper_poll_init(ddev); - omap_modeset_enable_external_hpd(); + omap_modeset_enable_external_hpd(ddev); /* * Register the DRM device with the core and the connectors with @@ -590,7 +608,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) return 0; err_cleanup_helpers: - omap_modeset_disable_external_hpd(); + omap_modeset_disable_external_hpd(ddev); drm_kms_helper_poll_fini(ddev); omap_fbdev_fini(ddev); @@ -600,7 +618,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) err_gem_deinit: omap_gem_deinit(ddev); destroy_workqueue(priv->wq); - omap_disconnect_dssdevs(); + omap_disconnect_dssdevs(ddev); err_crtc_uninit: omap_crtc_pre_uninit(); drm_dev_unref(ddev); @@ -615,7 +633,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) drm_dev_unregister(ddev); - omap_modeset_disable_external_hpd(); + omap_modeset_disable_external_hpd(ddev); drm_kms_helper_poll_fini(ddev); omap_fbdev_fini(ddev); @@ -629,7 +647,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) destroy_workqueue(priv->wq); - omap_disconnect_dssdevs(); + omap_disconnect_dssdevs(ddev); omap_crtc_pre_uninit(); drm_dev_unref(ddev); @@ -674,11 +692,14 @@ static int pdev_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int omap_drm_suspend_all_displays(void) +static int 
omap_drm_suspend_all_displays(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + int i; + + for (i = 0; i < priv->num_dssdevs; i++) { + struct omap_dss_device *dssdev = priv->dssdevs[i]; - for_each_dss_dev(dssdev) { if (!dssdev->driver) continue; @@ -693,11 +714,14 @@ static int omap_drm_suspend_all_displays(void) return 0; } -static int omap_drm_resume_all_displays(void) +static int omap_drm_resume_all_displays(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + int i; + + for (i = 0; i < priv->num_dssdevs; i++) { + struct omap_dss_device *dssdev = priv->dssdevs[i]; - for_each_dss_dev(dssdev) { if (!dssdev->driver) continue; @@ -718,7 +742,7 @@ static int omap_drm_suspend(struct device *dev) drm_kms_helper_poll_disable(drm_dev); drm_modeset_lock_all(drm_dev); - omap_drm_suspend_all_displays(); + omap_drm_suspend_all_displays(drm_dev); drm_modeset_unlock_all(drm_dev); return 0; @@ -730,7 +754,7 @@ static int omap_drm_resume(struct device *dev) struct drm_device *drm_dev = priv->ddev; drm_modeset_lock_all(drm_dev); - omap_drm_resume_all_displays(); + omap_drm_resume_all_displays(drm_dev); drm_modeset_unlock_all(drm_dev); drm_kms_helper_poll_enable(drm_dev); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index f27c8e216adf..006c868c528d 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -54,6 +54,9 @@ struct omap_drm_private { struct dispc_device *dispc; const struct dispc_ops *dispc_ops; + unsigned int num_dssdevs; + struct omap_dss_device *dssdevs[8]; + unsigned int num_crtcs; struct drm_crtc *crtcs[8]; -- GitLab From eb5bc1f92ba38ae16a4db499e3ebeb0b86fe769e Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 12 Feb 2018 11:44:39 +0200 Subject: [PATCH 0890/1692] drm/omap: Do dss_device (display) ordering in omap_drv.c Sort the dssdev array based on DT aliases. With this change we can remove the panel ordering from dss/display.c and have all sorting related to dssdevs in one place. 
Signed-off-by: Peter Ujfalusi Signed-off-by: Tomi Valkeinen Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/display.c | 2 ++ drivers/gpu/drm/omapdrm/dss/omapdss.h | 1 + drivers/gpu/drm/omapdrm/omap_drv.c | 18 ++++++++++++++++++ 3 files changed, 21 insertions(+) diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 9e7fcbd57e52..506327409caa 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -52,6 +52,8 @@ int omapdss_register_display(struct omap_dss_device *dssdev) if (id < 0) id = disp_num_counter++; + dssdev->alias_id = id; + snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id); /* Use 'label' property for name, if it exists */ diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 14d74adb13fb..eae105b0b961 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -467,6 +467,7 @@ struct omap_dss_device { /* alias in the form of "display%d" */ char alias[16]; + unsigned int alias_id; enum omap_display_type type; enum omap_display_type output_type; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index d1a1129a3f5d..e411d46bd803 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -15,6 +15,8 @@ * this program. If not, see . */ +#include +#include #include #include @@ -165,6 +167,18 @@ static void omap_disconnect_dssdevs(struct drm_device *ddev) priv->num_dssdevs = 0; } +static int omap_compare_dssdevs(const void *a, const void *b) +{ + const struct omap_dss_device *dssdev1 = *(struct omap_dss_device **)a; + const struct omap_dss_device *dssdev2 = *(struct omap_dss_device **)b; + + if (dssdev1->alias_id > dssdev2->alias_id) + return 1; + else if (dssdev1->alias_id < dssdev2->alias_id) + return -1; + return 0; +} + static int omap_connect_dssdevs(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; @@ -193,6 +207,10 @@ static int omap_connect_dssdevs(struct drm_device *ddev) } } + /* Sort the list by DT aliases */ + sort(priv->dssdevs, priv->num_dssdevs, sizeof(priv->dssdevs[0]), + omap_compare_dssdevs, NULL); + return 0; cleanup: -- GitLab From 36c61ae2b755f1b64d5adcf43bb32932dcc4c330 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 12 Feb 2018 11:44:40 +0200 Subject: [PATCH 0891/1692] drm/omap: dss: Remove display ordering from dss/display.c As ordering of the dss_devices based on DT aliases is now implemented in omap_drm.c, there is no need to do the ordering in dss/display.c anymore. At the same time remove the alias member of the omap_dss_device struct since it is no longer needed. The only place it was used is in the omapdss_register_display() function. 
Signed-off-by: Peter Ujfalusi Signed-off-by: Tomi Valkeinen Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/display.c | 15 +++------------ drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 -- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 506327409caa..e1ab1592186e 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -41,7 +41,6 @@ static int disp_num_counter; int omapdss_register_display(struct omap_dss_device *dssdev) { struct omap_dss_driver *drv = dssdev->driver; - struct list_head *cur; int id; /* @@ -54,26 +53,18 @@ int omapdss_register_display(struct omap_dss_device *dssdev) dssdev->alias_id = id; - snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id); - /* Use 'label' property for name, if it exists */ of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name); if (dssdev->name == NULL) - dssdev->name = dssdev->alias; + dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL, + "display%u", id); if (drv && drv->get_timings == NULL) drv->get_timings = omapdss_default_get_timings; mutex_lock(&panel_list_mutex); - list_for_each(cur, &panel_list) { - struct omap_dss_device *ldev = list_entry(cur, - struct omap_dss_device, - panel_list); - if (strcmp(ldev->alias, dssdev->alias) > 0) - break; - } - list_add_tail(&dssdev->panel_list, cur); + list_add_tail(&dssdev->panel_list, &panel_list); mutex_unlock(&panel_list_mutex); return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index eae105b0b961..8d530057a4b9 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -465,8 +465,6 @@ struct omap_dss_device { struct list_head panel_list; - /* alias in the form of "display%d" */ - char alias[16]; unsigned int alias_id; enum omap_display_type type; -- GitLab From f13e97cf3e72288b883a29e82a6d0d7cfb9827a4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 1 Mar 2018 21:51:43 +0200 Subject: [PATCH 0892/1692] drm/omap: dss: Gather OMAP DSS components at probe time The omapdss_gather_components() function walks the OF graph to create a list of all components part of the display device. There's no need to delay this operation until DSS bind time as we have all the information we need at probe time. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dss.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index cb80ddaa19d2..9af7108dbf5f 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1323,7 +1323,6 @@ static int dss_bind(struct device *dev) pm_set_vt_switch(0); - omapdss_gather_components(dev); omapdss_set_dss(dss); return 0; @@ -1474,6 +1473,8 @@ static int dss_probe(struct platform_device *pdev) dss); /* Add all the child devices as components. 
*/ + omapdss_gather_components(&pdev->dev); + device_for_each_child(&pdev->dev, &match, dss_add_child_component); r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); -- GitLab From cc1876ce5791b1c87d0b4dd5d1507e9c80558af2 Mon Sep 17 00:00:00 2001 From: Jyri Sarha Date: Fri, 16 Feb 2018 13:25:07 +0200 Subject: [PATCH 0893/1692] drm/omap: dss: Move platform_device_register from core.c to dss.c probe Register the omapdrm device when we know that dss device probe going to succeed. This avoids DSS6 and DSS2 omapdrm device registration from colliding with each other. Signed-off-by: Jyri Sarha Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/core.c | 26 ++------------------------ drivers/gpu/drm/omapdrm/dss/dss.c | 13 +++++++++++++ drivers/gpu/drm/omapdrm/dss/dss.h | 2 ++ 3 files changed, 17 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c index 07d00a186f15..a2edabc9f6b3 100644 --- a/drivers/gpu/drm/omapdrm/dss/core.c +++ b/drivers/gpu/drm/omapdrm/dss/core.c @@ -45,36 +45,14 @@ static struct platform_driver * const omap_dss_drivers[] = { #endif }; -static struct platform_device *omap_drm_device; - static int __init omap_dss_init(void) { - int r; - - r = platform_register_drivers(omap_dss_drivers, - ARRAY_SIZE(omap_dss_drivers)); - if (r) - goto err_reg; - - omap_drm_device = platform_device_register_simple("omapdrm", 0, NULL, 0); - if (IS_ERR(omap_drm_device)) { - r = PTR_ERR(omap_drm_device); - goto err_reg; - } - - return 0; - -err_reg: - platform_unregister_drivers(omap_dss_drivers, - ARRAY_SIZE(omap_dss_drivers)); - - return r; + return platform_register_drivers(omap_dss_drivers, + ARRAY_SIZE(omap_dss_drivers)); } static void __exit omap_dss_exit(void) { - platform_device_unregister(omap_drm_device); - platform_unregister_drivers(omap_dss_drivers, ARRAY_SIZE(omap_dss_drivers)); } diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 9af7108dbf5f..64479b209c1f 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1315,6 +1315,7 @@ static const struct soc_device_attribute dss_soc_devices[] = { static int dss_bind(struct device *dev) { struct dss_device *dss = dev_get_drvdata(dev); + struct platform_device *drm_pdev; int r; r = component_bind_all(dev, NULL); @@ -1325,11 +1326,23 @@ static int dss_bind(struct device *dev) omapdss_set_dss(dss); + drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0); + if (IS_ERR(drm_pdev)) { + component_unbind_all(dev, NULL); + return PTR_ERR(drm_pdev); + } + + dss->drm_pdev = drm_pdev; + return 0; } static void dss_unbind(struct device *dev) { + struct dss_device *dss = dev_get_drvdata(dev); + + platform_device_unregister(dss->drm_pdev); + omapdss_set_dss(NULL); component_unbind_all(dev, NULL); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 38302631b64b..e6baad7e653f 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -238,6 +238,8 @@ struct dss_device { struct regmap *syscon_pll_ctrl; u32 syscon_pll_ctrl_offset; + struct platform_device *drm_pdev; + struct clk *parent_clk; struct clk *dss_clk; unsigned long dss_clk_rate; -- GitLab From 8023651bd3d9372779be731a1165058cd33b454e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 7 Mar 2018 20:34:42 +0200 Subject: [PATCH 0894/1692] drm/omap: dss: Handle DPI and SDI port 
initialization failures The dpi_init_port() and sdi_init_port() functions can return errors but their return value is ignored. This prevents both probe failures and probe deferral from working correctly. Propagate the errors up the call stack. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dss.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 64479b209c1f..55d837983a1e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1183,7 +1183,8 @@ static int dss_init_ports(struct dss_device *dss) struct platform_device *pdev = dss->pdev; struct device_node *parent = pdev->dev.of_node; struct device_node *port; - int i; + unsigned int i; + int r; for (i = 0; i < dss->feat->num_ports; i++) { port = of_graph_get_port_by_id(parent, i); @@ -1192,11 +1193,17 @@ static int dss_init_ports(struct dss_device *dss) switch (dss->feat->ports[i]) { case OMAP_DISPLAY_TYPE_DPI: - dpi_init_port(dss, pdev, port, dss->feat->model); + r = dpi_init_port(dss, pdev, port, dss->feat->model); + if (r) + return r; break; + case OMAP_DISPLAY_TYPE_SDI: - sdi_init_port(dss, pdev, port); + r = sdi_init_port(dss, pdev, port); + if (r) + return r; break; + default: break; } -- GitLab From 52dd898a30e0cc0354e34ee3955cec0ca60a80d5 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 14 Feb 2018 02:22:42 +0200 Subject: [PATCH 0895/1692] drm/omap: dss: Remove omapdss_atv_ops get_wss and set_wss operations The operations are never used, remove them. If the need to set wide screen signaling data arises later, it should be implemented by extending the DRM bridge API. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 19 --------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 6 --- drivers/gpu/drm/omapdrm/dss/venc.c | 41 ------------------- 3 files changed, 66 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 9eabd7201a12..5fdecc12b608 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -154,22 +154,6 @@ static int tvc_check_timings(struct omap_dss_device *dssdev, return in->ops.atv->check_timings(in, vm); } -static u32 tvc_get_wss(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.atv->get_wss(in); -} - -static int tvc_set_wss(struct omap_dss_device *dssdev, u32 wss) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.atv->set_wss(in, wss); -} - static struct omap_dss_driver tvc_driver = { .connect = tvc_connect, .disconnect = tvc_disconnect, @@ -180,9 +164,6 @@ static struct omap_dss_driver tvc_driver = { .set_timings = tvc_set_timings, .get_timings = tvc_get_timings, .check_timings = tvc_check_timings, - - .get_wss = tvc_get_wss, - .set_wss = tvc_set_wss, }; static int tvc_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 8d530057a4b9..67db0ea272f3 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -362,9 +362,6 @@ struct omapdss_atv_ops { struct videomode 
*vm); void (*get_timings)(struct omap_dss_device *dssdev, struct videomode *vm); - - int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); - u32 (*get_wss)(struct omap_dss_device *dssdev); }; struct omapdss_hdmi_ops { @@ -554,9 +551,6 @@ struct omap_dss_driver { void (*get_size)(struct omap_dss_device *dssdev, unsigned int *width, unsigned int *height); - int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); - u32 (*get_wss)(struct omap_dss_device *dssdev); - int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); bool (*detect)(struct omap_dss_device *dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index ac01907dcc34..c573254df766 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -626,44 +626,6 @@ static void venc_get_timings(struct omap_dss_device *dssdev, mutex_unlock(&venc->venc_lock); } -static u32 venc_get_wss(struct omap_dss_device *dssdev) -{ - struct venc_device *venc = dssdev_to_venc(dssdev); - - /* Invert due to VENC_L21_WC_CTL:INV=1 */ - return (venc->wss_data >> 8) ^ 0xfffff; -} - -static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) -{ - struct venc_device *venc = dssdev_to_venc(dssdev); - const struct venc_config *config; - int r; - - DSSDBG("venc_set_wss\n"); - - mutex_lock(&venc->venc_lock); - - config = venc_timings_to_config(&venc->vm); - - /* Invert due to VENC_L21_WC_CTL:INV=1 */ - venc->wss_data = (wss ^ 0xfffff) << 8; - - r = venc_runtime_get(venc); - if (r) - goto err; - - venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | - venc->wss_data); - - venc_runtime_put(venc); - -err: - mutex_unlock(&venc->venc_lock); - - return r; -} - static int venc_init_regulator(struct venc_device *venc) { struct regulator *vdda_dac; @@ -810,9 +772,6 @@ static const struct omapdss_atv_ops venc_ops = { .check_timings = venc_check_timings, .set_timings = venc_set_timings, .get_timings = venc_get_timings, - - .set_wss = venc_set_wss, - .get_wss = venc_get_wss, }; static void venc_init_output(struct venc_device *venc) -- GitLab From 9976782f331b0d54710fddd89848b9b6e22746d4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 27 Feb 2018 20:29:21 +0200 Subject: [PATCH 0896/1692] drm/omap: dss: Remove DSS encoders get_timings operation The get_timings operation from DSS encoders (not to be confused with the identically named operation in omap_dss_driver) is never called. Remove it. 
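For reference, after this removal the current timings can only be queried through the omap_dss_driver operation; a minimal sketch of such a caller (the example_* helper is hypothetical and not part of this patch):

  static void example_read_timings(struct omap_dss_device *dssdev,
                                   struct videomode *vm)
  {
          struct omap_dss_driver *dssdrv = dssdev->driver;

          /* Only the omap_dss_driver get_timings remains after this patch. */
          if (dssdrv->get_timings)
                  dssdrv->get_timings(dssdev, vm);
  }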
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 11 ----------- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 9 --------- .../gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 9 --------- drivers/gpu/drm/omapdrm/dss/dpi.c | 13 ------------- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 9 --------- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 9 --------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 10 ---------- drivers/gpu/drm/omapdrm/dss/sdi.c | 9 --------- drivers/gpu/drm/omapdrm/dss/venc.c | 13 ------------- 9 files changed, 92 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index afee1b8b457a..27d63a14efe3 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -148,16 +148,6 @@ static void opa362_set_timings(struct omap_dss_device *dssdev, in->ops.atv->set_timings(in, vm); } -static void opa362_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - dev_dbg(dssdev->dev, "get_timings\n"); - - *vm = ddata->vm; -} - static int opa362_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -178,7 +168,6 @@ static const struct omapdss_atv_ops opa362_atv_ops = { .check_timings = opa362_check_timings, .set_timings = opa362_set_timings, - .get_timings = opa362_get_timings, }; static int opa362_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index ed7ae384c3ed..566c63a3ad59 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -142,14 +142,6 @@ static void tfp410_set_timings(struct omap_dss_device *dssdev, in->ops.dpi->set_timings(in, vm); } -static void tfp410_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - static int tfp410_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -170,7 +162,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = { .check_timings = tfp410_check_timings, .set_timings = tfp410_set_timings, - .get_timings = tfp410_get_timings, }; static int tfp410_probe_of(struct platform_device *pdev) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index d275bf152da5..4753e5455f82 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -136,14 +136,6 @@ static void tpd_set_timings(struct omap_dss_device *dssdev, in->ops.hdmi->set_timings(in, vm); } -static void tpd_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - static int tpd_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -249,7 +241,6 @@ static const struct omapdss_hdmi_ops tpd_hdmi_ops = { .check_timings = tpd_check_timings, .set_timings = tpd_set_timings, - .get_timings = tpd_get_timings, .read_edid = tpd_read_edid, .detect = tpd_detect, diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 9fcc50217133..5d2d4314055f 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -491,18 
+491,6 @@ static void dpi_set_timings(struct omap_dss_device *dssdev, mutex_unlock(&dpi->lock); } -static void dpi_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); - - mutex_lock(&dpi->lock); - - *vm = dpi->vm; - - mutex_unlock(&dpi->lock); -} - static int dpi_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -702,7 +690,6 @@ static const struct omapdss_dpi_ops dpi_ops = { .check_timings = dpi_check_timings, .set_timings = dpi_set_timings, - .get_timings = dpi_get_timings, }; static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 5879f45f6fc9..b3d7865347a3 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -296,14 +296,6 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev, mutex_unlock(&hdmi->lock); } -static void hdmi_display_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - *vm = hdmi->cfg.vm; -} - static int hdmi_dump_regs(struct seq_file *s, void *p) { struct omap_hdmi *hdmi = s->private; @@ -557,7 +549,6 @@ static const struct omapdss_hdmi_ops hdmi_ops = { .check_timings = hdmi_display_check_timing, .set_timings = hdmi_display_set_timing, - .get_timings = hdmi_display_get_timings, .read_edid = hdmi_read_edid, .lost_hotplug = hdmi_lost_hotplug, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index ae1a001d1b83..8e9b3a24b2ab 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -293,14 +293,6 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev, mutex_unlock(&hdmi->lock); } -static void hdmi_display_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - *vm = hdmi->cfg.vm; -} - static int hdmi_dump_regs(struct seq_file *s, void *p) { struct omap_hdmi *hdmi = s->private; @@ -549,7 +541,6 @@ static const struct omapdss_hdmi_ops hdmi_ops = { .check_timings = hdmi_display_check_timing, .set_timings = hdmi_display_set_timing, - .get_timings = hdmi_display_get_timings, .read_edid = hdmi_read_edid, .set_infoframe = hdmi_set_infoframe, diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 67db0ea272f3..426ac8297f5f 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -309,8 +309,6 @@ struct omapdss_dpi_ops { struct videomode *vm); void (*set_timings)(struct omap_dss_device *dssdev, struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); }; struct omapdss_sdi_ops { @@ -326,8 +324,6 @@ struct omapdss_sdi_ops { struct videomode *vm); void (*set_timings)(struct omap_dss_device *dssdev, struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); }; struct omapdss_dvi_ops { @@ -343,8 +339,6 @@ struct omapdss_dvi_ops { struct videomode *vm); void (*set_timings)(struct omap_dss_device *dssdev, struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); }; struct omapdss_atv_ops { @@ -360,8 +354,6 @@ struct omapdss_atv_ops { struct videomode *vm); void (*set_timings)(struct omap_dss_device *dssdev, struct videomode *vm); - void (*get_timings)(struct omap_dss_device 
*dssdev, - struct videomode *vm); }; struct omapdss_hdmi_ops { @@ -377,8 +369,6 @@ struct omapdss_hdmi_ops { struct videomode *vm); void (*set_timings)(struct omap_dss_device *dssdev, struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); void (*lost_hotplug)(struct omap_dss_device *dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 69c3b7a3d5c7..43cf331e80e4 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -237,14 +237,6 @@ static void sdi_set_timings(struct omap_dss_device *dssdev, sdi->vm = *vm; } -static void sdi_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct sdi_device *sdi = dssdev_to_sdi(dssdev); - - *vm = sdi->vm; -} - static int sdi_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -328,7 +320,6 @@ static const struct omapdss_sdi_ops sdi_ops = { .check_timings = sdi_check_timings, .set_timings = sdi_set_timings, - .get_timings = sdi_get_timings, }; static void sdi_init_output(struct sdi_device *sdi) diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index c573254df766..ab0eeec38db0 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -614,18 +614,6 @@ static int venc_check_timings(struct omap_dss_device *dssdev, } } -static void venc_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct venc_device *venc = dssdev_to_venc(dssdev); - - mutex_lock(&venc->venc_lock); - - *vm = venc->vm; - - mutex_unlock(&venc->venc_lock); -} - static int venc_init_regulator(struct venc_device *venc) { struct regulator *vdda_dac; @@ -771,7 +759,6 @@ static const struct omapdss_atv_ops venc_ops = { .check_timings = venc_check_timings, .set_timings = venc_set_timings, - .get_timings = venc_get_timings, }; static void venc_init_output(struct venc_device *venc) -- GitLab From 21ebcbac5066a36e0d88ea43921cce8418fd453a Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 14 Feb 2018 03:26:52 +0200 Subject: [PATCH 0897/1692] drm/omap: dss: Remove unused omapdss_default_get_timings() All omap_dss_driver instances provide the get_timings operation. Remove the default function. 
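Every panel and connector driver follows the same pattern, caching the mode in its private data; a condensed sketch of that existing pattern (illustration only, assuming the per-driver panel_drv_data/to_panel_data helpers these drivers already define):

  static void example_get_timings(struct omap_dss_device *dssdev,
                                  struct videomode *vm)
  {
          struct panel_drv_data *ddata = to_panel_data(dssdev);

          /* Each driver keeps its own copy, so no core default is needed. */
          *vm = ddata->vm;
  }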
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/display.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index e1ab1592186e..752c9811c73a 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -28,19 +28,12 @@ #include "omapdss.h" -static void omapdss_default_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - *vm = dssdev->panel.vm; -} - static LIST_HEAD(panel_list); static DEFINE_MUTEX(panel_list_mutex); static int disp_num_counter; int omapdss_register_display(struct omap_dss_device *dssdev) { - struct omap_dss_driver *drv = dssdev->driver; int id; /* @@ -60,9 +53,6 @@ int omapdss_register_display(struct omap_dss_device *dssdev) dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL, "display%u", id); - if (drv && drv->get_timings == NULL) - drv->get_timings = omapdss_default_get_timings; - mutex_lock(&panel_list_mutex); list_add_tail(&dssdev->panel_list, &panel_list); mutex_unlock(&panel_list_mutex); -- GitLab From e0528c944058e137a1c74176173109a6864241ac Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 14 Feb 2018 03:12:37 +0200 Subject: [PATCH 0898/1692] drm/omap: dss: Constify omap_dss_driver operations structure The structure contains function pointers that don't need to be modified. Make all its instances const to improve security. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 2 +- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 2 +- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2 +- drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 +- drivers/gpu/drm/omapdrm/omap_connector.c | 6 +++--- drivers/gpu/drm/omapdrm/omap_encoder.c | 6 +++--- 14 files changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 5fdecc12b608..a49bc4a8dcae 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -154,7 +154,7 @@ static int tvc_check_timings(struct omap_dss_device *dssdev, return in->ops.atv->check_timings(in, vm); } -static struct omap_dss_driver tvc_driver = { +static const struct omap_dss_driver tvc_driver = { .connect = tvc_connect, .disconnect = tvc_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 6d8cbd9e2110..c320f3c5ae6c 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -297,7 +297,7 @@ static void dvic_disable_hpd(struct omap_dss_device *dssdev) mutex_unlock(&ddata->hpd_lock); } -static struct omap_dss_driver dvic_driver = { +static const struct omap_dss_driver dvic_driver = { .connect = dvic_connect, .disconnect = 
dvic_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index ca30ed9da7eb..6f12f9bb8054 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -269,7 +269,7 @@ static int hdmic_set_infoframe(struct omap_dss_device *dssdev, return in->ops.hdmi->set_infoframe(in, avi); } -static struct omap_dss_driver hdmic_driver = { +static const struct omap_dss_driver hdmic_driver = { .connect = hdmic_connect, .disconnect = hdmic_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 6cbf570d6727..e874f0b72798 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -153,7 +153,7 @@ static int panel_dpi_check_timings(struct omap_dss_device *dssdev, return in->ops.dpi->check_timings(in, vm); } -static struct omap_dss_driver panel_dpi_ops = { +static const struct omap_dss_driver panel_dpi_ops = { .connect = panel_dpi_connect, .disconnect = panel_dpi_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 428de90fced1..d7c57d84d7bd 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1210,7 +1210,7 @@ static void dsicm_get_size(struct omap_dss_device *dssdev, *height = ddata->height_mm; } -static struct omap_dss_driver dsicm_ops = { +static const struct omap_dss_driver dsicm_ops = { .connect = dsicm_connect, .disconnect = dsicm_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 754197099440..ad98d2ffcf1b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -228,7 +228,7 @@ static int lb035q02_check_timings(struct omap_dss_device *dssdev, return in->ops.dpi->check_timings(in, vm); } -static struct omap_dss_driver lb035q02_ops = { +static const struct omap_dss_driver lb035q02_ops = { .connect = lb035q02_connect, .disconnect = lb035q02_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 9a3b27fa5cb5..0846fb594352 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -222,7 +222,7 @@ static int nec_8048_check_timings(struct omap_dss_device *dssdev, return in->ops.dpi->check_timings(in, vm); } -static struct omap_dss_driver nec_8048_ops = { +static const struct omap_dss_driver nec_8048_ops = { .connect = nec_8048_connect, .disconnect = nec_8048_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index bb5b680cabfe..e7c4a830e214 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -192,7 +192,7 @@ static int sharp_ls_check_timings(struct omap_dss_device *dssdev, return in->ops.dpi->check_timings(in, vm); } -static struct omap_dss_driver sharp_ls_ops = { +static const struct omap_dss_driver sharp_ls_ops = { .connect = sharp_ls_connect, .disconnect = sharp_ls_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c 
b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index f34c06bb5bd7..02415592f856 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -693,7 +693,7 @@ static int acx565akm_check_timings(struct omap_dss_device *dssdev, return in->ops.sdi->check_timings(in, vm); } -static struct omap_dss_driver acx565akm_ops = { +static const struct omap_dss_driver acx565akm_ops = { .connect = acx565akm_connect, .disconnect = acx565akm_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index a1f1dc18407a..3c4036ee7b0c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -362,7 +362,7 @@ static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev, return in->ops.dpi->check_timings(in, vm); } -static struct omap_dss_driver td028ttec1_ops = { +static const struct omap_dss_driver td028ttec1_ops = { .connect = td028ttec1_panel_connect, .disconnect = td028ttec1_panel_disconnect, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index c08e22b43447..14a37b82ee13 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -456,7 +456,7 @@ static int tpo_td043_check_timings(struct omap_dss_device *dssdev, return in->ops.dpi->check_timings(in, vm); } -static struct omap_dss_driver tpo_td043_ops = { +static const struct omap_dss_driver tpo_td043_ops = { .connect = tpo_td043_connect, .disconnect = tpo_td043_disconnect, diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 426ac8297f5f..e3d31b2fee4c 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -466,7 +466,7 @@ struct omap_dss_device { const char *name; - struct omap_dss_driver *driver; + const struct omap_dss_driver *driver; union { const struct omapdss_dpi_ops *dpi; diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 2ddb856666c4..3b60086f2938 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -62,7 +62,7 @@ static enum drm_connector_status omap_connector_detect( { struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev = omap_connector->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + const struct omap_dss_driver *dssdrv = dssdev->driver; enum drm_connector_status ret; if (dssdrv->detect) { @@ -107,7 +107,7 @@ static int omap_connector_get_modes(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev = omap_connector->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + const struct omap_dss_driver *dssdrv = dssdev->driver; struct drm_device *dev = connector->dev; int n = 0; @@ -170,7 +170,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector, { struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev = omap_connector->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + const struct omap_dss_driver *dssdrv = dssdev->driver; struct videomode vm = {0}; struct drm_device *dev = connector->dev; struct drm_display_mode *new_mode; diff --git 
a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index fcdf4b0a8eec..ec0f451e3b36 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -94,7 +94,7 @@ static void omap_encoder_disable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + const struct omap_dss_driver *dssdrv = dssdev->driver; dssdrv->disable(dssdev); } @@ -106,7 +106,7 @@ static int omap_encoder_update(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + const struct omap_dss_driver *dssdrv = dssdev->driver; int ret; if (dssdrv->check_timings) { @@ -137,7 +137,7 @@ static void omap_encoder_enable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + const struct omap_dss_driver *dssdrv = dssdev->driver; int r; omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc), -- GitLab From d65b0e0530bbdf2c90657c086319f68ddfd3837d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 27 Feb 2018 20:11:52 +0200 Subject: [PATCH 0899/1692] drm/omap: displays: Remove videomode from omap_dss_device structure The omap_dss_device structure stores a videomode. All the connector and panel drivers that use omap_dss_device also store the videomode in their own panel_drv_data structures. There's no need to duplicate, remove the videomode field from omap_dss_device. 
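The resulting pattern, distilled into a sketch (illustration only, assuming the per-driver panel_drv_data/to_panel_data helpers; not code added by this patch): the driver-private copy becomes the only copy, and set_timings no longer mirrors the mode into dssdev->panel.vm:

  static void example_set_timings(struct omap_dss_device *dssdev,
                                  struct videomode *vm)
  {
          struct panel_drv_data *ddata = to_panel_data(dssdev);
          struct omap_dss_device *in = ddata->in;

          ddata->vm = *vm;        /* single copy, owned by the driver */

          in->ops.dpi->set_timings(in, vm);
  }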
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 2 -- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 2 -- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 2 -- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 1 - drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 1 - drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 1 - drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 8 +++----- .../gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 2 -- .../gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2 -- drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 -- 15 files changed, 3 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index a49bc4a8dcae..6b640ede6614 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -132,7 +132,6 @@ static void tvc_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.atv->set_timings(in, vm); } @@ -186,7 +185,6 @@ static int tvc_probe(struct platform_device *pdev) dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = tvc_pal_vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index c320f3c5ae6c..84598ea12a9b 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -139,7 +139,6 @@ static void dvic_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.dvi->set_timings(in, vm); } @@ -403,7 +402,6 @@ static int dvic_probe(struct platform_device *pdev) dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = dvic_default_vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 6f12f9bb8054..e031280468fb 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -142,7 +142,6 @@ static void hdmic_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.hdmi->set_timings(in, vm); } @@ -368,7 +367,6 @@ static int hdmic_probe(struct platform_device *pdev) dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = hdmic_default_vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 27d63a14efe3..0e3f4a20e531 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -143,7 +143,6 @@ static void opa362_set_timings(struct omap_dss_device *dssdev, 
dev_dbg(dssdev->dev, "set_timings\n"); ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.atv->set_timings(in, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 566c63a3ad59..08e63e39d0b7 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -137,7 +137,6 @@ static void tfp410_set_timings(struct omap_dss_device *dssdev, tfp410_fix_timings(vm); ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.dpi->set_timings(in, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 4753e5455f82..f35e2afa3e9c 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -131,7 +131,6 @@ static void tpd_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.hdmi->set_timings(in, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index e874f0b72798..bbd630ead3c1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -131,7 +131,6 @@ static void panel_dpi_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.dpi->set_timings(in, vm); } @@ -230,7 +229,6 @@ static int panel_dpi_probe(struct platform_device *pdev) dssdev->driver = &panel_dpi_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index d7c57d84d7bd..555ab2ac5576 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -956,9 +956,8 @@ static int dsicm_update(struct omap_dss_device *dssdev, } /* XXX no need to send this every frame, but dsi break if not done */ - r = dsicm_set_update_window(ddata, 0, 0, - dssdev->panel.vm.hactive, - dssdev->panel.vm.vactive); + r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive, + ddata->vm.vactive); if (r) goto err; @@ -1089,7 +1088,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, } size = min((u32)w * h * 3, - dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3); + ddata->vm.hactive * ddata->vm.vactive * 3); in->ops.dsi->bus_lock(in); @@ -1331,7 +1330,6 @@ static int dsicm_probe(struct platform_device *pdev) dssdev = &ddata->dssdev; dssdev->dev = dev; dssdev->driver = &dsicm_ops; - dssdev->panel.vm = ddata->vm; dssdev->type = OMAP_DISPLAY_TYPE_DSI; dssdev->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index ad98d2ffcf1b..0a6ab6470253 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -206,7 +206,6 @@ static void lb035q02_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.dpi->set_timings(in, vm); } @@ -281,7 +280,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev->driver = &lb035q02_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - 
dssdev->panel.vm = ddata->vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 0846fb594352..9816e661c97d 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -200,7 +200,6 @@ static void nec_8048_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.dpi->set_timings(in, vm); } @@ -305,7 +304,6 @@ static int nec_8048_probe(struct spi_device *spi) dssdev->driver = &nec_8048_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index e7c4a830e214..e259240f96fa 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -170,7 +170,6 @@ static void sharp_ls_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.dpi->set_timings(in, vm); } @@ -281,7 +280,6 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev->driver = &sharp_ls_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 02415592f856..d298a3f56189 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -671,7 +671,6 @@ static void acx565akm_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.sdi->set_timings(in, vm); } @@ -812,7 +811,6 @@ static int acx565akm_probe(struct spi_device *spi) dssdev->driver = &acx565akm_ops; dssdev->type = OMAP_DISPLAY_TYPE_SDI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 3c4036ee7b0c..366e0f71288e 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -340,7 +340,6 @@ static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; in->ops.dpi->set_timings(in, vm); } @@ -406,7 +405,6 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev->driver = &td028ttec1_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 14a37b82ee13..8e98232ee9d9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -434,7 +434,6 @@ static void tpo_td043_set_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; ddata->vm = *vm; - dssdev->panel.vm = *vm; 
in->ops.dpi->set_timings(in, vm); } @@ -549,7 +548,6 @@ static int tpo_td043_probe(struct spi_device *spi) dssdev->driver = &tpo_td043_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; r = omapdss_register_display(dssdev); if (r) { diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index e3d31b2fee4c..a93afbbe19de 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -458,8 +458,6 @@ struct omap_dss_device { enum omap_display_type output_type; struct { - struct videomode vm; - enum omap_dss_dsi_pixel_format dsi_pix_fmt; enum omap_dss_dsi_mode dsi_mode; } panel; -- GitLab From df91128b205d65a714ab3b2c82481c69ae18b1a9 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 14:48:45 +0200 Subject: [PATCH 0900/1692] drm/omap: dss: Remove omap_dss_device panel fields The omap_dss_device panel.dsi_pix_fmt and panel.dsi_mode fields are unused. Remove them. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 1 - drivers/gpu/drm/omapdrm/dss/omapdss.h | 5 ----- 2 files changed, 6 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 555ab2ac5576..8d98cd628e11 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1333,7 +1333,6 @@ static int dsicm_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_DSI; dssdev->owner = THIS_MODULE; - dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index a93afbbe19de..39e2906fd5fe 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -457,11 +457,6 @@ struct omap_dss_device { enum omap_display_type type; enum omap_display_type output_type; - struct { - enum omap_dss_dsi_pixel_format dsi_pix_fmt; - enum omap_dss_dsi_mode dsi_mode; - } panel; - const char *name; const struct omap_dss_driver *driver; -- GitLab From 7e7a0edecca0e317b6a29478c24e8f7a2234382b Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 23:48:35 +0200 Subject: [PATCH 0901/1692] drm/omap: dss: Rename omap_dss_device list field to output_list For coherency with the panel_list field, rename list to output_list. 
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 +- drivers/gpu/drm/omapdrm/dss/output.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 39e2906fd5fe..abf101bb27ea 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -481,7 +481,7 @@ struct omap_dss_device { /* OMAP DSS output specific fields */ - struct list_head list; + struct list_head output_list; /* DISPC channel for this output */ enum omap_channel dispc_channel; diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 96b9d4cd505f..0fcd13ea8824 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -96,14 +96,14 @@ EXPORT_SYMBOL(omapdss_output_unset_device); int omapdss_register_output(struct omap_dss_device *out) { - list_add_tail(&out->list, &output_list); + list_add_tail(&out->output_list, &output_list); return 0; } EXPORT_SYMBOL(omapdss_register_output); void omapdss_unregister_output(struct omap_dss_device *out) { - list_del(&out->list); + list_del(&out->output_list); } EXPORT_SYMBOL(omapdss_unregister_output); @@ -111,7 +111,7 @@ bool omapdss_component_is_output(struct device_node *node) { struct omap_dss_device *out; - list_for_each_entry(out, &output_list, list) { + list_for_each_entry(out, &output_list, output_list) { if (out->dev->of_node == node) return true; } @@ -124,7 +124,7 @@ struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id) { struct omap_dss_device *out; - list_for_each_entry(out, &output_list, list) { + list_for_each_entry(out, &output_list, output_list) { if (out->id == id) return out; } @@ -145,7 +145,7 @@ struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *po reg = dss_of_port_get_port_number(port); - list_for_each_entry(out, &output_list, list) { + list_for_each_entry(out, &output_list, output_list) { if (out->dev->of_node == src_node && out->port_num == reg) { of_node_put(src_node); return omap_dss_get_device(out); -- GitLab From 6a7c5a2200ad10a23912d3f40ef104f0d0543de4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 23:49:24 +0200 Subject: [PATCH 0902/1692] drm/omap: dss: Create global list of all omap_dss_device instances The omap_dss_device instances are stored in two separate lists, depending on whether they are panels or outputs. Create a third list that stores all omap_dss_device instances to allow generic code to operate on all instances. 
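A hypothetical consumer of the new list, kept inside base.c where omapdss_devices_list and omapdss_devices_lock live (sketch only, the example_* name is not introduced by this patch):

  static void example_for_each_device(void (*fn)(struct omap_dss_device *dssdev))
  {
          struct omap_dss_device *dssdev;

          mutex_lock(&omapdss_devices_lock);
          list_for_each_entry(dssdev, &omapdss_devices_list, list)
                  fn(dssdev);
          mutex_unlock(&omapdss_devices_lock);
  }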
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 45 +++++++++++++++++++++------ drivers/gpu/drm/omapdrm/dss/display.c | 4 +++ drivers/gpu/drm/omapdrm/dss/omapdss.h | 4 +++ drivers/gpu/drm/omapdrm/dss/output.c | 2 ++ 4 files changed, 46 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 99e8cb8dc65b..18b72d7c717a 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -14,24 +14,17 @@ */ #include +#include #include +#include #include #include -#include #include "dss.h" #include "omapdss.h" static struct dss_device *dss_device; -static struct list_head omapdss_comp_list; - -struct omapdss_comp_node { - struct list_head list; - struct device_node *node; - bool dss_core_component; -}; - struct dss_device *omapdss_get_dss(void) { return dss_device; @@ -56,6 +49,40 @@ const struct dispc_ops *dispc_get_ops(struct dss_device *dss) } EXPORT_SYMBOL(dispc_get_ops); + +/* ----------------------------------------------------------------------------- + * OMAP DSS Devices Handling + */ + +static LIST_HEAD(omapdss_devices_list); +static DEFINE_MUTEX(omapdss_devices_lock); + +void omapdss_device_register(struct omap_dss_device *dssdev) +{ + mutex_lock(&omapdss_devices_lock); + list_add_tail(&dssdev->list, &omapdss_devices_list); + mutex_unlock(&omapdss_devices_lock); +} + +void omapdss_device_unregister(struct omap_dss_device *dssdev) +{ + mutex_lock(&omapdss_devices_lock); + list_del(&dssdev->list); + mutex_unlock(&omapdss_devices_lock); +} + +/* ----------------------------------------------------------------------------- + * Components Handling + */ + +static struct list_head omapdss_comp_list; + +struct omapdss_comp_node { + struct list_head list; + struct device_node *node; + bool dss_core_component; +}; + static bool omapdss_list_contains(const struct device_node *node) { struct omapdss_comp_node *comp; diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 752c9811c73a..f0f9239f09d1 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -56,6 +56,8 @@ int omapdss_register_display(struct omap_dss_device *dssdev) mutex_lock(&panel_list_mutex); list_add_tail(&dssdev->panel_list, &panel_list); mutex_unlock(&panel_list_mutex); + + omapdss_device_register(dssdev); return 0; } EXPORT_SYMBOL(omapdss_register_display); @@ -65,6 +67,8 @@ void omapdss_unregister_display(struct omap_dss_device *dssdev) mutex_lock(&panel_list_mutex); list_del(&dssdev->panel_list); mutex_unlock(&panel_list_mutex); + + omapdss_device_register(dssdev); } EXPORT_SYMBOL(omapdss_unregister_display); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index abf101bb27ea..e029613509a1 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -450,6 +450,7 @@ struct omap_dss_device { struct module *owner; + struct list_head list; struct list_head panel_list; unsigned int alias_id; @@ -560,6 +561,9 @@ static inline bool omapdss_is_initialized(void) int omapdss_register_display(struct omap_dss_device *dssdev); void omapdss_unregister_display(struct omap_dss_device *dssdev); +void omapdss_device_register(struct omap_dss_device *dssdev); +void omapdss_device_unregister(struct omap_dss_device *dssdev); + struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); void 
omap_dss_put_device(struct omap_dss_device *dssdev); #define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 0fcd13ea8824..1a2d24906edd 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -97,6 +97,7 @@ EXPORT_SYMBOL(omapdss_output_unset_device); int omapdss_register_output(struct omap_dss_device *out) { list_add_tail(&out->output_list, &output_list); + omapdss_device_register(out); return 0; } EXPORT_SYMBOL(omapdss_register_output); @@ -104,6 +105,7 @@ EXPORT_SYMBOL(omapdss_register_output); void omapdss_unregister_output(struct omap_dss_device *out) { list_del(&out->output_list); + omapdss_device_unregister(out); } EXPORT_SYMBOL(omapdss_unregister_output); -- GitLab From 9184f8d94c389c4712b4f393cca4a09c9e770514 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 23:53:16 +0200 Subject: [PATCH 0903/1692] drm/omap: dss: Create and use omapdss_device_is_registered() The omapdss_component_is_loaded() function test whether a component is loaded by checking whether it is present in the displays list or the outputs list. Simplify the implementation by checking for the component in the global omap_dss_device list. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 22 +++++++++++++++++++--- drivers/gpu/drm/omapdrm/dss/display.c | 18 ------------------ drivers/gpu/drm/omapdrm/dss/omapdss.h | 3 --- drivers/gpu/drm/omapdrm/dss/output.c | 13 ------------- 4 files changed, 19 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 18b72d7c717a..63fe0a717884 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -71,6 +71,24 @@ void omapdss_device_unregister(struct omap_dss_device *dssdev) mutex_unlock(&omapdss_devices_lock); } +static bool omapdss_device_is_registered(struct device_node *node) +{ + struct omap_dss_device *dssdev; + bool found = false; + + mutex_lock(&omapdss_devices_lock); + + list_for_each_entry(dssdev, &omapdss_devices_list, list) { + if (dssdev->dev->of_node == node) { + found = true; + break; + } + } + + mutex_unlock(&omapdss_devices_lock); + return found; +} + /* ----------------------------------------------------------------------------- * Components Handling */ @@ -157,9 +175,7 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp) { if (comp->dss_core_component) return true; - if (omapdss_component_is_display(comp->node)) - return true; - if (omapdss_component_is_output(comp->node)) + if (omapdss_device_is_registered(comp->node)) return true; return false; diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index f0f9239f09d1..e7872e0c8dab 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -72,24 +72,6 @@ void omapdss_unregister_display(struct omap_dss_device *dssdev) } EXPORT_SYMBOL(omapdss_unregister_display); -bool omapdss_component_is_display(struct device_node *node) -{ - struct omap_dss_device *dssdev; - bool found = false; - - mutex_lock(&panel_list_mutex); - list_for_each_entry(dssdev, &panel_list, panel_list) { - if (dssdev->dev->of_node == node) { - found = true; - goto out; - } - } -out: - mutex_unlock(&panel_list_mutex); - return found; -} -EXPORT_SYMBOL(omapdss_component_is_display); - struct 
omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev) { if (!try_module_get(dssdev->owner)) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index e029613509a1..1ccf0c67d308 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -737,9 +737,6 @@ struct dispc_ops { struct dispc_device *dispc_get_dispc(struct dss_device *dss); const struct dispc_ops *dispc_get_ops(struct dss_device *dss); -bool omapdss_component_is_display(struct device_node *node); -bool omapdss_component_is_output(struct device_node *node); - bool omapdss_stack_is_ready(void); void omapdss_gather_components(struct device *dev); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 1a2d24906edd..7f18153a1bde 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -109,19 +109,6 @@ void omapdss_unregister_output(struct omap_dss_device *out) { list_del(&out->output_list); omapdss_device_unregister(out); } EXPORT_SYMBOL(omapdss_unregister_output); -bool omapdss_component_is_output(struct device_node *node) -{ - struct omap_dss_device *out; - - list_for_each_entry(out, &output_list, output_list) { - if (out->dev->of_node == node) - return true; - } - - return false; -} -EXPORT_SYMBOL(omapdss_component_is_output); - struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id) { struct omap_dss_device *out; -- GitLab From a7e82a67c1d7b0be6c195a7c4cf2548fa873c056 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 1 Mar 2018 23:35:55 +0200 Subject: [PATCH 0904/1692] drm/omap: dss: Rework output lookup by port node The omap_dss_find_output_by_port_node() function defined in output.c looks up an output from its port node. To do so it needs to call helper functions from dss-of.c to look up the port parent and the port number. As omap_dss_find_output_by_port_node() is only called by omapdss_of_find_source_for_first_ep() from dss-of.c this goes back and forth between the two source files and isn't very clear. Simplify the code by passing both the parent and the port number to omap_dss_find_output_by_port_node() instead of the port node, and rename the function to omap_dss_find_output_by_port(). Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dss-of.c | 39 +++++++++++++-------------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 6 ++--- drivers/gpu/drm/omapdrm/dss/output.c | 17 +++--------- 3 files changed, 23 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index 4602a79c6c44..b51af09e9111 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -21,7 +21,8 @@ #include "omapdss.h" -struct device_node *dss_of_port_get_parent_device(struct device_node *port) +static struct device_node * +dss_of_port_get_parent_device(struct device_node *port) { struct device_node *np; int i; @@ -45,40 +46,36 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port) return NULL; } -u32 dss_of_port_get_port_number(struct device_node *port) -{ - int r; - u32 reg; - - r = of_property_read_u32(port, "reg", &reg); - if (r) - reg = 0; - - return reg; -} - struct omap_dss_device * omapdss_of_find_source_for_first_ep(struct device_node *node) { - struct device_node *ep; + struct device_node *src_node; struct device_node *src_port; + struct device_node *ep; struct omap_dss_device *src; + u32 port_number = 0; + /* Get the endpoint...
*/ ep = of_graph_get_endpoint_by_regs(node, 0, 0); if (!ep) return ERR_PTR(-EINVAL); + /* ... and its remote port... */ src_port = of_graph_get_remote_port(ep); - if (!src_port) { - of_node_put(ep); - return ERR_PTR(-EINVAL); - } - of_node_put(ep); + if (!src_port) + return ERR_PTR(-EINVAL); - src = omap_dss_find_output_by_port_node(src_port); - + /* ... and the remote port's number and parent... */ + of_property_read_u32(src_port, "reg", &port_number); + src_node = dss_of_port_get_parent_device(src_port); of_node_put(src_port); + if (!src_node) + return NULL; + + /* ... and finally the source. */ + src = omap_dss_find_output_by_port(src_node, port_number); + of_node_put(src_node); return src ? src : ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 1ccf0c67d308..ff0f603bce76 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -576,7 +576,8 @@ int omap_dss_get_num_overlays(void); int omapdss_register_output(struct omap_dss_device *output); void omapdss_unregister_output(struct omap_dss_device *output); struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); -struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port); +struct omap_dss_device *omap_dss_find_output_by_port(struct device_node *src, + unsigned int port); int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev); int omapdss_output_unset_device(struct omap_dss_device *out); @@ -603,9 +604,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) struct omap_dss_device * omapdss_of_find_source_for_first_ep(struct device_node *node); -struct device_node *dss_of_port_get_parent_device(struct device_node *port); -u32 dss_of_port_get_port_number(struct device_node *port); - enum dss_writeback_channel { DSS_WB_LCD1_MGR = 0, DSS_WB_LCD2_MGR = 1, diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 7f18153a1bde..be254ea42e08 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -122,27 +122,16 @@ struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id) } EXPORT_SYMBOL(omap_dss_get_output); -struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port) +struct omap_dss_device *omap_dss_find_output_by_port(struct device_node *src, + unsigned int port) { - struct device_node *src_node; struct omap_dss_device *out; - u32 reg; - - src_node = dss_of_port_get_parent_device(port); - if (!src_node) - return NULL; - - reg = dss_of_port_get_port_number(port); list_for_each_entry(out, &output_list, output_list) { - if (out->dev->of_node == src_node && out->port_num == reg) { - of_node_put(src_node); + if (out->dev->of_node == src && out->port_num == port) return omap_dss_get_device(out); - } } - of_node_put(src_node); - return NULL; } -- GitLab From e10bd354ad79d2772842300c85ffd1a49722cfae Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 1 Mar 2018 23:35:55 +0200 Subject: [PATCH 0905/1692] drm/omap: dss: Allow looking up any device by port The omap_dss_find_output_by_port() function looks up an omap_dss_device by port from the list of devices registered as outputs. In preparation for looking up sinks in addition to sources, allow the function to look up any registered device. Rename it to omap_dss_find_device_by_port() to match its new purpose. 
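Usage sketch (hypothetical helper, assuming the caller already holds references to the remote port and its parent node, as omapdss_of_find_source_for_first_ep() does): read the port's "reg" property and hand both values to the renamed lookup function, which now matches any registered device rather than only outputs:

  static struct omap_dss_device *
  example_lookup_by_remote_port(struct device_node *parent,
                                struct device_node *port)
  {
          u32 port_number = 0;

          of_property_read_u32(port, "reg", &port_number);

          /* Returns a reference taken with omap_dss_get_device(), or NULL. */
          return omapdss_find_device_by_port(parent, port_number);
  }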
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 13 +++++++++++++ drivers/gpu/drm/omapdrm/dss/dss-of.c | 2 +- drivers/gpu/drm/omapdrm/dss/omapdss.h | 4 ++-- drivers/gpu/drm/omapdrm/dss/output.c | 13 ------------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 63fe0a717884..df6cb1ac43c8 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -89,6 +89,19 @@ static bool omapdss_device_is_registered(struct device_node *node) return found; } +struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, + unsigned int port) +{ + struct omap_dss_device *dssdev; + + list_for_each_entry(dssdev, &omapdss_devices_list, list) { + if (dssdev->dev->of_node == src && dssdev->port_num == port) + return omap_dss_get_device(dssdev); + } + + return NULL; +} + /* ----------------------------------------------------------------------------- * Components Handling */ diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index b51af09e9111..771b20db2d98 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -74,7 +74,7 @@ omapdss_of_find_source_for_first_ep(struct device_node *node) return NULL; /* ... and finally the source. */ - src = omap_dss_find_output_by_port(src_node, port_number); + src = omapdss_find_device_by_port(src_node, port_number); of_node_put(src_node); return src ? src : ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index ff0f603bce76..2ec74206bcff 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -563,6 +563,8 @@ void omapdss_unregister_display(struct omap_dss_device *dssdev); void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); +struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, + unsigned int port); struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); void omap_dss_put_device(struct omap_dss_device *dssdev); @@ -576,8 +578,6 @@ int omap_dss_get_num_overlays(void); int omapdss_register_output(struct omap_dss_device *output); void omapdss_unregister_output(struct omap_dss_device *output); struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); -struct omap_dss_device *omap_dss_find_output_by_port(struct device_node *src, - unsigned int port); int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev); int omapdss_output_unset_device(struct omap_dss_device *out); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index be254ea42e08..e659c8e5c419 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -122,19 +122,6 @@ struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id) } EXPORT_SYMBOL(omap_dss_get_output); -struct omap_dss_device *omap_dss_find_output_by_port(struct device_node *src, - unsigned int port) -{ - struct omap_dss_device *out; - - list_for_each_entry(out, &output_list, output_list) { - if (out->dev->of_node == src && out->port_num == port) - return omap_dss_get_device(out); - } - - return NULL; -} - struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev) { while 
(dssdev->src) -- GitLab From b93109d7dc9e15649e1cf18281f02d8b4a102584 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 15:58:13 +0200 Subject: [PATCH 0906/1692] drm/omap: dss: Move common device operations to common structure The various types of omapdss_*_ops structures define multiple operations that are not specific to a bus type. To simplify the code and remove dependencies on specific bus types move those operations to a common structure. Operations that are specific to a bus type are kept in the specialized ops structures. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 14 +- .../gpu/drm/omapdrm/displays/connector-dvi.c | 14 +- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 42 +++--- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 20 ++- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 20 ++- .../drm/omapdrm/displays/encoder-tpd12s015.c | 48 +++---- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 16 +-- .../gpu/drm/omapdrm/displays/panel-dsi-cm.c | 120 +++++++++--------- .../displays/panel-lgphilips-lb035q02.c | 14 +- .../omapdrm/displays/panel-nec-nl8048hl11.c | 14 +- .../displays/panel-sharp-ls037v7dw01.c | 14 +- .../omapdrm/displays/panel-sony-acx565akm.c | 14 +- .../omapdrm/displays/panel-tpo-td028ttec1.c | 14 +- .../omapdrm/displays/panel-tpo-td043mtea1.c | 16 +-- drivers/gpu/drm/omapdrm/dss/dpi.c | 4 +- drivers/gpu/drm/omapdrm/dss/dsi.c | 50 ++++---- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 14 +- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 12 +- drivers/gpu/drm/omapdrm/dss/omapdss.h | 109 +++------------- drivers/gpu/drm/omapdrm/dss/sdi.c | 4 +- drivers/gpu/drm/omapdrm/dss/venc.c | 4 +- 21 files changed, 256 insertions(+), 321 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 6b640ede6614..a94868d9398b 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -59,7 +59,7 @@ static int tvc_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.atv->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -79,7 +79,7 @@ static void tvc_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.atv->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -99,9 +99,9 @@ static int tvc_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.atv->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.atv->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -120,7 +120,7 @@ static void tvc_disable(struct omap_dss_device *dssdev) if (!omapdss_device_is_enabled(dssdev)) return; - in->ops.atv->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -133,7 +133,7 @@ static void tvc_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.atv->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void tvc_get_timings(struct omap_dss_device *dssdev, @@ -150,7 +150,7 @@ static int tvc_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.atv->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static const struct 
omap_dss_driver tvc_driver = { diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 84598ea12a9b..021e3b651c89 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -72,7 +72,7 @@ static int dvic_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.dvi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -90,7 +90,7 @@ static void dvic_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.dvi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -108,9 +108,9 @@ static int dvic_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dvi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.dvi->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -127,7 +127,7 @@ static void dvic_disable(struct omap_dss_device *dssdev) if (!omapdss_device_is_enabled(dssdev)) return; - in->ops.dvi->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -140,7 +140,7 @@ static void dvic_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.dvi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void dvic_get_timings(struct omap_dss_device *dssdev, @@ -157,7 +157,7 @@ static int dvic_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.dvi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static int dvic_ddc_read(struct i2c_adapter *adapter, diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index e031280468fb..b528bd51ada3 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -69,7 +69,7 @@ static int hdmic_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.hdmi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -89,7 +89,7 @@ static void hdmic_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.hdmi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -109,9 +109,9 @@ static int hdmic_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.hdmi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.hdmi->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -130,7 +130,7 @@ static void hdmic_disable(struct omap_dss_device *dssdev) if (!omapdss_device_is_enabled(dssdev)) return; - in->ops.hdmi->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -143,7 +143,7 @@ static void hdmic_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.hdmi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void hdmic_get_timings(struct omap_dss_device *dssdev, @@ -160,7 +160,7 @@ static int hdmic_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.hdmi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static 
int hdmic_read_edid(struct omap_dss_device *dssdev, @@ -169,7 +169,7 @@ static int hdmic_read_edid(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.hdmi->read_edid(in, edid, len); + return in->ops->hdmi.read_edid(in, edid, len); } static bool hdmic_detect(struct omap_dss_device *dssdev) @@ -181,9 +181,9 @@ static bool hdmic_detect(struct omap_dss_device *dssdev) if (gpio_is_valid(ddata->hpd_gpio)) connected = gpio_get_value_cansleep(ddata->hpd_gpio); else - connected = in->ops.hdmi->detect(in); - if (!connected && in->ops.hdmi->lost_hotplug) - in->ops.hdmi->lost_hotplug(in); + connected = in->ops->hdmi.detect(in); + if (!connected && in->ops->hdmi.lost_hotplug) + in->ops->hdmi.lost_hotplug(in); return connected; } @@ -201,8 +201,8 @@ static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, ddata->hpd_cb_data = cb_data; mutex_unlock(&ddata->hpd_lock); return 0; - } else if (in->ops.hdmi->register_hpd_cb) { - return in->ops.hdmi->register_hpd_cb(in, cb, cb_data); + } else if (in->ops->hdmi.register_hpd_cb) { + return in->ops->hdmi.register_hpd_cb(in, cb, cb_data); } return -ENOTSUPP; @@ -218,8 +218,8 @@ static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) ddata->hpd_cb = NULL; ddata->hpd_cb_data = NULL; mutex_unlock(&ddata->hpd_lock); - } else if (in->ops.hdmi->unregister_hpd_cb) { - in->ops.hdmi->unregister_hpd_cb(in); + } else if (in->ops->hdmi.unregister_hpd_cb) { + in->ops->hdmi.unregister_hpd_cb(in); } } @@ -232,8 +232,8 @@ static void hdmic_enable_hpd(struct omap_dss_device *dssdev) mutex_lock(&ddata->hpd_lock); ddata->hpd_enabled = true; mutex_unlock(&ddata->hpd_lock); - } else if (in->ops.hdmi->enable_hpd) { - in->ops.hdmi->enable_hpd(in); + } else if (in->ops->hdmi.enable_hpd) { + in->ops->hdmi.enable_hpd(in); } } @@ -246,8 +246,8 @@ static void hdmic_disable_hpd(struct omap_dss_device *dssdev) mutex_lock(&ddata->hpd_lock); ddata->hpd_enabled = false; mutex_unlock(&ddata->hpd_lock); - } else if (in->ops.hdmi->disable_hpd) { - in->ops.hdmi->disable_hpd(in); + } else if (in->ops->hdmi.disable_hpd) { + in->ops->hdmi.disable_hpd(in); } } @@ -256,7 +256,7 @@ static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode); + return in->ops->hdmi.set_hdmi_mode(in, hdmi_mode); } static int hdmic_set_infoframe(struct omap_dss_device *dssdev, @@ -265,7 +265,7 @@ static int hdmic_set_infoframe(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.hdmi->set_infoframe(in, avi); + return in->ops->hdmi.set_infoframe(in, avi); } static const struct omap_dss_driver hdmic_driver = { diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 0e3f4a20e531..37982ffe0ad4 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -50,7 +50,7 @@ static int opa362_connect(struct omap_dss_device *dssdev, return PTR_ERR(in); } - r = in->ops.atv->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -82,7 +82,7 @@ static void opa362_disconnect(struct omap_dss_device *dssdev, dst->src = NULL; dssdev->dst = NULL; - in->ops.atv->disconnect(in, &ddata->dssdev); + 
in->ops->disconnect(in, &ddata->dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -102,9 +102,9 @@ static int opa362_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.atv->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.atv->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -129,7 +129,7 @@ static void opa362_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - in->ops.atv->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -144,7 +144,7 @@ static void opa362_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.atv->set_timings(in, vm); + in->ops->set_timings(in, vm); } static int opa362_check_timings(struct omap_dss_device *dssdev, @@ -155,16 +155,14 @@ static int opa362_check_timings(struct omap_dss_device *dssdev, dev_dbg(dssdev->dev, "check_timings\n"); - return in->ops.atv->check_timings(in, vm); + return in->ops->check_timings(in, vm); } -static const struct omapdss_atv_ops opa362_atv_ops = { +static const struct omap_dss_device_ops opa362_ops = { .connect = opa362_connect, .disconnect = opa362_disconnect, - .enable = opa362_enable, .disable = opa362_disable, - .check_timings = opa362_check_timings, .set_timings = opa362_set_timings, }; @@ -191,7 +189,7 @@ static int opa362_probe(struct platform_device *pdev) ddata->enable_gpio = gpio; dssdev = &ddata->dssdev; - dssdev->ops.atv = &opa362_atv_ops; + dssdev->ops = &opa362_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->output_type = OMAP_DISPLAY_TYPE_VENC; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 08e63e39d0b7..1fbc5559e54f 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -44,7 +44,7 @@ static int tfp410_connect(struct omap_dss_device *dssdev, return PTR_ERR(in); } - r = in->ops.dpi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -74,7 +74,7 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev, dst->src = NULL; dssdev->dst = NULL; - in->ops.dpi->disconnect(in, &ddata->dssdev); + in->ops->disconnect(in, &ddata->dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -92,9 +92,9 @@ static int tfp410_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.dpi->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -117,7 +117,7 @@ static void tfp410_disable(struct omap_dss_device *dssdev) if (gpio_is_valid(ddata->pd_gpio)) gpio_set_value_cansleep(ddata->pd_gpio, 0); - in->ops.dpi->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -138,7 +138,7 @@ static void tfp410_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.dpi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static int tfp410_check_timings(struct omap_dss_device *dssdev, @@ -149,16 +149,14 @@ static int tfp410_check_timings(struct omap_dss_device *dssdev, tfp410_fix_timings(vm); - return in->ops.dpi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } -static const struct omapdss_dvi_ops tfp410_dvi_ops = { +static const struct omap_dss_device_ops tfp410_ops = { .connect = tfp410_connect, .disconnect = tfp410_disconnect, - 
.enable = tfp410_enable, .disable = tfp410_disable, - .check_timings = tfp410_check_timings, .set_timings = tfp410_set_timings, }; @@ -209,7 +207,7 @@ static int tfp410_probe(struct platform_device *pdev) } dssdev = &ddata->dssdev; - dssdev->ops.dvi = &tfp410_dvi_ops; + dssdev->ops = &tfp410_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->output_type = OMAP_DISPLAY_TYPE_DVI; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index f35e2afa3e9c..21a4a2fd42bf 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -49,7 +49,7 @@ static int tpd_connect(struct omap_dss_device *dssdev, return PTR_ERR(in); } - r = in->ops.hdmi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -85,7 +85,7 @@ static void tpd_disconnect(struct omap_dss_device *dssdev, dst->src = NULL; dssdev->dst = NULL; - in->ops.hdmi->disconnect(in, &ddata->dssdev); + in->ops->disconnect(in, &ddata->dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -100,9 +100,9 @@ static int tpd_enable(struct omap_dss_device *dssdev) if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) return 0; - in->ops.hdmi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.hdmi->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -119,7 +119,7 @@ static void tpd_disable(struct omap_dss_device *dssdev) if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return; - in->ops.hdmi->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -132,7 +132,7 @@ static void tpd_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.hdmi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static int tpd_check_timings(struct omap_dss_device *dssdev, @@ -142,7 +142,7 @@ static int tpd_check_timings(struct omap_dss_device *dssdev, struct omap_dss_device *in = ddata->in; int r; - r = in->ops.hdmi->check_timings(in, vm); + r = in->ops->check_timings(in, vm); return r; } @@ -156,7 +156,7 @@ static int tpd_read_edid(struct omap_dss_device *dssdev, if (!gpiod_get_value_cansleep(ddata->hpd_gpio)) return -ENODEV; - return in->ops.hdmi->read_edid(in, edid, len); + return in->ops->hdmi.read_edid(in, edid, len); } static bool tpd_detect(struct omap_dss_device *dssdev) @@ -165,8 +165,8 @@ static bool tpd_detect(struct omap_dss_device *dssdev) struct omap_dss_device *in = ddata->in; bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio); - if (!connected && in->ops.hdmi->lost_hotplug) - in->ops.hdmi->lost_hotplug(in); + if (!connected && in->ops->hdmi.lost_hotplug) + in->ops->hdmi.lost_hotplug(in); return connected; } @@ -219,7 +219,7 @@ static int tpd_set_infoframe(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.hdmi->set_infoframe(in, avi); + return in->ops->hdmi.set_infoframe(in, avi); } static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev, @@ -228,27 +228,27 @@ static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode); + return in->ops->hdmi.set_hdmi_mode(in, hdmi_mode); } -static const struct omapdss_hdmi_ops tpd_hdmi_ops = { +static const struct omap_dss_device_ops tpd_ops = { .connect = tpd_connect, .disconnect = tpd_disconnect, - 
.enable = tpd_enable, .disable = tpd_disable, - .check_timings = tpd_check_timings, .set_timings = tpd_set_timings, - .read_edid = tpd_read_edid, - .detect = tpd_detect, - .register_hpd_cb = tpd_register_hpd_cb, - .unregister_hpd_cb = tpd_unregister_hpd_cb, - .enable_hpd = tpd_enable_hpd, - .disable_hpd = tpd_disable_hpd, - .set_infoframe = tpd_set_infoframe, - .set_hdmi_mode = tpd_set_hdmi_mode, + .hdmi = { + .read_edid = tpd_read_edid, + .detect = tpd_detect, + .register_hpd_cb = tpd_register_hpd_cb, + .unregister_hpd_cb = tpd_unregister_hpd_cb, + .enable_hpd = tpd_enable_hpd, + .disable_hpd = tpd_disable_hpd, + .set_infoframe = tpd_set_infoframe, + .set_hdmi_mode = tpd_set_hdmi_mode, + }, }; static irqreturn_t tpd_hpd_isr(int irq, void *data) @@ -315,7 +315,7 @@ static int tpd_probe(struct platform_device *pdev) return r; dssdev = &ddata->dssdev; - dssdev->ops.hdmi = &tpd_hdmi_ops; + dssdev->ops = &tpd_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index bbd630ead3c1..15042351ace3 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -50,7 +50,7 @@ static int panel_dpi_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.dpi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -68,7 +68,7 @@ static void panel_dpi_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.dpi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -86,15 +86,15 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.dpi->enable(in); + r = in->ops->enable(in); if (r) return r; r = regulator_enable(ddata->vcc_supply); if (r) { - in->ops.dpi->disable(in); + in->ops->disable(in); return r; } @@ -119,7 +119,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) gpiod_set_value_cansleep(ddata->enable_gpio, 0); regulator_disable(ddata->vcc_supply); - in->ops.dpi->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -132,7 +132,7 @@ static void panel_dpi_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.dpi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void panel_dpi_get_timings(struct omap_dss_device *dssdev, @@ -149,7 +149,7 @@ static int panel_dpi_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.dpi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static const struct omap_dss_driver panel_dpi_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 8d98cd628e11..4296f4ca39fe 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -146,7 +146,7 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) int r; u8 buf[1]; - r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, buf, 1); + r = in->ops->dsi.dcs_read(in, ddata->channel, dcs_cmd, buf, 1); if (r < 0) return r; @@ -159,7 +159,7 @@ static int 
dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd) { struct omap_dss_device *in = ddata->in; - return in->ops.dsi->dcs_write(in, ddata->channel, &dcs_cmd, 1); + return in->ops->dsi.dcs_write(in, ddata->channel, &dcs_cmd, 1); } static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) @@ -167,7 +167,7 @@ static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) struct omap_dss_device *in = ddata->in; u8 buf[2] = { dcs_cmd, param }; - return in->ops.dsi->dcs_write(in, ddata->channel, buf, 2); + return in->ops->dsi.dcs_write(in, ddata->channel, buf, 2); } static int dsicm_sleep_in(struct panel_drv_data *ddata) @@ -180,7 +180,7 @@ static int dsicm_sleep_in(struct panel_drv_data *ddata) hw_guard_wait(ddata); cmd = MIPI_DCS_ENTER_SLEEP_MODE; - r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, &cmd, 1); + r = in->ops->dsi.dcs_write_nosync(in, ddata->channel, &cmd, 1); if (r) return r; @@ -242,7 +242,7 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata, buf[3] = (x2 >> 8) & 0xff; buf[4] = (x2 >> 0) & 0xff; - r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); + r = in->ops->dsi.dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); if (r) return r; @@ -252,11 +252,11 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata, buf[3] = (y2 >> 8) & 0xff; buf[4] = (y2 >> 0) & 0xff; - r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); + r = in->ops->dsi.dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); if (r) return r; - in->ops.dsi->bta_sync(in, ddata->channel); + in->ops->dsi.bta_sync(in, ddata->channel); return r; } @@ -290,7 +290,7 @@ static int dsicm_enter_ulps(struct panel_drv_data *ddata) if (ddata->ext_te_gpio) disable_irq(gpiod_to_irq(ddata->ext_te_gpio)); - in->ops.dsi->disable(in, false, true); + in->ops->dsi.disable(in, false, true); ddata->ulps_enabled = true; @@ -315,13 +315,13 @@ static int dsicm_exit_ulps(struct panel_drv_data *ddata) if (!ddata->ulps_enabled) return 0; - r = in->ops.dsi->enable(in); + r = in->ops->enable(in); if (r) { dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); goto err1; } - in->ops.dsi->enable_hs(in, ddata->channel, true); + in->ops->dsi.enable_hs(in, ddata->channel, true); r = _dsicm_enable_te(ddata, true); if (r) { @@ -381,13 +381,13 @@ static int dsicm_bl_update_status(struct backlight_device *dev) mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); r = dsicm_wake_up(ddata); if (!r) r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); } mutex_unlock(&ddata->lock); @@ -421,14 +421,14 @@ static ssize_t dsicm_num_errors_show(struct device *dev, mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); r = dsicm_wake_up(ddata); if (!r) r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS, &errors); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); } else { r = -ENODEV; } @@ -453,13 +453,13 @@ static ssize_t dsicm_hw_revision_show(struct device *dev, mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); r = dsicm_wake_up(ddata); if (!r) r = dsicm_get_id(ddata, &id1, &id2, &id3); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); } else { r = -ENODEV; } @@ -489,14 +489,14 @@ static ssize_t dsicm_store_ulps(struct device 
*dev, mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); if (t) r = dsicm_enter_ulps(ddata); else r = dsicm_wake_up(ddata); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); } mutex_unlock(&ddata->lock); @@ -541,9 +541,9 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev, if (ddata->enabled) { /* dsicm_wake_up will restart the timer */ - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); r = dsicm_wake_up(ddata); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); } mutex_unlock(&ddata->lock); @@ -635,7 +635,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) } if (ddata->pin_config.num_pins > 0) { - r = in->ops.dsi->configure_pins(in, &ddata->pin_config); + r = in->ops->dsi.configure_pins(in, &ddata->pin_config); if (r) { dev_err(&ddata->pdev->dev, "failed to configure DSI pins\n"); @@ -643,13 +643,13 @@ static int dsicm_power_on(struct panel_drv_data *ddata) } } - r = in->ops.dsi->set_config(in, &dsi_config); + r = in->ops->dsi.set_config(in, &dsi_config); if (r) { dev_err(&ddata->pdev->dev, "failed to configure DSI\n"); goto err_vddi; } - r = in->ops.dsi->enable(in); + r = in->ops->enable(in); if (r) { dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); goto err_vddi; @@ -657,7 +657,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) dsicm_hw_reset(ddata); - in->ops.dsi->enable_hs(in, ddata->channel, false); + in->ops->dsi.enable_hs(in, ddata->channel, false); r = dsicm_sleep_out(ddata); if (r) @@ -689,7 +689,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) if (r) goto err; - r = in->ops.dsi->enable_video_output(in, ddata->channel); + r = in->ops->dsi.enable_video_output(in, ddata->channel); if (r) goto err; @@ -701,7 +701,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) ddata->intro_printed = true; } - in->ops.dsi->enable_hs(in, ddata->channel, true); + in->ops->dsi.enable_hs(in, ddata->channel, true); return 0; err: @@ -709,7 +709,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) dsicm_hw_reset(ddata); - in->ops.dsi->disable(in, true, false); + in->ops->dsi.disable(in, true, false); err_vddi: if (ddata->vddi) regulator_disable(ddata->vddi); @@ -725,7 +725,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata) struct omap_dss_device *in = ddata->in; int r; - in->ops.dsi->disable_video_output(in, ddata->channel); + in->ops->dsi.disable_video_output(in, ddata->channel); r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF); if (!r) @@ -737,7 +737,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata) dsicm_hw_reset(ddata); } - in->ops.dsi->disable(in, true, false); + in->ops->dsi.disable(in, true, false); if (ddata->vddi) regulator_disable(ddata->vddi); @@ -772,19 +772,19 @@ static int dsicm_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.dsi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { dev_err(dev, "Failed to connect to video source\n"); goto err_connect; } - r = in->ops.dsi->request_vc(in, &ddata->channel); + r = in->ops->dsi.request_vc(in, &ddata->channel); if (r) { dev_err(dev, "failed to get virtual channel\n"); goto err_req_vc; } - r = in->ops.dsi->set_vc_id(in, ddata->channel, TCH); + r = in->ops->dsi.set_vc_id(in, ddata->channel, TCH); if (r) { dev_err(dev, "failed to set VC_ID\n"); goto err_vc_id; @@ -794,9 +794,9 @@ static int dsicm_connect(struct omap_dss_device *dssdev) return 0; err_vc_id: - in->ops.dsi->release_vc(in, ddata->channel); + 
in->ops->dsi.release_vc(in, ddata->channel); err_req_vc: - in->ops.dsi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); err_connect: omap_dss_put_device(in); return r; @@ -810,8 +810,8 @@ static void dsicm_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.dsi->release_vc(in, ddata->channel); - in->ops.dsi->disconnect(in, dssdev); + in->ops->dsi.release_vc(in, ddata->channel); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -837,11 +837,11 @@ static int dsicm_enable(struct omap_dss_device *dssdev) goto err; } - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); r = dsicm_power_on(ddata); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); if (r) goto err; @@ -873,7 +873,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev) dsicm_cancel_ulps_work(ddata); - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); if (omapdss_device_is_enabled(dssdev)) { r = dsicm_wake_up(ddata); @@ -881,7 +881,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev) dsicm_power_off(ddata); } - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; @@ -894,7 +894,7 @@ static void dsicm_framedone_cb(int err, void *data) struct omap_dss_device *in = ddata->in; dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err); - in->ops.dsi->bus_unlock(ddata->in); + in->ops->dsi.bus_unlock(ddata->in); } static irqreturn_t dsicm_te_isr(int irq, void *data) @@ -909,7 +909,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data) if (old) { cancel_delayed_work(&ddata->te_timeout_work); - r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb, + r = in->ops->dsi.update(in, ddata->channel, dsicm_framedone_cb, ddata); if (r) goto err; @@ -918,7 +918,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data) return IRQ_HANDLED; err: dev_err(&ddata->pdev->dev, "start update failed\n"); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); return IRQ_HANDLED; } @@ -931,7 +931,7 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work) dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n"); atomic_set(&ddata->do_update, 0); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); } static int dsicm_update(struct omap_dss_device *dssdev, @@ -944,7 +944,7 @@ static int dsicm_update(struct omap_dss_device *dssdev, dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); mutex_lock(&ddata->lock); - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); r = dsicm_wake_up(ddata); if (r) @@ -966,7 +966,7 @@ static int dsicm_update(struct omap_dss_device *dssdev, msecs_to_jiffies(250)); atomic_set(&ddata->do_update, 1); } else { - r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb, + r = in->ops->dsi.update(in, ddata->channel, dsicm_framedone_cb, ddata); if (r) goto err; @@ -976,7 +976,7 @@ static int dsicm_update(struct omap_dss_device *dssdev, mutex_unlock(&ddata->lock); return 0; err: - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); mutex_unlock(&ddata->lock); return r; } @@ -989,8 +989,8 @@ static int dsicm_sync(struct omap_dss_device *dssdev) dev_dbg(&ddata->pdev->dev, "sync\n"); mutex_lock(&ddata->lock); - in->ops.dsi->bus_lock(in); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_lock(in); + in->ops->dsi.bus_unlock(in); mutex_unlock(&ddata->lock); dev_dbg(&ddata->pdev->dev, "sync done\n"); @@ -1009,7 +1009,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) r = 
dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF); if (!ddata->ext_te_gpio) - in->ops.dsi->enable_te(in, enable); + in->ops->dsi.enable_te(in, enable); /* possible panel bug */ msleep(100); @@ -1028,7 +1028,7 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) if (ddata->te_enabled == enable) goto end; - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); if (ddata->enabled) { r = dsicm_wake_up(ddata); @@ -1042,13 +1042,13 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) ddata->te_enabled = enable; - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); end: mutex_unlock(&ddata->lock); return 0; err: - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); mutex_unlock(&ddata->lock); return r; @@ -1090,7 +1090,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, size = min((u32)w * h * 3, ddata->vm.hactive * ddata->vm.vactive * 3); - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); r = dsicm_wake_up(ddata); if (r) @@ -1106,7 +1106,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, dsicm_set_update_window(ddata, x, y, w, h); - r = in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, plen); + r = in->ops->dsi.set_max_rx_packet_size(in, ddata->channel, plen); if (r) goto err2; @@ -1114,7 +1114,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, u8 dcs_cmd = first ? 0x2e : 0x3e; first = 0; - r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, + r = in->ops->dsi.dcs_read(in, ddata->channel, dcs_cmd, buf + buf_used, size - buf_used); if (r < 0) { @@ -1140,9 +1140,9 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, r = buf_used; err3: - in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, 1); + in->ops->dsi.set_max_rx_packet_size(in, ddata->channel, 1); err2: - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); err1: mutex_unlock(&ddata->lock); return r; @@ -1162,11 +1162,11 @@ static void dsicm_ulps_work(struct work_struct *work) return; } - in->ops.dsi->bus_lock(in); + in->ops->dsi.bus_lock(in); dsicm_enter_ulps(ddata); - in->ops.dsi->bus_unlock(in); + in->ops->dsi.bus_unlock(in); mutex_unlock(&ddata->lock); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 0a6ab6470253..8e293708261c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -131,7 +131,7 @@ static int lb035q02_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.dpi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -151,7 +151,7 @@ static void lb035q02_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.dpi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -169,9 +169,9 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.dpi->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -194,7 +194,7 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - in->ops.dpi->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -207,7 +207,7 @@ static void 
lb035q02_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.dpi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void lb035q02_get_timings(struct omap_dss_device *dssdev, @@ -224,7 +224,7 @@ static int lb035q02_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.dpi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static const struct omap_dss_driver lb035q02_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 9816e661c97d..7296c794326c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -127,7 +127,7 @@ static int nec_8048_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.dpi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -145,7 +145,7 @@ static void nec_8048_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.dpi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -163,9 +163,9 @@ static int nec_8048_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.dpi->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -188,7 +188,7 @@ static void nec_8048_disable(struct omap_dss_device *dssdev) if (gpio_is_valid(ddata->res_gpio)) gpio_set_value_cansleep(ddata->res_gpio, 0); - in->ops.dpi->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -201,7 +201,7 @@ static void nec_8048_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.dpi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void nec_8048_get_timings(struct omap_dss_device *dssdev, @@ -218,7 +218,7 @@ static int nec_8048_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.dpi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static const struct omap_dss_driver nec_8048_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index e259240f96fa..00291b9ecfde 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -73,7 +73,7 @@ static int sharp_ls_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.dpi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -91,7 +91,7 @@ static void sharp_ls_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.dpi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -109,7 +109,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); if (ddata->vcc) { r = regulator_enable(ddata->vcc); @@ -117,7 +117,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) return r; } - r = in->ops.dpi->enable(in); + 
r = in->ops->enable(in); if (r) { regulator_disable(ddata->vcc); return r; @@ -155,7 +155,7 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) msleep(100); - in->ops.dpi->disable(in); + in->ops->disable(in); if (ddata->vcc) regulator_disable(ddata->vcc); @@ -171,7 +171,7 @@ static void sharp_ls_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.dpi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void sharp_ls_get_timings(struct omap_dss_device *dssdev, @@ -188,7 +188,7 @@ static int sharp_ls_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.dpi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static const struct omap_dss_driver sharp_ls_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index d298a3f56189..694ac42b7247 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -522,7 +522,7 @@ static int acx565akm_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.sdi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -540,7 +540,7 @@ static void acx565akm_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.sdi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -554,9 +554,9 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) dev_dbg(&ddata->spi->dev, "%s\n", __func__); - in->ops.sdi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.sdi->enable(in); + r = in->ops->enable(in); if (r) { pr_err("%s sdi enable failed\n", __func__); return r; @@ -621,7 +621,7 @@ static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) /* FIXME need to tweak this delay */ msleep(100); - in->ops.sdi->disable(in); + in->ops->disable(in); } static int acx565akm_enable(struct omap_dss_device *dssdev) @@ -672,7 +672,7 @@ static void acx565akm_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.sdi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void acx565akm_get_timings(struct omap_dss_device *dssdev, @@ -689,7 +689,7 @@ static int acx565akm_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.sdi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static const struct omap_dss_driver acx565akm_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 366e0f71288e..3cd1e4ccd43a 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -181,7 +181,7 @@ static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.dpi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -199,7 +199,7 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.dpi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -217,9 +217,9 @@ static 
int td028ttec1_panel_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.dpi->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -328,7 +328,7 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN); jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00); - in->ops.dpi->disable(in); + in->ops->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -341,7 +341,7 @@ static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.dpi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, @@ -358,7 +358,7 @@ static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.dpi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static const struct omap_dss_driver td028ttec1_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 8e98232ee9d9..de1140314a7a 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -352,7 +352,7 @@ static int tpo_td043_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops.dpi->connect(in, dssdev); + r = in->ops->connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -370,7 +370,7 @@ static void tpo_td043_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops.dpi->disconnect(in, dssdev); + in->ops->disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; @@ -388,9 +388,9 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); + in->ops->set_timings(in, &ddata->vm); - r = in->ops.dpi->enable(in); + r = in->ops->enable(in); if (r) return r; @@ -401,7 +401,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (!ddata->spi_suspended) { r = tpo_td043_power_on(ddata); if (r) { - in->ops.dpi->disable(in); + in->ops->disable(in); return r; } } @@ -419,7 +419,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev) if (!omapdss_device_is_enabled(dssdev)) return; - in->ops.dpi->disable(in); + in->ops->disable(in); if (!ddata->spi_suspended) tpo_td043_power_off(ddata); @@ -435,7 +435,7 @@ static void tpo_td043_set_timings(struct omap_dss_device *dssdev, ddata->vm = *vm; - in->ops.dpi->set_timings(in, vm); + in->ops->set_timings(in, vm); } static void tpo_td043_get_timings(struct omap_dss_device *dssdev, @@ -452,7 +452,7 @@ static int tpo_td043_check_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - return in->ops.dpi->check_timings(in, vm); + return in->ops->check_timings(in, vm); } static const struct omap_dss_driver tpo_td043_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 5d2d4314055f..11d94b310d1a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -681,7 +681,7 @@ static void dpi_disconnect(struct omap_dss_device *dssdev, dss_mgr_disconnect(&dpi->output, dssdev); } -static const struct 
omapdss_dpi_ops dpi_ops = { +static const struct omap_dss_device_ops dpi_ops = { .connect = dpi_connect, .disconnect = dpi_disconnect, @@ -720,7 +720,7 @@ static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) out->output_type = OMAP_DISPLAY_TYPE_DPI; out->dispc_channel = dpi_get_channel(dpi, port_num); out->port_num = port_num; - out->ops.dpi = &dpi_ops; + out->ops = &dpi_ops; out->owner = THIS_MODULE; omapdss_register_output(out); diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 74467b308721..3cc91e973f01 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -5000,43 +5000,45 @@ static void dsi_disconnect(struct omap_dss_device *dssdev, dss_mgr_disconnect(&dsi->output, dssdev); } -static const struct omapdss_dsi_ops dsi_ops = { +static const struct omap_dss_device_ops dsi_ops = { .connect = dsi_connect, .disconnect = dsi_disconnect, + .enable = dsi_display_enable, - .bus_lock = dsi_bus_lock, - .bus_unlock = dsi_bus_unlock, + .dsi = { + .bus_lock = dsi_bus_lock, + .bus_unlock = dsi_bus_unlock, - .enable = dsi_display_enable, - .disable = dsi_display_disable, + .disable = dsi_display_disable, - .enable_hs = dsi_vc_enable_hs, + .enable_hs = dsi_vc_enable_hs, - .configure_pins = dsi_configure_pins, - .set_config = dsi_set_config, + .configure_pins = dsi_configure_pins, + .set_config = dsi_set_config, - .enable_video_output = dsi_enable_video_output, - .disable_video_output = dsi_disable_video_output, + .enable_video_output = dsi_enable_video_output, + .disable_video_output = dsi_disable_video_output, - .update = dsi_update, + .update = dsi_update, - .enable_te = dsi_enable_te, + .enable_te = dsi_enable_te, - .request_vc = dsi_request_vc, - .set_vc_id = dsi_set_vc_id, - .release_vc = dsi_release_vc, + .request_vc = dsi_request_vc, + .set_vc_id = dsi_set_vc_id, + .release_vc = dsi_release_vc, - .dcs_write = dsi_vc_dcs_write, - .dcs_write_nosync = dsi_vc_dcs_write_nosync, - .dcs_read = dsi_vc_dcs_read, + .dcs_write = dsi_vc_dcs_write, + .dcs_write_nosync = dsi_vc_dcs_write_nosync, + .dcs_read = dsi_vc_dcs_read, - .gen_write = dsi_vc_generic_write, - .gen_write_nosync = dsi_vc_generic_write_nosync, - .gen_read = dsi_vc_generic_read, + .gen_write = dsi_vc_generic_write, + .gen_write_nosync = dsi_vc_generic_write_nosync, + .gen_read = dsi_vc_generic_read, - .bta_sync = dsi_vc_send_bta_sync, + .bta_sync = dsi_vc_send_bta_sync, - .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size, + .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size, + }, }; static void dsi_init_output(struct dsi_data *dsi) @@ -5050,7 +5052,7 @@ static void dsi_init_output(struct dsi_data *dsi) out->output_type = OMAP_DISPLAY_TYPE_DSI; out->name = dsi->module_id == 0 ? 
"dsi.0" : "dsi.1"; out->dispc_channel = dsi_get_channel(dsi); - out->ops.dsi = &dsi_ops; + out->ops = &dsi_ops; out->owner = THIS_MODULE; omapdss_register_output(out); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index b3d7865347a3..e0406f2a90e7 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -540,7 +540,7 @@ static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev, return 0; } -static const struct omapdss_hdmi_ops hdmi_ops = { +static const struct omap_dss_device_ops hdmi_ops = { .connect = hdmi_connect, .disconnect = hdmi_disconnect, @@ -550,10 +550,12 @@ static const struct omapdss_hdmi_ops hdmi_ops = { .check_timings = hdmi_display_check_timing, .set_timings = hdmi_display_set_timing, - .read_edid = hdmi_read_edid, - .lost_hotplug = hdmi_lost_hotplug, - .set_infoframe = hdmi_set_infoframe, - .set_hdmi_mode = hdmi_set_hdmi_mode, + .hdmi = { + .read_edid = hdmi_read_edid, + .lost_hotplug = hdmi_lost_hotplug, + .set_infoframe = hdmi_set_infoframe, + .set_hdmi_mode = hdmi_set_hdmi_mode, + }, }; static void hdmi_init_output(struct omap_hdmi *hdmi) @@ -565,7 +567,7 @@ static void hdmi_init_output(struct omap_hdmi *hdmi) out->output_type = OMAP_DISPLAY_TYPE_HDMI; out->name = "hdmi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->ops.hdmi = &hdmi_ops; + out->ops = &hdmi_ops; out->owner = THIS_MODULE; omapdss_register_output(out); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 8e9b3a24b2ab..45d2eacedb2a 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -532,7 +532,7 @@ static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev, return 0; } -static const struct omapdss_hdmi_ops hdmi_ops = { +static const struct omap_dss_device_ops hdmi_ops = { .connect = hdmi_connect, .disconnect = hdmi_disconnect, @@ -542,9 +542,11 @@ static const struct omapdss_hdmi_ops hdmi_ops = { .check_timings = hdmi_display_check_timing, .set_timings = hdmi_display_set_timing, - .read_edid = hdmi_read_edid, - .set_infoframe = hdmi_set_infoframe, - .set_hdmi_mode = hdmi_set_hdmi_mode, + .hdmi = { + .read_edid = hdmi_read_edid, + .set_infoframe = hdmi_set_infoframe, + .set_hdmi_mode = hdmi_set_hdmi_mode, + }, }; static void hdmi_init_output(struct omap_hdmi *hdmi) @@ -556,7 +558,7 @@ static void hdmi_init_output(struct omap_hdmi *hdmi) out->output_type = OMAP_DISPLAY_TYPE_HDMI; out->name = "hdmi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->ops.hdmi = &hdmi_ops; + out->ops = &hdmi_ops; out->owner = THIS_MODULE; omapdss_register_output(out); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 2ec74206bcff..fb6c9d522013 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -296,80 +296,7 @@ struct omap_dss_writeback_info { u8 pre_mult_alpha; }; -struct omapdss_dpi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); -}; - -struct omapdss_sdi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct 
omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); -}; - -struct omapdss_dvi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); -}; - -struct omapdss_atv_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); -}; - struct omapdss_hdmi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); void (*lost_hotplug)(struct omap_dss_device *dssdev); bool (*detect)(struct omap_dss_device *dssdev); @@ -388,12 +315,6 @@ struct omapdss_hdmi_ops { }; struct omapdss_dsi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes, bool enter_ulps); @@ -444,6 +365,26 @@ struct omapdss_dsi_ops { int channel, u16 plen); }; +struct omap_dss_device_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct videomode *vm); + void (*set_timings)(struct omap_dss_device *dssdev, + struct videomode *vm); + + union { + const struct omapdss_hdmi_ops hdmi; + const struct omapdss_dsi_ops dsi; + }; +}; + struct omap_dss_device { struct kobject kobj; struct device *dev; @@ -461,15 +402,7 @@ struct omap_dss_device { const char *name; const struct omap_dss_driver *driver; - - union { - const struct omapdss_dpi_ops *dpi; - const struct omapdss_sdi_ops *sdi; - const struct omapdss_dvi_ops *dvi; - const struct omapdss_hdmi_ops *hdmi; - const struct omapdss_atv_ops *atv; - const struct omapdss_dsi_ops *dsi; - } ops; + const struct omap_dss_device_ops *ops; /* helper variable for driver suspend/resume */ bool activate_after_resume; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 43cf331e80e4..b21499e124c1 100644 --- 
a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -311,7 +311,7 @@ static void sdi_disconnect(struct omap_dss_device *dssdev, dss_mgr_disconnect(&sdi->output, dssdev); } -static const struct omapdss_sdi_ops sdi_ops = { +static const struct omap_dss_device_ops sdi_ops = { .connect = sdi_connect, .disconnect = sdi_disconnect, @@ -333,7 +333,7 @@ static void sdi_init_output(struct sdi_device *sdi) out->dispc_channel = OMAP_DSS_CHANNEL_LCD; /* We have SDI only on OMAP3, where it's on port 1 */ out->port_num = 1; - out->ops.sdi = &sdi_ops; + out->ops = &sdi_ops; out->owner = THIS_MODULE; omapdss_register_output(out); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index ab0eeec38db0..4cae03f24e1b 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -750,7 +750,7 @@ static void venc_disconnect(struct omap_dss_device *dssdev, dss_mgr_disconnect(&venc->output, dssdev); } -static const struct omapdss_atv_ops venc_ops = { +static const struct omap_dss_device_ops venc_ops = { .connect = venc_connect, .disconnect = venc_disconnect, @@ -770,7 +770,7 @@ static void venc_init_output(struct venc_device *venc) out->output_type = OMAP_DISPLAY_TYPE_VENC; out->name = "venc.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->ops.atv = &venc_ops; + out->ops = &venc_ops; out->owner = THIS_MODULE; omapdss_register_output(out); -- GitLab From ec727e3f61845d6d64b3c5eba464096d6cc7f8e9 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 17:30:30 +0200 Subject: [PATCH 0907/1692] drm/omap: dss: Add functions to connect and disconnect devices The omap_dss_device objects model display components and are connected at runtime to create display pipelines. The connect and disconnect operations implemented by each component contain lots of duplicate code. As a first step towards fixing this, create new functions to wrap the direct calls to those operations and use them. 
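Conceptually, the wrappers added to dss/base.c are thin forwarders to the device ops. The following is only a minimal sketch under that assumption; the actual base.c implementation is not reproduced here and may carry additional checks.

int omapdss_device_connect(struct omap_dss_device *src,
			   struct omap_dss_device *dst)
{
	/* Sketch: forward to the source device's connect operation. */
	return src->ops->connect(src, dst);
}

void omapdss_device_disconnect(struct omap_dss_device *src,
			       struct omap_dss_device *dst)
{
	/* Sketch: forward to the source device's disconnect operation. */
	src->ops->disconnect(src, dst);
}

The hunks below then convert each component driver to call these wrappers instead of dereferencing in->ops directly.
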
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 4 ++-- .../gpu/drm/omapdrm/displays/connector-dvi.c | 4 ++-- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 4 ++-- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 4 ++-- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 4 ++-- .../drm/omapdrm/displays/encoder-tpd12s015.c | 4 ++-- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 4 ++-- .../gpu/drm/omapdrm/displays/panel-dsi-cm.c | 6 +++--- .../displays/panel-lgphilips-lb035q02.c | 4 ++-- .../omapdrm/displays/panel-nec-nl8048hl11.c | 4 ++-- .../displays/panel-sharp-ls037v7dw01.c | 4 ++-- .../omapdrm/displays/panel-sony-acx565akm.c | 4 ++-- .../omapdrm/displays/panel-tpo-td028ttec1.c | 4 ++-- .../omapdrm/displays/panel-tpo-td043mtea1.c | 4 ++-- drivers/gpu/drm/omapdrm/dss/base.c | 20 +++++++++++++++++++ drivers/gpu/drm/omapdrm/dss/omapdss.h | 4 ++++ drivers/gpu/drm/omapdrm/omap_drv.c | 4 ++-- 17 files changed, 55 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index a94868d9398b..41ba3c5dbe7d 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -59,7 +59,7 @@ static int tvc_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -79,7 +79,7 @@ static void tvc_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 021e3b651c89..f193bbda550c 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -72,7 +72,7 @@ static int dvic_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -90,7 +90,7 @@ static void dvic_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index b528bd51ada3..014554afbb0d 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -69,7 +69,7 @@ static int hdmic_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -89,7 +89,7 @@ static void hdmic_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 37982ffe0ad4..752b565987c1 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -50,7 +50,7 @@ static int 
opa362_connect(struct omap_dss_device *dssdev, return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -82,7 +82,7 @@ static void opa362_disconnect(struct omap_dss_device *dssdev, dst->src = NULL; dssdev->dst = NULL; - in->ops->disconnect(in, &ddata->dssdev); + omapdss_device_disconnect(in, &ddata->dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 1fbc5559e54f..a8660e5e5ffc 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -44,7 +44,7 @@ static int tfp410_connect(struct omap_dss_device *dssdev, return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -74,7 +74,7 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev, dst->src = NULL; dssdev->dst = NULL; - in->ops->disconnect(in, &ddata->dssdev); + omapdss_device_disconnect(in, &ddata->dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 21a4a2fd42bf..640f15b88467 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -49,7 +49,7 @@ static int tpd_connect(struct omap_dss_device *dssdev, return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -85,7 +85,7 @@ static void tpd_disconnect(struct omap_dss_device *dssdev, dst->src = NULL; dssdev->dst = NULL; - in->ops->disconnect(in, &ddata->dssdev); + omapdss_device_disconnect(in, &ddata->dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 15042351ace3..987519501336 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -50,7 +50,7 @@ static int panel_dpi_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -68,7 +68,7 @@ static void panel_dpi_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 4296f4ca39fe..b7cd2ef17fbb 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -772,7 +772,7 @@ static int dsicm_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { dev_err(dev, "Failed to connect to video source\n"); goto err_connect; @@ -796,7 +796,7 @@ static int dsicm_connect(struct omap_dss_device *dssdev) err_vc_id: in->ops->dsi.release_vc(in, ddata->channel); err_req_vc: - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); err_connect: omap_dss_put_device(in); return r; @@ -811,7 +811,7 @@ static void dsicm_disconnect(struct omap_dss_device *dssdev) return; in->ops->dsi.release_vc(in, 
ddata->channel); - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 8e293708261c..e4a3b5828bf9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -131,7 +131,7 @@ static int lb035q02_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -151,7 +151,7 @@ static void lb035q02_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 7296c794326c..e89dd32a2a60 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -127,7 +127,7 @@ static int nec_8048_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -145,7 +145,7 @@ static void nec_8048_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 00291b9ecfde..d0451cfab7f8 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -73,7 +73,7 @@ static int sharp_ls_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -91,7 +91,7 @@ static void sharp_ls_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 694ac42b7247..9033e9d25b7f 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -522,7 +522,7 @@ static int acx565akm_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -540,7 +540,7 @@ static void acx565akm_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 3cd1e4ccd43a..8865459b6c90 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -181,7 +181,7 @@ static 
int td028ttec1_panel_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -199,7 +199,7 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index de1140314a7a..1daba7a60a9c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -352,7 +352,7 @@ static int tpo_td043_connect(struct omap_dss_device *dssdev) return PTR_ERR(in); } - r = in->ops->connect(in, dssdev); + r = omapdss_device_connect(in, dssdev); if (r) { omap_dss_put_device(in); return r; @@ -370,7 +370,7 @@ static void tpo_td043_disconnect(struct omap_dss_device *dssdev) if (!omapdss_device_is_connected(dssdev)) return; - in->ops->disconnect(in, dssdev); + omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); ddata->in = NULL; diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index df6cb1ac43c8..576fd3d13259 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -102,6 +102,26 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, return NULL; } +int omapdss_device_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) +{ + if (src->driver) + return src->driver->connect(src); + else + return src->ops->connect(src, dst); +} +EXPORT_SYMBOL_GPL(omapdss_device_connect); + +void omapdss_device_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) +{ + if (src->driver) + src->driver->disconnect(src); + else + src->ops->disconnect(src, dst); +} +EXPORT_SYMBOL_GPL(omapdss_device_disconnect); + /* ----------------------------------------------------------------------------- * Components Handling */ diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index fb6c9d522013..58bd6948bcde 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -498,6 +498,10 @@ void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, unsigned int port); +int omapdss_device_connect(struct omap_dss_device *src, + struct omap_dss_device *dst); +void omapdss_device_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst); struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); void omap_dss_put_device(struct omap_dss_device *dssdev); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index e411d46bd803..6bc4b01c8e9c 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -159,7 +159,7 @@ static void omap_disconnect_dssdevs(struct drm_device *ddev) for (i = 0; i < priv->num_dssdevs; i++) { struct omap_dss_device *dssdev = priv->dssdevs[i]; - dssdev->driver->disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); priv->dssdevs[i] = NULL; omap_dss_put_device(dssdev); } @@ -189,7 +189,7 @@ static int omap_connect_dssdevs(struct drm_device *ddev) return -EPROBE_DEFER; 
for_each_dss_dev(dssdev) { - r = dssdev->driver->connect(dssdev); + r = omapdss_device_connect(dssdev, NULL); if (r == -EPROBE_DEFER) { omap_dss_put_device(dssdev); goto cleanup; -- GitLab From 1f507968c30b0e86a307164a212ef11def1e5899 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 17:30:30 +0200 Subject: [PATCH 0908/1692] drm/omap: dss: Move debug message and checks to connection handlers The connectors, encoders and display duplicate the same debug messages and connection checks in their omap_dss_device connect and disconnect handlers. Move the code to the connect and disconnect wrappers. To simplify the code the connect function returns -EBUSY unconditionally if the device is already connected. This doesn't cause any change in practice: the connect handler of displays is never called on a connected device as it is only invoked during omapdrm initialization. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-analog-tv.c | 10 ---------- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 6 ------ drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 10 ---------- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 11 ----------- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 7 ------- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 6 ------ drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 6 ------ .../drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 6 ------ .../gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 6 ------ .../drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 6 ------ .../gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 6 ------ .../gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 6 ------ .../gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 6 ------ drivers/gpu/drm/omapdrm/dss/base.c | 12 ++++++++++++ 14 files changed, 12 insertions(+), 92 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 41ba3c5dbe7d..d77e21fc26ad 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -48,11 +48,6 @@ static int tvc_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - dev_dbg(ddata->dev, "connect\n"); - - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); if (IS_ERR(in)) { dev_err(ddata->dev, "failed to find video source\n"); @@ -74,11 +69,6 @@ static void tvc_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - dev_dbg(ddata->dev, "disconnect\n"); - - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index f193bbda550c..9a3ecc3ed5b2 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -63,9 +63,6 @@ static int dvic_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -87,9 +84,6 @@ static void dvic_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = 
to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 014554afbb0d..665af9932317 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -58,11 +58,6 @@ static int hdmic_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - dev_dbg(ddata->dev, "connect\n"); - - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); if (IS_ERR(in)) { dev_err(ddata->dev, "failed to find video source\n"); @@ -84,11 +79,6 @@ static void hdmic_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - dev_dbg(ddata->dev, "disconnect\n"); - - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 752b565987c1..5b9ef09e6b2d 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -39,11 +39,6 @@ static int opa362_connect(struct omap_dss_device *dssdev, struct omap_dss_device *in; int r; - dev_dbg(dssdev->dev, "connect\n"); - - if (omapdss_device_is_connected(dssdev)) - return -EBUSY; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -69,12 +64,6 @@ static void opa362_disconnect(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - dev_dbg(dssdev->dev, "disconnect\n"); - - WARN_ON(!omapdss_device_is_connected(dssdev)); - if (!omapdss_device_is_connected(dssdev)) - return; - WARN_ON(dst != dssdev->dst); if (dst != dssdev->dst) return; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index a8660e5e5ffc..b22c8f71c0e5 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -35,9 +35,6 @@ static int tfp410_connect(struct omap_dss_device *dssdev, struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return -EBUSY; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -63,10 +60,6 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - WARN_ON(!omapdss_device_is_connected(dssdev)); - if (!omapdss_device_is_connected(dssdev)) - return; - WARN_ON(dst != dssdev->dst); if (dst != dssdev->dst) return; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 987519501336..5d704cf0809f 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -41,9 +41,6 @@ static int panel_dpi_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { 
dev_err(dssdev->dev, "failed to find video source\n"); @@ -65,9 +62,6 @@ static void panel_dpi_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index b7cd2ef17fbb..be6c0c811e66 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -763,9 +763,6 @@ static int dsicm_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -807,9 +804,6 @@ static void dsicm_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - in->ops->dsi.release_vc(in, ddata->channel); omapdss_device_disconnect(in, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index e4a3b5828bf9..50b74e7351e9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -122,9 +122,6 @@ static int lb035q02_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -148,9 +145,6 @@ static void lb035q02_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index e89dd32a2a60..769cba6c99ef 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -118,9 +118,6 @@ static int nec_8048_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -142,9 +139,6 @@ static void nec_8048_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index d0451cfab7f8..629ea2d93575 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -64,9 +64,6 @@ static int sharp_ls_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = 
omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -88,9 +85,6 @@ static void sharp_ls_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 9033e9d25b7f..82797df80e66 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -513,9 +513,6 @@ static int acx565akm_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -537,9 +534,6 @@ static void acx565akm_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 8865459b6c90..298fedf96efe 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -172,9 +172,6 @@ static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -196,9 +193,6 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 1daba7a60a9c..df3d8103823d 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -343,9 +343,6 @@ static int tpo_td043_connect(struct omap_dss_device *dssdev) struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); if (IS_ERR(in)) { dev_err(dssdev->dev, "failed to find video source\n"); @@ -367,9 +364,6 @@ static void tpo_td043_disconnect(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - if (!omapdss_device_is_connected(dssdev)) - return; - omapdss_device_disconnect(in, dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 576fd3d13259..599ef628736b 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -105,6 +105,11 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, int omapdss_device_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { + dev_dbg(src->dev, 
"connect\n"); + + if (omapdss_device_is_connected(src)) + return -EBUSY; + if (src->driver) return src->driver->connect(src); else @@ -115,6 +120,13 @@ EXPORT_SYMBOL_GPL(omapdss_device_connect); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { + dev_dbg(src->dev, "disconnect\n"); + + if (!src->id && !omapdss_device_is_connected(src)) { + WARN_ON(!src->driver); + return; + } + if (src->driver) src->driver->disconnect(src); else -- GitLab From 73fc0ac4a69506ead7cf0c0ad0ef79f283766f25 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 4 Aug 2018 22:10:44 +0300 Subject: [PATCH 0909/1692] drm/omap: displays: Don't call disconnect handlers directly In preparation for the move of checks from the disconnect handlers to the omapdss_device_disconnect() function, replace direct calls to the disconnect handlers at remove time with calls to omapdss_device_disconnect(). Signed-off-by: Laurent Pinchart Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 2 +- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 2 +- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index d77e21fc26ad..fb5c4dcafa2d 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -193,7 +193,7 @@ static int __exit tvc_remove(struct platform_device *pdev) omapdss_unregister_display(&ddata->dssdev); tvc_disable(dssdev); - tvc_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 9a3ecc3ed5b2..e47214a12118 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -420,7 +420,7 @@ static int __exit dvic_remove(struct platform_device *pdev) omapdss_unregister_display(&ddata->dssdev); dvic_disable(dssdev); - dvic_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); i2c_put_adapter(ddata->i2c_adapter); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 665af9932317..671f2f29c8d3 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -375,7 +375,7 @@ static int __exit hdmic_remove(struct platform_device *pdev) omapdss_unregister_display(&ddata->dssdev); hdmic_disable(dssdev); - hdmic_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 5b9ef09e6b2d..5724bdf53a42 
100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -206,7 +206,7 @@ static int __exit opa362_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - opa362_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(dssdev, dssdev->dst); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index b22c8f71c0e5..d65fed251b13 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -229,7 +229,7 @@ static int __exit tfp410_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - tfp410_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(dssdev, dssdev->dst); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 640f15b88467..bdd9d39215a4 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -346,7 +346,7 @@ static int __exit tpd_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - tpd_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(dssdev, dssdev->dst); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 5d704cf0809f..4772cb2553f2 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -241,7 +241,7 @@ static int __exit panel_dpi_remove(struct platform_device *pdev) omapdss_unregister_display(dssdev); panel_dpi_disable(dssdev); - panel_dpi_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index be6c0c811e66..443dc874e448 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1408,7 +1408,7 @@ static int __exit dsicm_remove(struct platform_device *pdev) omapdss_unregister_display(dssdev); dsicm_disable(dssdev); - dsicm_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 50b74e7351e9..ea4cdc8bd2f8 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -292,7 +292,7 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi) omapdss_unregister_display(dssdev); lb035q02_disable(dssdev); - lb035q02_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 769cba6c99ef..84e9c9a8a566 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -318,7 +318,7 @@ static int nec_8048_remove(struct spi_device *spi) omapdss_unregister_display(dssdev); nec_8048_disable(dssdev); - nec_8048_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git 
a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 629ea2d93575..a6ec328ee3d3 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -292,7 +292,7 @@ static int __exit sharp_ls_remove(struct platform_device *pdev) omapdss_unregister_display(dssdev); sharp_ls_disable(dssdev); - sharp_ls_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 82797df80e66..0a2efcb84532 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -837,7 +837,7 @@ static int acx565akm_remove(struct spi_device *spi) omapdss_unregister_display(dssdev); acx565akm_disable(dssdev); - acx565akm_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 298fedf96efe..8b5ee55982f4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -419,7 +419,7 @@ static int td028ttec1_panel_remove(struct spi_device *spi) omapdss_unregister_display(dssdev); td028ttec1_panel_disable(dssdev); - td028ttec1_panel_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index df3d8103823d..263c566df499 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -569,7 +569,7 @@ static int tpo_td043_remove(struct spi_device *spi) omapdss_unregister_display(dssdev); tpo_td043_disable(dssdev); - tpo_td043_disconnect(dssdev); + omapdss_device_disconnect(dssdev, NULL); sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); -- GitLab From fb5571717c24c264518aaaf1ab4ec4df73e4cebd Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 17:30:30 +0200 Subject: [PATCH 0910/1692] drm/omap: dss: Move src and dst check and set to connection handlers The encoders duplicate the same omap_dss_device src and dst fields set and checks in their connect and disconnect handlers. Move the code to the connect and disconnect wrappers. 
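For reference, a condensed sketch of the link management that the wrappers in
dss/base.c now perform (taken from the base.c hunk below, with the existing
driver/ops dispatch omitted):

    /* omapdss_device_connect(): record the src/dst link on success. */
    if (ret < 0)
            return ret;

    if (dst) {
            dst->src = src;
            src->dst = dst;
    }

    /* omapdss_device_disconnect(): verify the link and clear it. */
    if (dst) {
            if (WARN_ON(dst != src->dst))
                    return;

            dst->src = NULL;
            src->dst = NULL;
    }

The individual encoders and outputs can therefore drop their private copies of
the same checks and assignments.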
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 10 -------- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 10 -------- .../drm/omapdrm/displays/encoder-tpd12s015.c | 11 --------- drivers/gpu/drm/omapdrm/dss/base.c | 24 +++++++++++++++++-- drivers/gpu/drm/omapdrm/dss/dpi.c | 5 ---- drivers/gpu/drm/omapdrm/dss/dsi.c | 5 ---- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 5 ---- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 5 ---- drivers/gpu/drm/omapdrm/dss/omapdss.h | 8 +++---- drivers/gpu/drm/omapdrm/dss/output.c | 6 ----- drivers/gpu/drm/omapdrm/dss/sdi.c | 5 ---- drivers/gpu/drm/omapdrm/dss/venc.c | 5 ---- 12 files changed, 25 insertions(+), 74 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 5724bdf53a42..c058f889700d 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -51,9 +51,6 @@ static int opa362_connect(struct omap_dss_device *dssdev, return r; } - dst->src = dssdev; - dssdev->dst = dst; - ddata->in = in; return 0; } @@ -64,13 +61,6 @@ static void opa362_disconnect(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - WARN_ON(dst != dssdev->dst); - if (dst != dssdev->dst) - return; - - dst->src = NULL; - dssdev->dst = NULL; - omapdss_device_disconnect(in, &ddata->dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index d65fed251b13..b0e75af56485 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -47,9 +47,6 @@ static int tfp410_connect(struct omap_dss_device *dssdev, return r; } - dst->src = dssdev; - dssdev->dst = dst; - ddata->in = in; return 0; } @@ -60,13 +57,6 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - WARN_ON(dst != dssdev->dst); - if (dst != dssdev->dst) - return; - - dst->src = NULL; - dssdev->dst = NULL; - omapdss_device_disconnect(in, &ddata->dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index bdd9d39215a4..218cf73d1455 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -55,9 +55,6 @@ static int tpd_connect(struct omap_dss_device *dssdev, return r; } - dst->src = dssdev; - dssdev->dst = dst; - gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1); gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1); @@ -74,17 +71,9 @@ static void tpd_disconnect(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0); gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0); - dst->src = NULL; - dssdev->dst = NULL; - omapdss_device_disconnect(in, &ddata->dssdev); omap_dss_put_device(in); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 599ef628736b..22d3818208d7 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -105,15 +105,27 @@ struct omap_dss_device 
*omapdss_find_device_by_port(struct device_node *src, int omapdss_device_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { + int ret; + dev_dbg(src->dev, "connect\n"); if (omapdss_device_is_connected(src)) return -EBUSY; if (src->driver) - return src->driver->connect(src); + ret = src->driver->connect(src); else - return src->ops->connect(src, dst); + ret = src->ops->connect(src, dst); + + if (ret < 0) + return ret; + + if (dst) { + dst->src = src; + src->dst = dst; + } + + return 0; } EXPORT_SYMBOL_GPL(omapdss_device_connect); @@ -127,6 +139,14 @@ void omapdss_device_disconnect(struct omap_dss_device *src, return; } + if (dst) { + if (WARN_ON(dst != src->dst)) + return; + + dst->src = NULL; + src->dst = NULL; + } + if (src->driver) src->driver->disconnect(src); else diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 11d94b310d1a..bb7dcd88026f 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -671,11 +671,6 @@ static void dpi_disconnect(struct omap_dss_device *dssdev, { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - omapdss_output_unset_device(dssdev); dss_mgr_disconnect(&dpi->output, dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 3cc91e973f01..8e3dce1ddfb0 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4990,11 +4990,6 @@ static void dsi_disconnect(struct omap_dss_device *dssdev, { struct dsi_data *dsi = to_dsi_data(dssdev); - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - omapdss_output_unset_device(dssdev); dss_mgr_disconnect(&dsi->output, dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index e0406f2a90e7..7e5474e87c11 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -478,11 +478,6 @@ static void hdmi_disconnect(struct omap_dss_device *dssdev, { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - omapdss_output_unset_device(dssdev); dss_mgr_disconnect(&hdmi->output, dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 45d2eacedb2a..86b18ccb8d24 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -481,11 +481,6 @@ static void hdmi_disconnect(struct omap_dss_device *dssdev, { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - omapdss_output_unset_device(dssdev); dss_mgr_disconnect(&hdmi->output, dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 58bd6948bcde..0033adf534d3 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -391,6 +391,9 @@ struct omap_dss_device { struct module *owner; + struct omap_dss_device *src; + struct omap_dss_device *dst; + struct list_head list; struct list_head panel_list; @@ -409,8 +412,6 @@ struct omap_dss_device { enum omap_display_caps caps; - struct omap_dss_device *src; - enum omap_dss_display_state state; /* OMAP DSS output specific fields */ @@ -426,9 +427,6 @@ struct omap_dss_device { /* the port number in the DT node */ int port_num; - - /* dynamic fields */ - struct omap_dss_device *dst; }; struct omap_dss_driver { diff --git 
a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index e659c8e5c419..6abdb615a4c0 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -47,9 +47,6 @@ int omapdss_output_set_device(struct omap_dss_device *out, goto err; } - out->dst = dssdev; - dssdev->src = out; - mutex_unlock(&output_lock); return 0; @@ -81,9 +78,6 @@ int omapdss_output_unset_device(struct omap_dss_device *out) goto err; } - out->dst->src = NULL; - out->dst = NULL; - mutex_unlock(&output_lock); return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index b21499e124c1..e782be2c36bc 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -301,11 +301,6 @@ static void sdi_disconnect(struct omap_dss_device *dssdev, { struct sdi_device *sdi = dssdev_to_sdi(dssdev); - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - omapdss_output_unset_device(dssdev); dss_mgr_disconnect(&sdi->output, dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 4cae03f24e1b..17668da8ac5a 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -740,11 +740,6 @@ static void venc_disconnect(struct omap_dss_device *dssdev, { struct venc_device *venc = dssdev_to_venc(dssdev); - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - omapdss_output_unset_device(dssdev); dss_mgr_disconnect(&venc->output, dssdev); -- GitLab From 7269fde4e8c96e67cd39f456cabf3329b06efffc Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 28 Feb 2018 17:30:30 +0200 Subject: [PATCH 0911/1692] drm/omap: displays: Remove input omap_dss_device from panel data All connectors, encoders and panels store a pointer to their input omap_dss_device in the panel driver data structure. This duplicates the src field in the omap_dss_device structure. Remove the private copy and use the src field. 
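The conversion in each driver is mechanical; an illustrative before and after,
condensed from the connector hunks below:

    /* Before: each driver cached the video source in its private data. */
    struct omap_dss_device *in = ddata->in;

    in->ops->set_timings(in, &ddata->vm);
    r = in->ops->enable(in);

    /* After: the source is taken from the generic src field instead. */
    struct omap_dss_device *src = dssdev->src;

    src->ops->set_timings(src, &ddata->vm);
    r = src->ops->enable(src);

No private 'in' pointer needs to be stored or cleared any more.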
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 41 ++-- .../gpu/drm/omapdrm/displays/connector-dvi.c | 43 ++-- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 88 ++++---- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 41 ++-- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 41 ++-- .../drm/omapdrm/displays/encoder-tpd12s015.c | 68 +++---- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 44 ++-- .../gpu/drm/omapdrm/displays/panel-dsi-cm.c | 190 +++++++++--------- .../displays/panel-lgphilips-lb035q02.c | 41 ++-- .../omapdrm/displays/panel-nec-nl8048hl11.c | 42 ++-- .../displays/panel-sharp-ls037v7dw01.c | 42 ++-- .../omapdrm/displays/panel-sony-acx565akm.c | 42 ++-- .../omapdrm/displays/panel-tpo-td028ttec1.c | 42 ++-- .../omapdrm/displays/panel-tpo-td043mtea1.c | 44 ++-- 14 files changed, 362 insertions(+), 447 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index fb5c4dcafa2d..7d16c320749f 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -18,7 +18,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct device *dev; @@ -45,40 +44,37 @@ static const struct videomode tvc_pal_vm = { static int tvc_connect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); + if (IS_ERR(src)) { dev_err(ddata->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void tvc_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int tvc_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(ddata->dev, "enable\n"); @@ -89,9 +85,9 @@ static int tvc_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -103,14 +99,14 @@ static int tvc_enable(struct omap_dss_device *dssdev) static void tvc_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(ddata->dev, "disable\n"); if (!omapdss_device_is_enabled(dssdev)) return; - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -119,11 +115,11 @@ static void tvc_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct 
omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void tvc_get_timings(struct omap_dss_device *dssdev, @@ -137,10 +133,9 @@ static void tvc_get_timings(struct omap_dss_device *dssdev, static int tvc_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_driver tvc_driver = { diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index e47214a12118..211d59cf8dcd 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -40,7 +40,6 @@ static const struct videomode dvic_default_vm = { struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -59,41 +58,37 @@ struct panel_drv_data { static int dvic_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void dvic_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int dvic_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -102,9 +97,9 @@ static int dvic_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -115,13 +110,12 @@ static int dvic_enable(struct omap_dss_device *dssdev) static void dvic_disable(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -130,11 +124,11 @@ static void dvic_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void dvic_get_timings(struct omap_dss_device *dssdev, @@ -148,10 +142,9 @@ static void 
dvic_get_timings(struct omap_dss_device *dssdev, static int dvic_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static int dvic_ddc_read(struct i2c_adapter *adapter, diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 671f2f29c8d3..0998b8715096 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -37,7 +37,6 @@ static const struct videomode hdmic_default_vm = { struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; void (*hpd_cb)(void *cb_data, enum drm_connector_status status); void *hpd_cb_data; bool hpd_enabled; @@ -55,40 +54,37 @@ struct panel_drv_data { static int hdmic_connect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); + if (IS_ERR(src)) { dev_err(ddata->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void hdmic_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int hdmic_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(ddata->dev, "enable\n"); @@ -99,9 +95,9 @@ static int hdmic_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -113,14 +109,14 @@ static int hdmic_enable(struct omap_dss_device *dssdev) static void hdmic_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(ddata->dev, "disable\n"); if (!omapdss_device_is_enabled(dssdev)) return; - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -129,11 +125,11 @@ static void hdmic_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void hdmic_get_timings(struct omap_dss_device *dssdev, @@ -147,33 +143,31 @@ static void hdmic_get_timings(struct omap_dss_device *dssdev, static int hdmic_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct 
panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static int hdmic_read_edid(struct omap_dss_device *dssdev, u8 *edid, int len) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->hdmi.read_edid(in, edid, len); + return src->ops->hdmi.read_edid(src, edid, len); } static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; bool connected; if (gpio_is_valid(ddata->hpd_gpio)) connected = gpio_get_value_cansleep(ddata->hpd_gpio); else - connected = in->ops->hdmi.detect(in); - if (!connected && in->ops->hdmi.lost_hotplug) - in->ops->hdmi.lost_hotplug(in); + connected = src->ops->hdmi.detect(src); + if (!connected && src->ops->hdmi.lost_hotplug) + src->ops->hdmi.lost_hotplug(src); return connected; } @@ -183,7 +177,7 @@ static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, void *cb_data) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (gpio_is_valid(ddata->hpd_gpio)) { mutex_lock(&ddata->hpd_lock); @@ -191,8 +185,8 @@ static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, ddata->hpd_cb_data = cb_data; mutex_unlock(&ddata->hpd_lock); return 0; - } else if (in->ops->hdmi.register_hpd_cb) { - return in->ops->hdmi.register_hpd_cb(in, cb, cb_data); + } else if (src->ops->hdmi.register_hpd_cb) { + return src->ops->hdmi.register_hpd_cb(src, cb, cb_data); } return -ENOTSUPP; @@ -201,61 +195,59 @@ static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (gpio_is_valid(ddata->hpd_gpio)) { mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = NULL; ddata->hpd_cb_data = NULL; mutex_unlock(&ddata->hpd_lock); - } else if (in->ops->hdmi.unregister_hpd_cb) { - in->ops->hdmi.unregister_hpd_cb(in); + } else if (src->ops->hdmi.unregister_hpd_cb) { + src->ops->hdmi.unregister_hpd_cb(src); } } static void hdmic_enable_hpd(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (gpio_is_valid(ddata->hpd_gpio)) { mutex_lock(&ddata->hpd_lock); ddata->hpd_enabled = true; mutex_unlock(&ddata->hpd_lock); - } else if (in->ops->hdmi.enable_hpd) { - in->ops->hdmi.enable_hpd(in); + } else if (src->ops->hdmi.enable_hpd) { + src->ops->hdmi.enable_hpd(src); } } static void hdmic_disable_hpd(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (gpio_is_valid(ddata->hpd_gpio)) { mutex_lock(&ddata->hpd_lock); ddata->hpd_enabled = false; mutex_unlock(&ddata->hpd_lock); - } else if (in->ops->hdmi.disable_hpd) { - in->ops->hdmi.disable_hpd(in); + } else if (src->ops->hdmi.disable_hpd) { + src->ops->hdmi.disable_hpd(src); } } static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) { - struct panel_drv_data *ddata = 
to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->hdmi.set_hdmi_mode(in, hdmi_mode); + return src->ops->hdmi.set_hdmi_mode(src, hdmi_mode); } static int hdmic_set_infoframe(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->hdmi.set_infoframe(in, avi); + return src->ops->hdmi.set_infoframe(src, avi); } static const struct omap_dss_driver hdmic_driver = { diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index c058f889700d..a190bcf08738 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -23,7 +23,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct gpio_desc *enable_gpio; @@ -35,23 +34,21 @@ struct panel_drv_data { static int opa362_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } @@ -59,18 +56,17 @@ static void opa362_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, &ddata->dssdev); + omapdss_device_disconnect(src, &ddata->dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int opa362_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(dssdev->dev, "enable\n"); @@ -81,9 +77,9 @@ static int opa362_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -98,7 +94,7 @@ static int opa362_enable(struct omap_dss_device *dssdev) static void opa362_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "disable\n"); @@ -108,7 +104,7 @@ static void opa362_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -117,24 +113,23 @@ static void opa362_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "set_timings\n"); ddata->vm = 
*vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static int opa362_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "check_timings\n"); - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_device_ops opa362_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index b0e75af56485..db52eb49cff7 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -19,7 +19,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; int pd_gpio; @@ -31,23 +30,21 @@ struct panel_drv_data { static int tfp410_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } @@ -55,18 +52,17 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, &ddata->dssdev); + omapdss_device_disconnect(src, &ddata->dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int tfp410_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -75,9 +71,9 @@ static int tfp410_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -92,7 +88,7 @@ static int tfp410_enable(struct omap_dss_device *dssdev) static void tfp410_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -100,7 +96,7 @@ static void tfp410_disable(struct omap_dss_device *dssdev) if (gpio_is_valid(ddata->pd_gpio)) gpio_set_value_cansleep(ddata->pd_gpio, 0); - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -115,24 +111,23 @@ static void tfp410_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; tfp410_fix_timings(vm); ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static int tfp410_check_timings(struct omap_dss_device *dssdev, 
struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; tfp410_fix_timings(vm); - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_device_ops tfp410_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 218cf73d1455..4537867f833f 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -21,7 +21,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; void (*hpd_cb)(void *cb_data, enum drm_connector_status status); void *hpd_cb_data; bool hpd_enabled; @@ -40,18 +39,18 @@ static int tpd_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } @@ -61,7 +60,6 @@ static int tpd_connect(struct omap_dss_device *dssdev, /* DC-DC converter needs at max 300us to get to 90% of 5V */ udelay(300); - ddata->in = in; return 0; } @@ -69,29 +67,28 @@ static void tpd_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0); gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0); - omapdss_device_disconnect(in, &ddata->dssdev); + omapdss_device_disconnect(src, &ddata->dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int tpd_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -102,13 +99,12 @@ static int tpd_enable(struct omap_dss_device *dssdev) static void tpd_disable(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return; - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -117,45 +113,41 @@ static void tpd_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static int tpd_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - int r; - - r 
= in->ops->check_timings(in, vm); + struct omap_dss_device *src = dssdev->src; - return r; + return src->ops->check_timings(src, vm); } static int tpd_read_edid(struct omap_dss_device *dssdev, u8 *edid, int len) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!gpiod_get_value_cansleep(ddata->hpd_gpio)) return -ENODEV; - return in->ops->hdmi.read_edid(in, edid, len); + return src->ops->hdmi.read_edid(src, edid, len); } static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio); - if (!connected && in->ops->hdmi.lost_hotplug) - in->ops->hdmi.lost_hotplug(in); + if (!connected && src->ops->hdmi.lost_hotplug) + src->ops->hdmi.lost_hotplug(src); return connected; } @@ -205,19 +197,17 @@ static void tpd_disable_hpd(struct omap_dss_device *dssdev) static int tpd_set_infoframe(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->hdmi.set_infoframe(in, avi); + return src->ops->hdmi.set_infoframe(src, avi); } static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->hdmi.set_hdmi_mode(in, hdmi_mode); + return src->ops->hdmi.set_hdmi_mode(src, hdmi_mode); } static const struct omap_dss_device_ops tpd_ops = { @@ -262,7 +252,7 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data) static int tpd_probe(struct platform_device *pdev) { - struct omap_dss_device *in, *dssdev; + struct omap_dss_device *dssdev; struct panel_drv_data *ddata; int r; struct gpio_desc *gpio; @@ -311,8 +301,6 @@ static int tpd_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; dssdev->port_num = 1; - in = ddata->in; - r = omapdss_register_output(dssdev); if (r) { dev_err(&pdev->dev, "Failed to register output\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 4772cb2553f2..5864e24a6e21 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -23,7 +23,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -37,41 +36,37 @@ struct panel_drv_data { static int panel_dpi_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void panel_dpi_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - 
omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int panel_dpi_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -80,15 +75,15 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; r = regulator_enable(ddata->vcc_supply); if (r) { - in->ops->disable(in); + src->ops->disable(src); return r; } @@ -103,7 +98,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) static void panel_dpi_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -113,7 +108,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) gpiod_set_value_cansleep(ddata->enable_gpio, 0); regulator_disable(ddata->vcc_supply); - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -122,11 +117,11 @@ static void panel_dpi_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void panel_dpi_get_timings(struct omap_dss_device *dssdev, @@ -140,10 +135,9 @@ static void panel_dpi_get_timings(struct omap_dss_device *dssdev, static int panel_dpi_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_driver panel_dpi_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 443dc874e448..365ece2927a4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -41,7 +41,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -142,11 +141,11 @@ static void hw_guard_wait(struct panel_drv_data *ddata) static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; u8 buf[1]; - r = in->ops->dsi.dcs_read(in, ddata->channel, dcs_cmd, buf, 1); + r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1); if (r < 0) return r; @@ -158,29 +157,30 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd) { - struct omap_dss_device *in = ddata->in; - return in->ops->dsi.dcs_write(in, ddata->channel, &dcs_cmd, 1); + struct omap_dss_device *src = ddata->dssdev.src; + + return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1); } static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, 
u8 param) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 buf[2] = { dcs_cmd, param }; - return in->ops->dsi.dcs_write(in, ddata->channel, buf, 2); + return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2); } static int dsicm_sleep_in(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 cmd; int r; hw_guard_wait(ddata); cmd = MIPI_DCS_ENTER_SLEEP_MODE; - r = in->ops->dsi.dcs_write_nosync(in, ddata->channel, &cmd, 1); + r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1); if (r) return r; @@ -228,7 +228,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3) static int dsicm_set_update_window(struct panel_drv_data *ddata, u16 x, u16 y, u16 w, u16 h) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; u16 x1 = x; u16 x2 = x + w - 1; @@ -242,7 +242,7 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata, buf[3] = (x2 >> 8) & 0xff; buf[4] = (x2 >> 0) & 0xff; - r = in->ops->dsi.dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); + r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf)); if (r) return r; @@ -252,11 +252,11 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata, buf[3] = (y2 >> 8) & 0xff; buf[4] = (y2 >> 0) & 0xff; - r = in->ops->dsi.dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); + r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf)); if (r) return r; - in->ops->dsi.bta_sync(in, ddata->channel); + src->ops->dsi.bta_sync(src, ddata->channel); return r; } @@ -275,7 +275,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata) static int dsicm_enter_ulps(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; if (ddata->ulps_enabled) @@ -290,7 +290,7 @@ static int dsicm_enter_ulps(struct panel_drv_data *ddata) if (ddata->ext_te_gpio) disable_irq(gpiod_to_irq(ddata->ext_te_gpio)); - in->ops->dsi.disable(in, false, true); + src->ops->dsi.disable(src, false, true); ddata->ulps_enabled = true; @@ -309,19 +309,19 @@ static int dsicm_enter_ulps(struct panel_drv_data *ddata) static int dsicm_exit_ulps(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; if (!ddata->ulps_enabled) return 0; - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) { dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); goto err1; } - in->ops->dsi.enable_hs(in, ddata->channel, true); + src->ops->dsi.enable_hs(src, ddata->channel, true); r = _dsicm_enable_te(ddata, true); if (r) { @@ -366,7 +366,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata) static int dsicm_bl_update_status(struct backlight_device *dev) { struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r = 0; int level; @@ -381,13 +381,13 @@ static int dsicm_bl_update_status(struct backlight_device *dev) mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (!r) r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); } mutex_unlock(&ddata->lock); @@ -414,21 +414,21 @@ static ssize_t dsicm_num_errors_show(struct device *dev, { struct 
platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 errors = 0; int r; mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (!r) r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS, &errors); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); } else { r = -ENODEV; } @@ -446,20 +446,20 @@ static ssize_t dsicm_hw_revision_show(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 id1, id2, id3; int r; mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (!r) r = dsicm_get_id(ddata, &id1, &id2, &id3); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); } else { r = -ENODEV; } @@ -478,7 +478,7 @@ static ssize_t dsicm_store_ulps(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; unsigned long t; int r; @@ -489,14 +489,14 @@ static ssize_t dsicm_store_ulps(struct device *dev, mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); if (t) r = dsicm_enter_ulps(ddata); else r = dsicm_wake_up(ddata); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); } mutex_unlock(&ddata->lock); @@ -528,7 +528,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; unsigned long t; int r; @@ -541,9 +541,9 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev, if (ddata->enabled) { /* dsicm_wake_up will restart the timer */ - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); } mutex_unlock(&ddata->lock); @@ -603,7 +603,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata) static int dsicm_power_on(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 id1, id2, id3; int r; struct omap_dss_dsi_config dsi_config = { @@ -635,7 +635,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) } if (ddata->pin_config.num_pins > 0) { - r = in->ops->dsi.configure_pins(in, &ddata->pin_config); + r = src->ops->dsi.configure_pins(src, &ddata->pin_config); if (r) { dev_err(&ddata->pdev->dev, "failed to configure DSI pins\n"); @@ -643,13 +643,13 @@ static int dsicm_power_on(struct panel_drv_data *ddata) } } - r = in->ops->dsi.set_config(in, &dsi_config); + r = src->ops->dsi.set_config(src, &dsi_config); if (r) { dev_err(&ddata->pdev->dev, "failed to configure DSI\n"); goto err_vddi; } - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) { dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); goto err_vddi; @@ -657,7 +657,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) dsicm_hw_reset(ddata); - in->ops->dsi.enable_hs(in, ddata->channel, false); + 
src->ops->dsi.enable_hs(src, ddata->channel, false); r = dsicm_sleep_out(ddata); if (r) @@ -689,7 +689,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) if (r) goto err; - r = in->ops->dsi.enable_video_output(in, ddata->channel); + r = src->ops->dsi.enable_video_output(src, ddata->channel); if (r) goto err; @@ -701,7 +701,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) ddata->intro_printed = true; } - in->ops->dsi.enable_hs(in, ddata->channel, true); + src->ops->dsi.enable_hs(src, ddata->channel, true); return 0; err: @@ -709,7 +709,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) dsicm_hw_reset(ddata); - in->ops->dsi.disable(in, true, false); + src->ops->dsi.disable(src, true, false); err_vddi: if (ddata->vddi) regulator_disable(ddata->vddi); @@ -722,10 +722,10 @@ static int dsicm_power_on(struct panel_drv_data *ddata) static void dsicm_power_off(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; - in->ops->dsi.disable_video_output(in, ddata->channel); + src->ops->dsi.disable_video_output(src, ddata->channel); r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF); if (!r) @@ -737,7 +737,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata) dsicm_hw_reset(ddata); } - in->ops->dsi.disable(in, true, false); + src->ops->dsi.disable(src, true, false); if (ddata->vddi) regulator_disable(ddata->vddi); @@ -760,61 +760,59 @@ static int dsicm_connect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct device *dev = &ddata->pdev->dev; - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { dev_err(dev, "Failed to connect to video source\n"); goto err_connect; } - r = in->ops->dsi.request_vc(in, &ddata->channel); + r = src->ops->dsi.request_vc(src, &ddata->channel); if (r) { dev_err(dev, "failed to get virtual channel\n"); goto err_req_vc; } - r = in->ops->dsi.set_vc_id(in, ddata->channel, TCH); + r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH); if (r) { dev_err(dev, "failed to set VC_ID\n"); goto err_vc_id; } - ddata->in = in; return 0; err_vc_id: - in->ops->dsi.release_vc(in, ddata->channel); + src->ops->dsi.release_vc(src, ddata->channel); err_req_vc: - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); err_connect: - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } static void dsicm_disconnect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - in->ops->dsi.release_vc(in, ddata->channel); - omapdss_device_disconnect(in, dssdev); + src->ops->dsi.release_vc(src, ddata->channel); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int dsicm_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(&ddata->pdev->dev, "enable\n"); @@ -831,11 +829,11 @@ static int 
dsicm_enable(struct omap_dss_device *dssdev) goto err; } - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_power_on(ddata); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); if (r) goto err; @@ -856,7 +854,7 @@ static int dsicm_enable(struct omap_dss_device *dssdev) static void dsicm_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(&ddata->pdev->dev, "disable\n"); @@ -867,7 +865,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev) dsicm_cancel_ulps_work(ddata); - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); if (omapdss_device_is_enabled(dssdev)) { r = dsicm_wake_up(ddata); @@ -875,7 +873,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev) dsicm_power_off(ddata); } - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; @@ -885,16 +883,16 @@ static void dsicm_disable(struct omap_dss_device *dssdev) static void dsicm_framedone_cb(int err, void *data) { struct panel_drv_data *ddata = data; - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err); - in->ops->dsi.bus_unlock(ddata->in); + src->ops->dsi.bus_unlock(src); } static irqreturn_t dsicm_te_isr(int irq, void *data) { struct panel_drv_data *ddata = data; - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int old; int r; @@ -903,7 +901,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data) if (old) { cancel_delayed_work(&ddata->te_timeout_work); - r = in->ops->dsi.update(in, ddata->channel, dsicm_framedone_cb, + r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb, ddata); if (r) goto err; @@ -912,7 +910,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data) return IRQ_HANDLED; err: dev_err(&ddata->pdev->dev, "start update failed\n"); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); return IRQ_HANDLED; } @@ -920,25 +918,25 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work) { struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, te_timeout_work.work); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n"); atomic_set(&ddata->do_update, 0); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); } static int dsicm_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); mutex_lock(&ddata->lock); - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (r) @@ -960,17 +958,17 @@ static int dsicm_update(struct omap_dss_device *dssdev, msecs_to_jiffies(250)); atomic_set(&ddata->do_update, 1); } else { - r = in->ops->dsi.update(in, ddata->channel, dsicm_framedone_cb, + r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb, ddata); if (r) goto err; } - /* note: no bus_unlock here. unlock is in framedone_cb */ + /* note: no bus_unlock here. 
unlock is src framedone_cb */ mutex_unlock(&ddata->lock); return 0; err: - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); return r; } @@ -978,13 +976,13 @@ static int dsicm_update(struct omap_dss_device *dssdev, static int dsicm_sync(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(&ddata->pdev->dev, "sync\n"); mutex_lock(&ddata->lock); - in->ops->dsi.bus_lock(in); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_lock(src); + src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); dev_dbg(&ddata->pdev->dev, "sync done\n"); @@ -994,7 +992,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev) static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; if (enable) @@ -1003,7 +1001,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF); if (!ddata->ext_te_gpio) - in->ops->dsi.enable_te(in, enable); + src->ops->dsi.enable_te(src, enable); /* possible panel bug */ msleep(100); @@ -1014,7 +1012,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; mutex_lock(&ddata->lock); @@ -1022,7 +1020,7 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) if (ddata->te_enabled == enable) goto end; - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); if (ddata->enabled) { r = dsicm_wake_up(ddata); @@ -1036,13 +1034,13 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) ddata->te_enabled = enable; - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); end: mutex_unlock(&ddata->lock); return 0; err: - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); return r; @@ -1065,7 +1063,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; int first = 1; int plen; @@ -1084,7 +1082,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, size = min((u32)w * h * 3, ddata->vm.hactive * ddata->vm.vactive * 3); - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (r) @@ -1100,7 +1098,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, dsicm_set_update_window(ddata, x, y, w, h); - r = in->ops->dsi.set_max_rx_packet_size(in, ddata->channel, plen); + r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen); if (r) goto err2; @@ -1108,7 +1106,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, u8 dcs_cmd = first ? 
0x2e : 0x3e; first = 0; - r = in->ops->dsi.dcs_read(in, ddata->channel, dcs_cmd, + r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf + buf_used, size - buf_used); if (r < 0) { @@ -1134,9 +1132,9 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, r = buf_used; err3: - in->ops->dsi.set_max_rx_packet_size(in, ddata->channel, 1); + src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1); err2: - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); err1: mutex_unlock(&ddata->lock); return r; @@ -1147,7 +1145,7 @@ static void dsicm_ulps_work(struct work_struct *work) struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, ulps_work.work); struct omap_dss_device *dssdev = &ddata->dssdev; - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; mutex_lock(&ddata->lock); @@ -1156,11 +1154,11 @@ static void dsicm_ulps_work(struct work_struct *work) return; } - in->ops->dsi.bus_lock(in); + src->ops->dsi.bus_lock(src); dsicm_enter_ulps(ddata); - in->ops->dsi.bus_unlock(in); + src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index ea4cdc8bd2f8..2e54ac115dbc 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -45,7 +45,6 @@ static const struct videomode lb035q02_vm = { struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct spi_device *spi; @@ -119,42 +118,39 @@ static void init_lb035q02_panel(struct spi_device *spi) static int lb035q02_connect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } init_lb035q02_panel(ddata->spi); - ddata->in = in; return 0; } static void lb035q02_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int lb035q02_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -163,9 +159,9 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -180,7 +176,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) static void lb035q02_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; 
if (!omapdss_device_is_enabled(dssdev)) return; @@ -188,7 +184,7 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -197,11 +193,11 @@ static void lb035q02_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void lb035q02_get_timings(struct omap_dss_device *dssdev, @@ -215,10 +211,9 @@ static void lb035q02_get_timings(struct omap_dss_device *dssdev, static int lb035q02_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_driver lb035q02_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 84e9c9a8a566..6f2261ccd442 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -21,7 +21,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -114,41 +113,37 @@ static int init_nec_8048_wvga_lcd(struct spi_device *spi) static int nec_8048_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void nec_8048_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int nec_8048_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -157,9 +152,9 @@ static int nec_8048_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -174,7 +169,7 @@ static int nec_8048_enable(struct omap_dss_device *dssdev) static void nec_8048_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -182,7 
+177,7 @@ static void nec_8048_disable(struct omap_dss_device *dssdev) if (gpio_is_valid(ddata->res_gpio)) gpio_set_value_cansleep(ddata->res_gpio, 0); - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -191,11 +186,11 @@ static void nec_8048_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void nec_8048_get_timings(struct omap_dss_device *dssdev, @@ -209,10 +204,9 @@ static void nec_8048_get_timings(struct omap_dss_device *dssdev, static int nec_8048_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_driver nec_8048_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index a6ec328ee3d3..369501251fa9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -21,7 +21,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct regulator *vcc; struct videomode vm; @@ -60,41 +59,37 @@ static const struct videomode sharp_ls_vm = { static int sharp_ls_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void sharp_ls_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int sharp_ls_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -103,7 +98,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); if (ddata->vcc) { r = regulator_enable(ddata->vcc); @@ -111,7 +106,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) return r; } - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) { regulator_disable(ddata->vcc); return r; @@ -134,7 +129,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) static void sharp_ls_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - 
struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -149,7 +144,7 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) msleep(100); - in->ops->disable(in); + src->ops->disable(src); if (ddata->vcc) regulator_disable(ddata->vcc); @@ -161,11 +156,11 @@ static void sharp_ls_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void sharp_ls_get_timings(struct omap_dss_device *dssdev, @@ -179,10 +174,9 @@ static void sharp_ls_get_timings(struct omap_dss_device *dssdev, static int sharp_ls_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_driver sharp_ls_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 0a2efcb84532..5bfd24c5ad5c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -64,7 +64,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; int reset_gpio; @@ -509,48 +508,44 @@ static const struct attribute_group bldev_attr_group = { static int acx565akm_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void acx565akm_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(&ddata->spi->dev, "%s\n", __func__); - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) { pr_err("%s sdi enable failed\n", __func__); return r; @@ -591,7 +586,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "%s\n", __func__); @@ -615,7 +610,7 @@ static void 
acx565akm_panel_power_off(struct omap_dss_device *dssdev) /* FIXME need to tweak this delay */ msleep(100); - in->ops->disable(in); + src->ops->disable(src); } static int acx565akm_enable(struct omap_dss_device *dssdev) @@ -662,11 +657,11 @@ static void acx565akm_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void acx565akm_get_timings(struct omap_dss_device *dssdev, @@ -680,10 +675,9 @@ static void acx565akm_get_timings(struct omap_dss_device *dssdev, static int acx565akm_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_driver acx565akm_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 8b5ee55982f4..9117c1ba037b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -33,7 +33,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -168,41 +167,37 @@ enum jbt_register { static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -211,9 +206,9 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -310,7 +305,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -322,7 +317,7 @@ static void td028ttec1_panel_disable(struct 
omap_dss_device *dssdev) jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN); jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00); - in->ops->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } @@ -331,11 +326,11 @@ static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, @@ -349,10 +344,9 @@ static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_driver td028ttec1_ops = { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 263c566df499..53742a61aa86 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -54,7 +54,6 @@ static const u16 tpo_td043_def_gamma[12] = { struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -339,41 +338,37 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata) static int tpo_td043_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct omap_dss_device *src; int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { + src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); + return PTR_ERR(src); } - r = omapdss_device_connect(in, dssdev); + r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(in); + omap_dss_put_device(src); return r; } - ddata->in = in; return 0; } static void tpo_td043_disconnect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - omapdss_device_disconnect(in, dssdev); + omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(in); - ddata->in = NULL; + omap_dss_put_device(src); } static int tpo_td043_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -382,9 +377,9 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops->set_timings(in, &ddata->vm); + src->ops->set_timings(src, &ddata->vm); - r = in->ops->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -395,7 +390,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (!ddata->spi_suspended) { r = tpo_td043_power_on(ddata); if (r) { - in->ops->disable(in); + src->ops->disable(src); return r; } } @@ -408,12 +403,12 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) static void tpo_td043_disable(struct 
omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; - in->ops->disable(in); + src->ops->disable(src); if (!ddata->spi_suspended) tpo_td043_power_off(ddata); @@ -425,11 +420,11 @@ static void tpo_td043_set_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; ddata->vm = *vm; - in->ops->set_timings(in, vm); + src->ops->set_timings(src, vm); } static void tpo_td043_get_timings(struct omap_dss_device *dssdev, @@ -443,10 +438,9 @@ static void tpo_td043_get_timings(struct omap_dss_device *dssdev, static int tpo_td043_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; - return in->ops->check_timings(in, vm); + return src->ops->check_timings(src, vm); } static const struct omap_dss_driver tpo_td043_ops = { -- GitLab From f3ed97f9ae7df6a36025b94e1943492e73691f7c Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 00:40:04 +0200 Subject: [PATCH 0912/1692] drm/omap: dsi: Simplify debugfs implementation The DSI debugfs regs and irqs show handlers received a pointer to the DSI private data. There's no need to look it up from the list of DSS outputs. Use the pointer directly, this allows simplifying the implementation of the handlers. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dsi.c | 63 +++++++------------------------ 1 file changed, 14 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 8e3dce1ddfb0..6b16eec0b2b2 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1518,8 +1518,9 @@ void dsi_dump_clocks(struct seq_file *s) } #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS -static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s) +static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) { + struct dsi_data *dsi = p; unsigned long flags; struct dsi_irq_stats stats; @@ -1603,33 +1604,20 @@ static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s) PIS(ULPSACTIVENOT_ALL0); PIS(ULPSACTIVENOT_ALL1); #undef PIS -} - -static int dsi1_dump_irqs(struct seq_file *s, void *p) -{ - struct dsi_data *dsi = dsi_get_dsi_from_id(0); - dsi_dump_dsi_irqs(dsi, s); - return 0; -} - -static int dsi2_dump_irqs(struct seq_file *s, void *p) -{ - struct dsi_data *dsi = dsi_get_dsi_from_id(1); - - dsi_dump_dsi_irqs(dsi, s); return 0; } #endif -static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s) +static int dsi_dump_dsi_regs(struct seq_file *s, void *p) { -#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r)) + struct dsi_data *dsi = p; if (dsi_runtime_get(dsi)) - return; + return 0; dsi_enable_scp_clk(dsi); +#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r)) DUMPREG(DSI_REVISION); DUMPREG(DSI_SYSCONFIG); DUMPREG(DSI_SYSSTATUS); @@ -1699,25 +1687,11 @@ static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s) DUMPREG(DSI_PLL_GO); DUMPREG(DSI_PLL_CONFIGURATION1); DUMPREG(DSI_PLL_CONFIGURATION2); +#undef DUMPREG dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); 
-#undef DUMPREG -} - -static int dsi1_dump_regs(struct seq_file *s, void *p) -{ - struct dsi_data *dsi = dsi_get_dsi_from_id(0); - dsi_dump_dsi_regs(dsi, s); - return 0; -} - -static int dsi2_dump_regs(struct seq_file *s, void *p) -{ - struct dsi_data *dsi = dsi_get_dsi_from_id(1); - - dsi_dump_dsi_regs(dsi, s); return 0; } @@ -5305,6 +5279,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) struct dsi_data *dsi; struct resource *dsi_mem; struct resource *res; + char name[10]; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) @@ -5443,23 +5418,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) dsi_runtime_put(dsi); - if (dsi->module_id == 0) - dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi1_regs", - dsi1_dump_regs, - &dsi); - else - dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi2_regs", - dsi2_dump_regs, - &dsi); + snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); + dsi->debugfs.regs = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_regs, &dsi); #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS - if (dsi->module_id == 0) - dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi1_irqs", - dsi1_dump_irqs, - &dsi); - else - dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi2_irqs", - dsi2_dump_irqs, - &dsi); + snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); + dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_irqs, &dsi); #endif return 0; -- GitLab From 3ce75d67e44c11c9f84ea0355fe6431a43ce6e94 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 00:40:04 +0200 Subject: [PATCH 0913/1692] drm/omap: Move DSI debugfs clocks dump to dsi%u_clks files The DSI clocks are dumped in the DSS-level debugfs clocks file. This complicates the implementation as the DSI private data has to be looked up through the outputs list. Simplify it by creating two debugfs files, dsi1_clks and dsi2_clks, to dump the DSI clocks. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dsi.c | 43 +++++++------------------------ drivers/gpu/drm/omapdrm/dss/dss.c | 3 --- drivers/gpu/drm/omapdrm/dss/dss.h | 2 -- 3 files changed, 9 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 6b16eec0b2b2..ac6ec1f20d31 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -403,6 +403,7 @@ struct dsi_data { struct { struct dss_debugfs_entry *irqs; struct dss_debugfs_entry *regs; + struct dss_debugfs_entry *clks; } debugfs; #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS @@ -442,27 +443,6 @@ static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev) return dev_get_drvdata(dssdev->dev); } -static struct dsi_data *dsi_get_dsi_from_id(int module) -{ - struct omap_dss_device *out; - enum omap_dss_output_id id; - - switch (module) { - case 0: - id = OMAP_DSS_OUTPUT_DSI1; - break; - case 1: - id = OMAP_DSS_OUTPUT_DSI2; - break; - default: - return NULL; - } - - out = omap_dss_get_output(id); - - return out ? 
to_dsi_data(out) : NULL; -} - static inline void dsi_write_reg(struct dsi_data *dsi, const struct dsi_reg idx, u32 val) { @@ -1448,8 +1428,9 @@ static void dsi_pll_disable(struct dss_pll *pll) dsi_pll_uninit(dsi, true); } -static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s) +static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) { + struct dsi_data *dsi = p; struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; enum dss_clk_source dispc_clk_src, dsi_clk_src; int dsi_module = dsi->module_id; @@ -1459,7 +1440,7 @@ static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s) dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module); if (dsi_runtime_get(dsi)) - return; + return 0; seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); @@ -1503,18 +1484,8 @@ static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s) seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk); dsi_runtime_put(dsi); -} - -void dsi_dump_clocks(struct seq_file *s) -{ - struct dsi_data *dsi; - int i; - for (i = 0; i < MAX_NUM_DSI; i++) { - dsi = dsi_get_dsi_from_id(i); - if (dsi) - dsi_dump_dsi_clocks(dsi, s); - } + return 0; } #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS @@ -5426,6 +5397,9 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, dsi_dump_dsi_irqs, &dsi); #endif + snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); + dsi->debugfs.clks = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_clocks, &dsi); return 0; @@ -5442,6 +5416,7 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data) { struct dsi_data *dsi = dev_get_drvdata(dev); + dss_debugfs_remove_file(dsi->debugfs.clks); dss_debugfs_remove_file(dsi->debugfs.irqs); dss_debugfs_remove_file(dsi->debugfs.regs); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 55d837983a1e..e93bed8edd7e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -394,9 +394,6 @@ static int dss_debug_dump_clocks(struct seq_file *s, void *p) dss_dump_clocks(dss, s); dispc_dump_clocks(dss->dispc, s); -#ifdef CONFIG_OMAP2_DSS_DSI - dsi_dump_clocks(s); -#endif return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index e6baad7e653f..02f8b346edfd 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -376,8 +376,6 @@ static inline void sdi_uninit_port(struct device_node *port) #ifdef CONFIG_OMAP2_DSS_DSI -void dsi_dump_clocks(struct seq_file *s); - void dsi_irq_handler(void); #endif -- GitLab From de57e9dbc1454704a54190cb3b544b841c34301a Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 01:25:32 +0200 Subject: [PATCH 0914/1692] drm/omap: dss: Remove output devices list The output devices list isn't used anymore, all output devices are accessed through the global devices list. Remove it. 
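As an aside, a minimal sketch of the resulting init/uninit pattern in an output driver; this is not taken from the patch, the my_output_*() helpers and struct my_device wrapper are placeholder names, and the field setup is abbreviated to members that appear in the hunks below:

/* Sketch only: with the private output list gone, registration goes
 * straight to the global omapdss devices list.
 */
static void my_output_init(struct my_device *priv)
{
	struct omap_dss_device *out = &priv->output;

	/* ... fill in out->dev, out->id, out->ops, out->owner, ... */

	omapdss_device_register(out);		/* was omapdss_register_output(out) */
}

static void my_output_uninit(struct my_device *priv)
{
	omapdss_device_unregister(&priv->output);	/* was omapdss_unregister_output() */
}

The per-id lookup omap_dss_get_output() is removed along with the list, since nothing uses it any more.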
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 9 ++---- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 8 ++--- .../drm/omapdrm/displays/encoder-tpd12s015.c | 8 ++--- drivers/gpu/drm/omapdrm/dss/base.c | 2 ++ drivers/gpu/drm/omapdrm/dss/dpi.c | 4 +-- drivers/gpu/drm/omapdrm/dss/dsi.c | 4 +-- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 4 +-- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 4 +-- drivers/gpu/drm/omapdrm/dss/omapdss.h | 5 ---- drivers/gpu/drm/omapdrm/dss/output.c | 29 ------------------- drivers/gpu/drm/omapdrm/dss/sdi.c | 4 +-- drivers/gpu/drm/omapdrm/dss/venc.c | 4 +-- 12 files changed, 20 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index a190bcf08738..52ceaae14647 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -146,7 +146,6 @@ static int opa362_probe(struct platform_device *pdev) struct panel_drv_data *ddata; struct omap_dss_device *dssdev; struct gpio_desc *gpio; - int r; dev_dbg(&pdev->dev, "probe\n"); @@ -169,11 +168,7 @@ static int opa362_probe(struct platform_device *pdev) dssdev->output_type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; - r = omapdss_register_output(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register output\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -183,7 +178,7 @@ static int __exit opa362_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_output(&ddata->dssdev); + omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); if (omapdss_device_is_enabled(dssdev)) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index db52eb49cff7..f4223cf7f03f 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -192,11 +192,7 @@ static int tfp410_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; dssdev->port_num = 1; - r = omapdss_register_output(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register output\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -206,7 +202,7 @@ static int __exit tfp410_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_output(&ddata->dssdev); + omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); if (omapdss_device_is_enabled(dssdev)) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 4537867f833f..90e07036fb48 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -301,11 +301,7 @@ static int tpd_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; dssdev->port_num = 1; - r = omapdss_register_output(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register output\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -315,7 +311,7 @@ static int __exit tpd_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - 
omapdss_unregister_output(&ddata->dssdev); + omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); if (omapdss_device_is_enabled(dssdev)) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 22d3818208d7..8fac816ca481 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -63,6 +63,7 @@ void omapdss_device_register(struct omap_dss_device *dssdev) list_add_tail(&dssdev->list, &omapdss_devices_list); mutex_unlock(&omapdss_devices_lock); } +EXPORT_SYMBOL_GPL(omapdss_device_register); void omapdss_device_unregister(struct omap_dss_device *dssdev) { @@ -70,6 +71,7 @@ void omapdss_device_unregister(struct omap_dss_device *dssdev) list_del(&dssdev->list); mutex_unlock(&omapdss_devices_lock); } +EXPORT_SYMBOL_GPL(omapdss_device_unregister); static bool omapdss_device_is_registered(struct device_node *node) { diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index bb7dcd88026f..372f1070bd69 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -718,7 +718,7 @@ static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) out->ops = &dpi_ops; out->owner = THIS_MODULE; - omapdss_register_output(out); + omapdss_device_register(out); } static void dpi_uninit_output_port(struct device_node *port) @@ -726,7 +726,7 @@ static void dpi_uninit_output_port(struct device_node *port) struct dpi_data *dpi = port->data; struct omap_dss_device *out = &dpi->output; - omapdss_unregister_output(out); + omapdss_device_unregister(out); } int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index ac6ec1f20d31..e061816e5381 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4995,14 +4995,14 @@ static void dsi_init_output(struct dsi_data *dsi) out->ops = &dsi_ops; out->owner = THIS_MODULE; - omapdss_register_output(out); + omapdss_device_register(out); } static void dsi_uninit_output(struct dsi_data *dsi) { struct omap_dss_device *out = &dsi->output; - omapdss_unregister_output(out); + omapdss_device_unregister(out); } static int dsi_probe_of(struct dsi_data *dsi) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 7e5474e87c11..fa818033f3f3 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -565,14 +565,14 @@ static void hdmi_init_output(struct omap_hdmi *hdmi) out->ops = &hdmi_ops; out->owner = THIS_MODULE; - omapdss_register_output(out); + omapdss_device_register(out); } static void hdmi_uninit_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; - omapdss_unregister_output(out); + omapdss_device_unregister(out); } static int hdmi_probe_of(struct omap_hdmi *hdmi) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 86b18ccb8d24..ef4a48f397d2 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -556,14 +556,14 @@ static void hdmi_init_output(struct omap_hdmi *hdmi) out->ops = &hdmi_ops; out->owner = THIS_MODULE; - omapdss_register_output(out); + omapdss_device_register(out); } static void hdmi_uninit_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; - omapdss_unregister_output(out); + omapdss_device_unregister(out); } static int hdmi_probe_of(struct omap_hdmi *hdmi) diff 
--git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 0033adf534d3..26abc09f3d4a 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -416,8 +416,6 @@ struct omap_dss_device { /* OMAP DSS output specific fields */ - struct list_head output_list; - /* DISPC channel for this output */ enum omap_channel dispc_channel; bool dispc_channel_connected; @@ -510,9 +508,6 @@ int omap_dss_get_num_overlay_managers(void); int omap_dss_get_num_overlays(void); -int omapdss_register_output(struct omap_dss_device *output); -void omapdss_unregister_output(struct omap_dss_device *output); -struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev); int omapdss_output_unset_device(struct omap_dss_device *out); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 6abdb615a4c0..e62da96f83af 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -23,7 +23,6 @@ #include "omapdss.h" -static LIST_HEAD(output_list); static DEFINE_MUTEX(output_lock); int omapdss_output_set_device(struct omap_dss_device *out, @@ -88,34 +87,6 @@ int omapdss_output_unset_device(struct omap_dss_device *out) } EXPORT_SYMBOL(omapdss_output_unset_device); -int omapdss_register_output(struct omap_dss_device *out) -{ - list_add_tail(&out->output_list, &output_list); - omapdss_device_register(out); - return 0; -} -EXPORT_SYMBOL(omapdss_register_output); - -void omapdss_unregister_output(struct omap_dss_device *out) -{ - list_del(&out->output_list); - omapdss_device_unregister(out); -} -EXPORT_SYMBOL(omapdss_unregister_output); - -struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id) -{ - struct omap_dss_device *out; - - list_for_each_entry(out, &output_list, output_list) { - if (out->id == id) - return out; - } - - return NULL; -} -EXPORT_SYMBOL(omap_dss_get_output); - struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev) { while (dssdev->src) diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index e782be2c36bc..2f39e1f47990 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -331,12 +331,12 @@ static void sdi_init_output(struct sdi_device *sdi) out->ops = &sdi_ops; out->owner = THIS_MODULE; - omapdss_register_output(out); + omapdss_device_register(out); } static void sdi_uninit_output(struct sdi_device *sdi) { - omapdss_unregister_output(&sdi->output); + omapdss_device_unregister(&sdi->output); } int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 17668da8ac5a..70418cf4eea2 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -768,12 +768,12 @@ static void venc_init_output(struct venc_device *venc) out->ops = &venc_ops; out->owner = THIS_MODULE; - omapdss_register_output(out); + omapdss_device_register(out); } static void venc_uninit_output(struct venc_device *venc) { - omapdss_unregister_output(&venc->output); + omapdss_device_unregister(&venc->output); } static int venc_probe_of(struct venc_device *venc) -- GitLab From 92ce521a4841131acf9af41e5bc772990ada06dc Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 01:40:48 +0200 Subject: [PATCH 0915/1692] drm/omap: dss: Rename for_each_dss_dev 
macro to for_each_dss_display The macro iterates over displays only, rename it accordingly. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dss.c | 2 +- drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 +- drivers/gpu/drm/omapdrm/omap_drv.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index e93bed8edd7e..7245dd3423e6 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1557,7 +1557,7 @@ static void dss_shutdown(struct platform_device *pdev) DSSDBG("shutdown\n"); - for_each_dss_dev(dssdev) { + for_each_dss_display(dssdev) { if (!dssdev->driver) continue; diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 26abc09f3d4a..a6ddc881ea72 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -501,7 +501,7 @@ void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); void omap_dss_put_device(struct omap_dss_device *dssdev); -#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) +#define for_each_dss_display(d) while ((d = omap_dss_get_next_device(d)) != NULL) struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); int omap_dss_get_num_overlay_managers(void); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 6bc4b01c8e9c..4f402eb8088d 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -188,7 +188,7 @@ static int omap_connect_dssdevs(struct drm_device *ddev) if (!omapdss_stack_is_ready()) return -EPROBE_DEFER; - for_each_dss_dev(dssdev) { + for_each_dss_display(dssdev) { r = omapdss_device_connect(dssdev, NULL); if (r == -EPROBE_DEFER) { omap_dss_put_device(dssdev); @@ -200,7 +200,7 @@ static int omap_connect_dssdevs(struct drm_device *ddev) omap_dss_get_device(dssdev); priv->dssdevs[priv->num_dssdevs++] = dssdev; if (priv->num_dssdevs == ARRAY_SIZE(priv->dssdevs)) { - /* To balance the 'for_each_dss_dev' loop */ + /* To balance the 'for_each_dss_display' loop */ omap_dss_put_device(dssdev); break; } -- GitLab From b9f4d2ebf641d157be89a68227a5feb00c961d10 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 02:07:34 +0200 Subject: [PATCH 0916/1692] drm/omap: dss: Make omap_dss_get_next_device() more generic Despite its name, the omap_dss_get_next_device() function operates on display devices only. Make it more generic by allowing operation on all devices, with a parameter to specify the device type. While at it rename the function to omapdss_device_get_next() to match the naming of the other functions operating on struct omap_dss_device. 
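As a quick sketch (not part of this patch; my_walk_devices() is a placeholder name), this is how a caller iterates with the reworked helper. Starting from NULL begins at the head of the global list, and the reference handling described in the function comment added below is done inside omapdss_device_get_next():

/* Sketch only: displays-only iteration vs. walking every device. */
static void my_walk_devices(void)
{
	struct omap_dss_device *dssdev = NULL;

	for_each_dss_display(dssdev) {
		/* dssdev has dssdev->driver set; a reference is held here */
	}

	dssdev = NULL;
	while ((dssdev = omapdss_device_get_next(dssdev, false)) != NULL) {
		/* display_only == false: visits every registered device */
	}
}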
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 52 +++++++++++++++++++++++++++ drivers/gpu/drm/omapdrm/dss/display.c | 49 ------------------------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 6 ++-- 3 files changed, 56 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 8fac816ca481..9f01a4f28145 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -104,6 +104,58 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, return NULL; } +/* + * Search for the next device starting at @from. If display_only is true, skip + * non-display devices. Release the reference to the @from device, and acquire + * a reference to the returned device if found. + */ +struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, + bool display_only) +{ + struct omap_dss_device *dssdev; + struct list_head *list; + + mutex_lock(&omapdss_devices_lock); + + if (list_empty(&omapdss_devices_list)) { + dssdev = NULL; + goto done; + } + + /* + * Start from the from entry if given or from omapdss_devices_list + * otherwise. + */ + list = from ? &from->list : &omapdss_devices_list; + + list_for_each_entry(dssdev, list, list) { + /* + * Stop if we reach the omapdss_devices_list, that's the end of + * the list. + */ + if (&dssdev->list == &omapdss_devices_list) { + dssdev = NULL; + goto done; + } + + /* Filter out non-display entries if display_only is set. */ + if (!display_only || dssdev->driver) + goto done; + } + + dssdev = NULL; + +done: + if (from) + omap_dss_put_device(from); + if (dssdev) + omap_dss_get_device(dssdev); + + mutex_unlock(&omapdss_devices_lock); + return dssdev; +} +EXPORT_SYMBOL(omapdss_device_get_next); + int omapdss_device_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index e7872e0c8dab..5df73cd76153 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -92,52 +92,3 @@ void omap_dss_put_device(struct omap_dss_device *dssdev) module_put(dssdev->owner); } EXPORT_SYMBOL(omap_dss_put_device); - -/* - * ref count of the found device is incremented. - * ref count of from-device is decremented. 
- */ -struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from) -{ - struct list_head *l; - struct omap_dss_device *dssdev; - - mutex_lock(&panel_list_mutex); - - if (list_empty(&panel_list)) { - dssdev = NULL; - goto out; - } - - if (from == NULL) { - dssdev = list_first_entry(&panel_list, struct omap_dss_device, - panel_list); - omap_dss_get_device(dssdev); - goto out; - } - - omap_dss_put_device(from); - - list_for_each(l, &panel_list) { - dssdev = list_entry(l, struct omap_dss_device, panel_list); - if (dssdev == from) { - if (list_is_last(l, &panel_list)) { - dssdev = NULL; - goto out; - } - - dssdev = list_entry(l->next, struct omap_dss_device, - panel_list); - omap_dss_get_device(dssdev); - goto out; - } - } - - WARN(1, "'from' dssdev not found\n"); - - dssdev = NULL; -out: - mutex_unlock(&panel_list_mutex); - return dssdev; -} -EXPORT_SYMBOL(omap_dss_get_next_device); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index a6ddc881ea72..94a3f98bdd3d 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -489,11 +489,15 @@ static inline bool omapdss_is_initialized(void) int omapdss_register_display(struct omap_dss_device *dssdev); void omapdss_unregister_display(struct omap_dss_device *dssdev); +#define for_each_dss_display(d) \ + while ((d = omapdss_device_get_next(d, true)) != NULL) void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, unsigned int port); +struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, + bool display_only); int omapdss_device_connect(struct omap_dss_device *src, struct omap_dss_device *dst); void omapdss_device_disconnect(struct omap_dss_device *src, @@ -501,8 +505,6 @@ void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); void omap_dss_put_device(struct omap_dss_device *dssdev); -#define for_each_dss_display(d) while ((d = omap_dss_get_next_device(d)) != NULL) -struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); int omap_dss_get_num_overlay_managers(void); -- GitLab From 4e0bb06c0b9a025a5a52f93ecef7658d8f8c3c06 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 02:15:36 +0200 Subject: [PATCH 0917/1692] drm/omap: dss: Split omapdss_register_display() Split the function into omapdss_display_init() to perform display-specific initialization of the omap_dss_device, and omapdss_register_display() to register the device. The latter will then be replaced by more generic registration. 
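For illustration, a sketch assembled from the probe hunks below (my_panel_probe() and struct my_panel_data are placeholders, and any field not visible in the hunks is an assumption) shows the probe-time sequence after the split:

/* Sketch only: display init and registration are now separate steps. */
static int my_panel_probe(struct platform_device *pdev)
{
	struct my_panel_data *ddata;
	struct omap_dss_device *dssdev;
	int r;

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	dssdev = &ddata->dssdev;
	dssdev->dev = &pdev->dev;
	dssdev->type = OMAP_DISPLAY_TYPE_DPI;	/* panel-specific */
	dssdev->owner = THIS_MODULE;
	/* ... other driver-specific dssdev fields ... */

	omapdss_display_init(dssdev);		/* assigns the default "displayN" name */
	r = omapdss_register_display(dssdev);	/* registration, to be made generic later */
	if (r) {
		dev_err(&pdev->dev, "Failed to register panel\n");
		return r;
	}

	return 0;
}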
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 1 + drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 1 + drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 1 + drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 1 + drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 1 + drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 1 + drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 1 + drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 1 + drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 1 + drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 1 + drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 1 + drivers/gpu/drm/omapdrm/dss/display.c | 6 +++++- drivers/gpu/drm/omapdrm/dss/omapdss.h | 1 + 13 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 7d16c320749f..ae3976a97ce2 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -171,6 +171,7 @@ static int tvc_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&pdev->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 211d59cf8dcd..5e3ef5e9e9f6 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -390,6 +390,7 @@ static int dvic_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&pdev->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 0998b8715096..cf64742d8646 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -350,6 +350,7 @@ static int hdmic_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&pdev->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 5864e24a6e21..c4c529531243 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -218,6 +218,7 @@ static int panel_dpi_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&pdev->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 365ece2927a4..479ce69fd8d8 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1328,6 +1328,7 @@ static int dsicm_probe(struct platform_device *pdev) dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(dev, 
"Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 2e54ac115dbc..fb0afd5adf48 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -270,6 +270,7 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&spi->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 6f2261ccd442..6ae113871032 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -293,6 +293,7 @@ static int nec_8048_probe(struct spi_device *spi) dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&spi->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 369501251fa9..fb986b586749 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -269,6 +269,7 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&pdev->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 5bfd24c5ad5c..e110187ff911 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -800,6 +800,7 @@ static int acx565akm_probe(struct spi_device *spi) dssdev->type = OMAP_DISPLAY_TYPE_SDI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&spi->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 9117c1ba037b..c44e1b430a0e 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -394,6 +394,7 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&spi->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 53742a61aa86..0cb70abb6e5d 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -537,6 +537,7 @@ static int tpo_td043_probe(struct spi_device *spi) dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + omapdss_display_init(dssdev); r = omapdss_register_display(dssdev); if (r) { dev_err(&spi->dev, "Failed to register panel\n"); diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 5df73cd76153..c641993c7d17 
100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -32,7 +32,7 @@ static LIST_HEAD(panel_list); static DEFINE_MUTEX(panel_list_mutex); static int disp_num_counter; -int omapdss_register_display(struct omap_dss_device *dssdev) +void omapdss_display_init(struct omap_dss_device *dssdev) { int id; @@ -52,7 +52,11 @@ int omapdss_register_display(struct omap_dss_device *dssdev) if (dssdev->name == NULL) dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL, "display%u", id); +} +EXPORT_SYMBOL_GPL(omapdss_display_init); +int omapdss_register_display(struct omap_dss_device *dssdev) +{ mutex_lock(&panel_list_mutex); list_add_tail(&dssdev->panel_list, &panel_list); mutex_unlock(&panel_list_mutex); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 94a3f98bdd3d..e42821583b91 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -487,6 +487,7 @@ static inline bool omapdss_is_initialized(void) return !!omapdss_get_dss(); } +void omapdss_display_init(struct omap_dss_device *dssdev); int omapdss_register_display(struct omap_dss_device *dssdev); void omapdss_unregister_display(struct omap_dss_device *dssdev); #define for_each_dss_display(d) \ -- GitLab From 67822ae11971c664f5d28d7914b4b00cff07a9fd Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 01:25:32 +0200 Subject: [PATCH 0918/1692] drm/omap: dss: Remove panel devices list The panel devices list isn't used anymore, all panel devices are accessed through the global devices list. Remove it. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 9 ++----- .../gpu/drm/omapdrm/displays/connector-dvi.c | 14 ++-------- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 8 ++---- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 8 ++---- .../gpu/drm/omapdrm/displays/panel-dsi-cm.c | 8 ++---- .../displays/panel-lgphilips-lb035q02.c | 8 ++---- .../omapdrm/displays/panel-nec-nl8048hl11.c | 8 ++---- .../displays/panel-sharp-ls037v7dw01.c | 8 ++---- .../omapdrm/displays/panel-sony-acx565akm.c | 27 ++++++------------- .../omapdrm/displays/panel-tpo-td028ttec1.c | 8 ++---- .../omapdrm/displays/panel-tpo-td043mtea1.c | 22 ++++----------- drivers/gpu/drm/omapdrm/dss/display.c | 23 ---------------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 3 --- 13 files changed, 31 insertions(+), 123 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index ae3976a97ce2..49720117da15 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -154,7 +154,6 @@ static int tvc_probe(struct platform_device *pdev) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; - int r; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) @@ -172,11 +171,7 @@ static int tvc_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -186,7 +181,7 @@ static int __exit tvc_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(&ddata->dssdev); + 
omapdss_device_unregister(&ddata->dssdev); tvc_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 5e3ef5e9e9f6..7876e61bf63e 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -391,19 +391,9 @@ static int dvic_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - goto err_reg; - } + omapdss_device_register(dssdev); return 0; - -err_reg: - i2c_put_adapter(ddata->i2c_adapter); - mutex_destroy(&ddata->hpd_lock); - - return r; } static int __exit dvic_remove(struct platform_device *pdev) @@ -411,7 +401,7 @@ static int __exit dvic_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(&ddata->dssdev); + omapdss_device_unregister(&ddata->dssdev); dvic_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index cf64742d8646..5e07a8479cfc 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -351,11 +351,7 @@ static int hdmic_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -365,7 +361,7 @@ static int __exit hdmic_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(&ddata->dssdev); + omapdss_device_unregister(&ddata->dssdev); hdmic_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index c4c529531243..3e2bca737de0 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -219,11 +219,7 @@ static int panel_dpi_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -233,7 +229,7 @@ static int __exit panel_dpi_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); panel_dpi_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 479ce69fd8d8..0614db1c7366 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1329,11 +1329,7 @@ static int dsicm_probe(struct platform_device *pdev) OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(dev, "Failed to register panel\n"); - goto err_reg; - } + omapdss_device_register(dssdev); mutex_init(&ddata->lock); @@ 
-1404,7 +1400,7 @@ static int __exit dsicm_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "remove\n"); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); dsicm_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index fb0afd5adf48..0698b64e5bdc 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -271,11 +271,7 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -285,7 +281,7 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi) struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); lb035q02_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 6ae113871032..da34c35009dd 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -294,11 +294,7 @@ static int nec_8048_probe(struct spi_device *spi) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -310,7 +306,7 @@ static int nec_8048_remove(struct spi_device *spi) dev_dbg(&ddata->spi->dev, "%s\n", __func__); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); nec_8048_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index fb986b586749..d92ee6f1ae7f 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -270,11 +270,7 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -284,7 +280,7 @@ static int __exit sharp_ls_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); sharp_ls_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index e110187ff911..4602f7cd1b53 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -733,7 +733,7 @@ static int acx565akm_probe(struct spi_device *spi) r = devm_gpio_request_one(&spi->dev, ddata->reset_gpio, GPIOF_OUT_INIT_LOW, "lcd reset"); if (r) - goto err_gpio; + return r; } if (gpio_is_valid(ddata->reset_gpio)) @@ -754,7 +754,7 @@ static int 
acx565akm_probe(struct spi_device *spi) if (r) { dev_err(&spi->dev, "%s panel detect error\n", __func__); - goto err_detect; + return r; } memset(&props, 0, sizeof(props)); @@ -764,17 +764,15 @@ static int acx565akm_probe(struct spi_device *spi) bldev = backlight_device_register("acx565akm", &ddata->spi->dev, ddata, &acx565akm_bl_ops, &props); - if (IS_ERR(bldev)) { - r = PTR_ERR(bldev); - goto err_reg_bl; - } + if (IS_ERR(bldev)) + return PTR_ERR(bldev); ddata->bl_dev = bldev; if (ddata->has_cabc) { r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group); if (r) { dev_err(&bldev->dev, "%s failed to create sysfs files\n", __func__); - goto err_sysfs; + goto err_backlight_unregister; } ddata->cabc_mode = get_hw_cabc_mode(ddata); } @@ -801,21 +799,12 @@ static int acx565akm_probe(struct spi_device *spi) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - goto err_reg; - } + omapdss_device_register(dssdev); return 0; -err_reg: - sysfs_remove_group(&bldev->dev.kobj, &bldev_attr_group); -err_sysfs: +err_backlight_unregister: backlight_device_unregister(bldev); -err_reg_bl: -err_detect: -err_gpio: return r; } @@ -829,7 +818,7 @@ static int acx565akm_remove(struct spi_device *spi) sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group); backlight_device_unregister(ddata->bl_dev); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); acx565akm_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index c44e1b430a0e..b7c95b903d03 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -395,11 +395,7 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev->owner = THIS_MODULE; omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - return r; - } + omapdss_device_register(dssdev); return 0; } @@ -411,7 +407,7 @@ static int td028ttec1_panel_remove(struct spi_device *spi) dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); td028ttec1_panel_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 0cb70abb6e5d..e6b2381b971c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -509,8 +509,7 @@ static int tpo_td043_probe(struct spi_device *spi) ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc"); if (IS_ERR(ddata->vcc_reg)) { dev_err(&spi->dev, "failed to get LCD VCC regulator\n"); - r = PTR_ERR(ddata->vcc_reg); - goto err_regulator; + return PTR_ERR(ddata->vcc_reg); } if (gpio_is_valid(ddata->nreset_gpio)) { @@ -519,14 +518,14 @@ static int tpo_td043_probe(struct spi_device *spi) "lcd reset"); if (r < 0) { dev_err(&spi->dev, "couldn't request reset GPIO\n"); - goto err_gpio_req; + return r; } } r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group); if (r) { dev_err(&spi->dev, "failed to create sysfs files\n"); - goto err_sysfs; + return r; } ddata->vm = tpo_td043_vm; @@ -538,20 +537,9 @@ static int tpo_td043_probe(struct spi_device *spi) dssdev->owner = THIS_MODULE; 
omapdss_display_init(dssdev); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - goto err_reg; - } + omapdss_device_register(dssdev); return 0; - -err_reg: - sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); -err_sysfs: -err_gpio_req: -err_regulator: - return r; } static int tpo_td043_remove(struct spi_device *spi) @@ -561,7 +549,7 @@ static int tpo_td043_remove(struct spi_device *spi) dev_dbg(&ddata->spi->dev, "%s\n", __func__); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); tpo_td043_disable(dssdev); omapdss_device_disconnect(dssdev, NULL); diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index c641993c7d17..178d88841e0c 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -28,8 +28,6 @@ #include "omapdss.h" -static LIST_HEAD(panel_list); -static DEFINE_MUTEX(panel_list_mutex); static int disp_num_counter; void omapdss_display_init(struct omap_dss_device *dssdev) @@ -55,27 +53,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev) } EXPORT_SYMBOL_GPL(omapdss_display_init); -int omapdss_register_display(struct omap_dss_device *dssdev) -{ - mutex_lock(&panel_list_mutex); - list_add_tail(&dssdev->panel_list, &panel_list); - mutex_unlock(&panel_list_mutex); - - omapdss_device_register(dssdev); - return 0; -} -EXPORT_SYMBOL(omapdss_register_display); - -void omapdss_unregister_display(struct omap_dss_device *dssdev) -{ - mutex_lock(&panel_list_mutex); - list_del(&dssdev->panel_list); - mutex_unlock(&panel_list_mutex); - - omapdss_device_register(dssdev); -} -EXPORT_SYMBOL(omapdss_unregister_display); - struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev) { if (!try_module_get(dssdev->owner)) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index e42821583b91..5f71f6885991 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -395,7 +395,6 @@ struct omap_dss_device { struct omap_dss_device *dst; struct list_head list; - struct list_head panel_list; unsigned int alias_id; @@ -488,8 +487,6 @@ static inline bool omapdss_is_initialized(void) } void omapdss_display_init(struct omap_dss_device *dssdev); -int omapdss_register_display(struct omap_dss_device *dssdev); -void omapdss_unregister_display(struct omap_dss_device *dssdev); #define for_each_dss_display(d) \ while ((d = omapdss_device_get_next(d, true)) != NULL) -- GitLab From c1dfe721e0966947019c43b65f2837c591fdcb3c Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 02:43:45 +0200 Subject: [PATCH 0919/1692] drm/omap: dss: Move and rename omap_dss_(get|put)_device() The functions operate on any omap_dss_device, move them from display.c to base.c. While at it rename them to match the naming of the other functions operating on struct omap_dss_device. 
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 4 +-- .../gpu/drm/omapdrm/displays/connector-dvi.c | 4 +-- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 4 +-- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 4 +-- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 4 +-- .../drm/omapdrm/displays/encoder-tpd12s015.c | 4 +-- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 4 +-- .../gpu/drm/omapdrm/displays/panel-dsi-cm.c | 4 +-- .../displays/panel-lgphilips-lb035q02.c | 4 +-- .../omapdrm/displays/panel-nec-nl8048hl11.c | 4 +-- .../displays/panel-sharp-ls037v7dw01.c | 4 +-- .../omapdrm/displays/panel-sony-acx565akm.c | 4 +-- .../omapdrm/displays/panel-tpo-td028ttec1.c | 4 +-- .../omapdrm/displays/panel-tpo-td043mtea1.c | 4 +-- drivers/gpu/drm/omapdrm/dss/base.c | 27 ++++++++++++++++--- drivers/gpu/drm/omapdrm/dss/display.c | 24 ----------------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 5 ++-- drivers/gpu/drm/omapdrm/dss/output.c | 2 +- drivers/gpu/drm/omapdrm/omap_connector.c | 4 +-- drivers/gpu/drm/omapdrm/omap_crtc.c | 2 +- drivers/gpu/drm/omapdrm/omap_drv.c | 8 +++--- 21 files changed, 62 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 49720117da15..0eacd2dc302d 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -55,7 +55,7 @@ static int tvc_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -68,7 +68,7 @@ static void tvc_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int tvc_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 7876e61bf63e..754f15bc0115 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -69,7 +69,7 @@ static int dvic_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -82,7 +82,7 @@ static void dvic_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int dvic_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 5e07a8479cfc..e05f2964f8e1 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -65,7 +65,7 @@ static int hdmic_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -78,7 +78,7 @@ static void hdmic_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int hdmic_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 52ceaae14647..f665f2caea22 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ 
b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -45,7 +45,7 @@ static int opa362_connect(struct omap_dss_device *dssdev, r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -60,7 +60,7 @@ static void opa362_disconnect(struct omap_dss_device *dssdev, omapdss_device_disconnect(src, &ddata->dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int opa362_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index f4223cf7f03f..11c123ca7916 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -41,7 +41,7 @@ static int tfp410_connect(struct omap_dss_device *dssdev, r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -56,7 +56,7 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev, omapdss_device_disconnect(src, &ddata->dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int tfp410_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 90e07036fb48..eb2d5af34a35 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -50,7 +50,7 @@ static int tpd_connect(struct omap_dss_device *dssdev, r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -74,7 +74,7 @@ static void tpd_disconnect(struct omap_dss_device *dssdev, omapdss_device_disconnect(src, &ddata->dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int tpd_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 3e2bca737de0..64eab64e5f0d 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -47,7 +47,7 @@ static int panel_dpi_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -60,7 +60,7 @@ static void panel_dpi_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int panel_dpi_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 0614db1c7366..bd9401c1c11e 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -794,7 +794,7 @@ static int dsicm_connect(struct omap_dss_device *dssdev) err_req_vc: omapdss_device_disconnect(src, dssdev); err_connect: - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -806,7 +806,7 @@ static void dsicm_disconnect(struct omap_dss_device *dssdev) src->ops->dsi.release_vc(src, ddata->channel); omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int dsicm_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 0698b64e5bdc..9f9caeadf5dd 100644 --- 
a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -129,7 +129,7 @@ static int lb035q02_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -144,7 +144,7 @@ static void lb035q02_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int lb035q02_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index da34c35009dd..d07c955ea06f 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -124,7 +124,7 @@ static int nec_8048_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -137,7 +137,7 @@ static void nec_8048_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int nec_8048_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index d92ee6f1ae7f..d02f22a858e2 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -70,7 +70,7 @@ static int sharp_ls_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -83,7 +83,7 @@ static void sharp_ls_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int sharp_ls_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 4602f7cd1b53..a98c1181887a 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -519,7 +519,7 @@ static int acx565akm_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -532,7 +532,7 @@ static void acx565akm_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index b7c95b903d03..07c3f8143e4c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -178,7 +178,7 @@ static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -191,7 +191,7 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) diff --git 
a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index e6b2381b971c..c32ab872e9f9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -349,7 +349,7 @@ static int tpo_td043_connect(struct omap_dss_device *dssdev) r = omapdss_device_connect(src, dssdev); if (r) { - omap_dss_put_device(src); + omapdss_device_put(src); return r; } @@ -362,7 +362,7 @@ static void tpo_td043_disconnect(struct omap_dss_device *dssdev) omapdss_device_disconnect(src, dssdev); - omap_dss_put_device(src); + omapdss_device_put(src); } static int tpo_td043_enable(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 9f01a4f28145..08846eea5a53 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -91,6 +91,27 @@ static bool omapdss_device_is_registered(struct device_node *node) return found; } +struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev) +{ + if (!try_module_get(dssdev->owner)) + return NULL; + + if (get_device(dssdev->dev) == NULL) { + module_put(dssdev->owner); + return NULL; + } + + return dssdev; +} +EXPORT_SYMBOL(omapdss_device_get); + +void omapdss_device_put(struct omap_dss_device *dssdev) +{ + put_device(dssdev->dev); + module_put(dssdev->owner); +} +EXPORT_SYMBOL(omapdss_device_put); + struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, unsigned int port) { @@ -98,7 +119,7 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, list_for_each_entry(dssdev, &omapdss_devices_list, list) { if (dssdev->dev->of_node == src && dssdev->port_num == port) - return omap_dss_get_device(dssdev); + return omapdss_device_get(dssdev); } return NULL; @@ -147,9 +168,9 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, done: if (from) - omap_dss_put_device(from); + omapdss_device_put(from); if (dssdev) - omap_dss_get_device(dssdev); + omapdss_device_get(dssdev); mutex_unlock(&omapdss_devices_lock); return dssdev; diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 178d88841e0c..53cb46f6503d 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -21,9 +21,6 @@ #define DSS_SUBSYS_NAME "DISPLAY" #include -#include -#include -#include #include #include "omapdss.h" @@ -52,24 +49,3 @@ void omapdss_display_init(struct omap_dss_device *dssdev) "display%u", id); } EXPORT_SYMBOL_GPL(omapdss_display_init); - -struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev) -{ - if (!try_module_get(dssdev->owner)) - return NULL; - - if (get_device(dssdev->dev) == NULL) { - module_put(dssdev->owner); - return NULL; - } - - return dssdev; -} -EXPORT_SYMBOL(omap_dss_get_device); - -void omap_dss_put_device(struct omap_dss_device *dssdev) -{ - put_device(dssdev->dev); - module_put(dssdev->owner); -} -EXPORT_SYMBOL(omap_dss_put_device); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 5f71f6885991..96011e42da05 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -492,6 +492,8 @@ void omapdss_display_init(struct omap_dss_device *dssdev); void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); +struct omap_dss_device 
*omapdss_device_get(struct omap_dss_device *dssdev); +void omapdss_device_put(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, unsigned int port); struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, @@ -501,9 +503,6 @@ int omapdss_device_connect(struct omap_dss_device *src, void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst); -struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); -void omap_dss_put_device(struct omap_dss_device *dssdev); - int omap_dss_get_num_overlay_managers(void); int omap_dss_get_num_overlays(void); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index e62da96f83af..b5bf7a5e35d9 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -93,7 +93,7 @@ struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device dssdev = dssdev->src; if (dssdev->id != 0) - return omap_dss_get_device(dssdev); + return omapdss_device_get(dssdev); return NULL; } diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 3b60086f2938..69ebb0fa1df5 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -98,7 +98,7 @@ static void omap_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); kfree(omap_connector); - omap_dss_put_device(dssdev); + omapdss_device_put(dssdev); } #define MAX_EDID 512 @@ -254,7 +254,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, DBG("%s", dssdev->name); - omap_dss_get_device(dssdev); + omapdss_device_get(dssdev); omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); if (!omap_connector) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 6c4d40b824e4..e18ca6cdc0d6 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -704,7 +704,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, out = omapdss_find_output_from_display(dssdev); channel = out->dispc_channel; - omap_dss_put_device(out); + omapdss_device_put(out); DBG("%s", channel_names[channel]); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 4f402eb8088d..a3b7dbf1b92b 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -161,7 +161,7 @@ static void omap_disconnect_dssdevs(struct drm_device *ddev) omapdss_device_disconnect(dssdev, NULL); priv->dssdevs[i] = NULL; - omap_dss_put_device(dssdev); + omapdss_device_put(dssdev); } priv->num_dssdevs = 0; @@ -191,17 +191,17 @@ static int omap_connect_dssdevs(struct drm_device *ddev) for_each_dss_display(dssdev) { r = omapdss_device_connect(dssdev, NULL); if (r == -EPROBE_DEFER) { - omap_dss_put_device(dssdev); + omapdss_device_put(dssdev); goto cleanup; } else if (r) { dev_warn(dssdev->dev, "could not connect display: %s\n", dssdev->name); } else { - omap_dss_get_device(dssdev); + omapdss_device_get(dssdev); priv->dssdevs[priv->num_dssdevs++] = dssdev; if (priv->num_dssdevs == ARRAY_SIZE(priv->dssdevs)) { /* To balance the 'for_each_dss_display' loop */ - omap_dss_put_device(dssdev); + omapdss_device_put(dssdev); break; } } -- GitLab From f324b2798c871511c64ea4232405e6f248e20d52 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 02:54:16 +0200 Subject: [PATCH 0920/1692] drm/omap: dss: Store 
dss_device pointer in omap_dss_device Storing the dss_device pointer in the omap_dss_device structure will allow accessing the dss_device from the dss_mgr API functions. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-analog-tv.c | 2 +- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 2 +- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 2 +- .../drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 2 +- .../gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 2 +- .../drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 2 +- .../gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 2 +- .../gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 2 +- .../gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2 +- drivers/gpu/drm/omapdrm/dss/base.c | 11 +++++++++-- drivers/gpu/drm/omapdrm/dss/omapdss.h | 4 +++- drivers/gpu/drm/omapdrm/omap_drv.c | 2 +- 17 files changed, 27 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 0eacd2dc302d..e416e9e1566a 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -53,7 +53,7 @@ static int tvc_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 754f15bc0115..a35d39f6cca9 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -67,7 +67,7 @@ static int dvic_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index e05f2964f8e1..dd6e67f9ae47 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -63,7 +63,7 @@ static int hdmic_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index f665f2caea22..41a51364c985 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -43,7 +43,7 @@ static int opa362_connect(struct omap_dss_device *dssdev, return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 11c123ca7916..cadb8f52aa88 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ 
b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -39,7 +39,7 @@ static int tfp410_connect(struct omap_dss_device *dssdev, return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index eb2d5af34a35..4f671dc272a0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -48,7 +48,7 @@ static int tpd_connect(struct omap_dss_device *dssdev, return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 64eab64e5f0d..c03986d7df73 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -45,7 +45,7 @@ static int panel_dpi_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index bd9401c1c11e..aeeec81f8fcf 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -769,7 +769,7 @@ static int dsicm_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { dev_err(dev, "Failed to connect to video source\n"); goto err_connect; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 9f9caeadf5dd..d4a7925e3f16 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -127,7 +127,7 @@ static int lb035q02_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index d07c955ea06f..b0fe60aba729 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -122,7 +122,7 @@ static int nec_8048_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index d02f22a858e2..d96eb1a1397f 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -68,7 +68,7 @@ static int sharp_ls_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c 
b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index a98c1181887a..65068eea0647 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -517,7 +517,7 @@ static int acx565akm_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 07c3f8143e4c..96e2e30dd999 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -176,7 +176,7 @@ static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index c32ab872e9f9..5845f9e6c6ba 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -347,7 +347,7 @@ static int tpo_td043_connect(struct omap_dss_device *dssdev) return PTR_ERR(src); } - r = omapdss_device_connect(src, dssdev); + r = omapdss_device_connect(dssdev->dss, src, dssdev); if (r) { omapdss_device_put(src); return r; diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 08846eea5a53..991c424960c7 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -177,7 +177,8 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, } EXPORT_SYMBOL(omapdss_device_get_next); -int omapdss_device_connect(struct omap_dss_device *src, +int omapdss_device_connect(struct dss_device *dss, + struct omap_dss_device *src, struct omap_dss_device *dst) { int ret; @@ -187,13 +188,17 @@ int omapdss_device_connect(struct omap_dss_device *src, if (omapdss_device_is_connected(src)) return -EBUSY; + src->dss = dss; + if (src->driver) ret = src->driver->connect(src); else ret = src->ops->connect(src, dst); - if (ret < 0) + if (ret < 0) { + src->dss = NULL; return ret; + } if (dst) { dst->src = src; @@ -226,6 +231,8 @@ void omapdss_device_disconnect(struct omap_dss_device *src, src->driver->disconnect(src); else src->ops->disconnect(src, dst); + + src->dss = NULL; } EXPORT_SYMBOL_GPL(omapdss_device_disconnect); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 96011e42da05..4befe8aab333 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -391,6 +391,7 @@ struct omap_dss_device { struct module *owner; + struct dss_device *dss; struct omap_dss_device *src; struct omap_dss_device *dst; @@ -498,7 +499,8 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, unsigned int port); struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, bool display_only); -int omapdss_device_connect(struct omap_dss_device *src, +int omapdss_device_connect(struct dss_device *dss, + struct omap_dss_device *src, struct omap_dss_device *dst); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c 
b/drivers/gpu/drm/omapdrm/omap_drv.c index a3b7dbf1b92b..95ebb6b1fc36 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -189,7 +189,7 @@ static int omap_connect_dssdevs(struct drm_device *ddev) return -EPROBE_DEFER; for_each_dss_display(dssdev) { - r = omapdss_device_connect(dssdev, NULL); + r = omapdss_device_connect(priv->dss, dssdev, NULL); if (r == -EPROBE_DEFER) { omapdss_device_put(dssdev); goto cleanup; -- GitLab From 845417b3b3b0d6c1694394ae6c3e07b281b89f82 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 03:05:10 +0200 Subject: [PATCH 0921/1692] drm/omap: dss: Move DSS mgr ops and private data to dss_device The DSS manager ops and private data pointer are specific to a DSS instance. Store them in the dss_device structure instead of global variable. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dss.h | 2 + drivers/gpu/drm/omapdrm/dss/omapdss.h | 5 ++- drivers/gpu/drm/omapdrm/dss/output.c | 58 +++++++++++++++------------ drivers/gpu/drm/omapdrm/omap_crtc.c | 6 +-- drivers/gpu/drm/omapdrm/omap_crtc.h | 2 +- drivers/gpu/drm/omapdrm/omap_drv.c | 4 +- 6 files changed, 44 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 02f8b346edfd..0305eaf2c30c 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -269,6 +269,8 @@ struct dss_device { struct dispc_device *dispc; const struct dispc_ops *dispc_ops; + const struct dss_mgr_ops *mgr_ops; + struct omap_drm_private *mgr_ops_priv; }; /* core */ diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 4befe8aab333..4df405ae20db 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -574,9 +574,10 @@ struct dss_mgr_ops { void (*handler)(void *), void *data); }; -int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops, +int dss_install_mgr_ops(struct dss_device *dss, + const struct dss_mgr_ops *mgr_ops, struct omap_drm_private *priv); -void dss_uninstall_mgr_ops(void); +void dss_uninstall_mgr_ops(struct dss_device *dss); int dss_mgr_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index b5bf7a5e35d9..a5df6eed4aef 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -21,6 +21,7 @@ #include #include +#include "dss.h" #include "omapdss.h" static DEFINE_MUTEX(output_lock); @@ -99,90 +100,97 @@ struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device } EXPORT_SYMBOL(omapdss_find_output_from_display); -static const struct dss_mgr_ops *dss_mgr_ops; -static struct omap_drm_private *dss_mgr_ops_priv; - -int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops, +int dss_install_mgr_ops(struct dss_device *dss, + const struct dss_mgr_ops *mgr_ops, struct omap_drm_private *priv) { - if (dss_mgr_ops) + if (dss->mgr_ops) return -EBUSY; - dss_mgr_ops = mgr_ops; - dss_mgr_ops_priv = priv; + dss->mgr_ops = mgr_ops; + dss->mgr_ops_priv = priv; return 0; } EXPORT_SYMBOL(dss_install_mgr_ops); -void dss_uninstall_mgr_ops(void) +void dss_uninstall_mgr_ops(struct dss_device *dss) { - dss_mgr_ops = NULL; - dss_mgr_ops_priv = NULL; + dss->mgr_ops = NULL; + dss->mgr_ops_priv = NULL; } EXPORT_SYMBOL(dss_uninstall_mgr_ops); int dss_mgr_connect(struct omap_dss_device *dssdev, 
struct omap_dss_device *dst) { - return dss_mgr_ops->connect(dss_mgr_ops_priv, - dssdev->dispc_channel, dst); + return dssdev->dss->mgr_ops->connect(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel, dst); } EXPORT_SYMBOL(dss_mgr_connect); void dss_mgr_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - dss_mgr_ops->disconnect(dss_mgr_ops_priv, dssdev->dispc_channel, dst); + dssdev->dss->mgr_ops->disconnect(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel, dst); } EXPORT_SYMBOL(dss_mgr_disconnect); void dss_mgr_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - dss_mgr_ops->set_timings(dss_mgr_ops_priv, dssdev->dispc_channel, vm); + dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel, vm); } EXPORT_SYMBOL(dss_mgr_set_timings); void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev, const struct dss_lcd_mgr_config *config) { - dss_mgr_ops->set_lcd_config(dss_mgr_ops_priv, - dssdev->dispc_channel, config); + dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel, config); } EXPORT_SYMBOL(dss_mgr_set_lcd_config); int dss_mgr_enable(struct omap_dss_device *dssdev) { - return dss_mgr_ops->enable(dss_mgr_ops_priv, dssdev->dispc_channel); + return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel); } EXPORT_SYMBOL(dss_mgr_enable); void dss_mgr_disable(struct omap_dss_device *dssdev) { - dss_mgr_ops->disable(dss_mgr_ops_priv, dssdev->dispc_channel); + dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel); } EXPORT_SYMBOL(dss_mgr_disable); void dss_mgr_start_update(struct omap_dss_device *dssdev) { - dss_mgr_ops->start_update(dss_mgr_ops_priv, dssdev->dispc_channel); + dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel); } EXPORT_SYMBOL(dss_mgr_start_update); int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev, void (*handler)(void *), void *data) { - return dss_mgr_ops->register_framedone_handler(dss_mgr_ops_priv, - dssdev->dispc_channel, - handler, data); + struct dss_device *dss = dssdev->dss; + + return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv, + dssdev->dispc_channel, + handler, data); } EXPORT_SYMBOL(dss_mgr_register_framedone_handler); void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev, void (*handler)(void *), void *data) { - dss_mgr_ops->unregister_framedone_handler(dss_mgr_ops_priv, - dssdev->dispc_channel, - handler, data); + struct dss_device *dss = dssdev->dss; + + dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv, + dssdev->dispc_channel, + handler, data); } EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler); diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index e18ca6cdc0d6..4ddc4ed18b47 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -683,12 +683,12 @@ void omap_crtc_pre_init(struct omap_drm_private *priv) { memset(omap_crtcs, 0, sizeof(omap_crtcs)); - dss_install_mgr_ops(&mgr_ops, priv); + dss_install_mgr_ops(priv->dss, &mgr_ops, priv); } -void omap_crtc_pre_uninit(void) +void omap_crtc_pre_uninit(struct omap_drm_private *priv) { - dss_uninstall_mgr_ops(); + dss_uninstall_mgr_ops(priv->dss); } /* initialize crtc */ diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h index eaab2d7f0324..1c6530703855 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.h +++ 
b/drivers/gpu/drm/omapdrm/omap_crtc.h @@ -33,7 +33,7 @@ struct videomode; struct videomode *omap_crtc_timings(struct drm_crtc *crtc); enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); void omap_crtc_pre_init(struct omap_drm_private *priv); -void omap_crtc_pre_uninit(void); +void omap_crtc_pre_uninit(struct omap_drm_private *priv); struct drm_crtc *omap_crtc_init(struct drm_device *dev, struct drm_plane *plane, struct omap_dss_device *dssdev); int omap_crtc_wait_pending(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 95ebb6b1fc36..df90f82ef217 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -638,7 +638,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) destroy_workqueue(priv->wq); omap_disconnect_dssdevs(ddev); err_crtc_uninit: - omap_crtc_pre_uninit(); + omap_crtc_pre_uninit(priv); drm_dev_unref(ddev); return ret; } @@ -666,7 +666,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) destroy_workqueue(priv->wq); omap_disconnect_dssdevs(ddev); - omap_crtc_pre_uninit(); + omap_crtc_pre_uninit(priv); drm_dev_unref(ddev); } -- GitLab From 5c718e015a0f1280440f90ebf1c18e2b98a102e4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 03:11:49 +0200 Subject: [PATCH 0922/1692] drm/omap: dss: Modify omapdss_find_output_from_display() to return channel The omapdss_find_output_from_display() function is only used to retrieve the dispc channel corresponding to the display. Return the dispc channel directly, and rename the function to omapdss_device_get_dispc_channel() to match its new purpose. The dssdev->id check is removed as the dssdev is guaranteed to be an output and have a non-zero id, as proved by the lack of crash despite the caller never checking the returned pointer before dereferencing it. As the function is not specific to outputs anymore, move it from output.c to base.c. 
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 9 +++++++++ drivers/gpu/drm/omapdrm/dss/omapdss.h | 3 +-- drivers/gpu/drm/omapdrm/dss/output.c | 12 ------------ drivers/gpu/drm/omapdrm/omap_crtc.c | 5 +---- 4 files changed, 11 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 991c424960c7..272858972496 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -236,6 +236,15 @@ void omapdss_device_disconnect(struct omap_dss_device *src, } EXPORT_SYMBOL_GPL(omapdss_device_disconnect); +enum omap_channel omapdss_device_get_dispc_channel(struct omap_dss_device *dssdev) +{ + while (dssdev->src) + dssdev = dssdev->src; + + return dssdev->dispc_channel; +} +EXPORT_SYMBOL(omapdss_device_get_dispc_channel); + /* ----------------------------------------------------------------------------- * Components Handling */ diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 4df405ae20db..121bc953ba31 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -504,6 +504,7 @@ int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *dst); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst); +enum omap_channel omapdss_device_get_dispc_channel(struct omap_dss_device *dssdev); int omap_dss_get_num_overlay_managers(void); @@ -513,8 +514,6 @@ int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev); int omapdss_output_unset_device(struct omap_dss_device *out); -struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev); - typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index a5df6eed4aef..191b2e801257 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -88,18 +88,6 @@ int omapdss_output_unset_device(struct omap_dss_device *out) } EXPORT_SYMBOL(omapdss_output_unset_device); -struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev) -{ - while (dssdev->src) - dssdev = dssdev->src; - - if (dssdev->id != 0) - return omapdss_device_get(dssdev); - - return NULL; -} -EXPORT_SYMBOL(omapdss_find_output_from_display); - int dss_install_mgr_ops(struct dss_device *dss, const struct dss_mgr_ops *mgr_ops, struct omap_drm_private *priv) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 4ddc4ed18b47..c5f1915aef67 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -699,12 +699,9 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, struct drm_crtc *crtc = NULL; struct omap_crtc *omap_crtc; enum omap_channel channel; - struct omap_dss_device *out; int ret; - out = omapdss_find_output_from_display(dssdev); - channel = out->dispc_channel; - omapdss_device_put(out); + channel = omapdss_device_get_dispc_channel(dssdev); DBG("%s", channel_names[channel]); -- GitLab From 4e20bda68e01f723d7fcc4e7d55a4afc78223fb7 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 4 Mar 2018 21:49:28 +0200 Subject: [PATCH 0923/1692] drm/omap: dss: Replace 
omap_dss_device port number with bitmask The omap_dss_device port_num field stores the DT port number associated with the device. The field is used in different ways depending on the device type: - For DPI outputs, the port number is used as an identifier of the DPI instance - For sources, the port number is used to look up the omap_dss_device by DT port node As omap_dss_device instances are only looked up as sources by sinks, setting the field to the number of the source port works for both use cases. However, to enable looking up sinks, we need to record all the ports associated with an omap_dss_device. Do so by turning the port_num field into an of_ports bitmask. For DPI outputs the port number is additionally stored in the dpi_data structure as the output ID. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 1 + .../gpu/drm/omapdrm/displays/connector-dvi.c | 1 + .../gpu/drm/omapdrm/displays/connector-hdmi.c | 1 + .../gpu/drm/omapdrm/displays/encoder-opa362.c | 1 + .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 2 +- .../drm/omapdrm/displays/encoder-tpd12s015.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 1 + .../gpu/drm/omapdrm/displays/panel-dsi-cm.c | 1 + .../displays/panel-lgphilips-lb035q02.c | 1 + .../omapdrm/displays/panel-nec-nl8048hl11.c | 1 + .../displays/panel-sharp-ls037v7dw01.c | 1 + .../omapdrm/displays/panel-sony-acx565akm.c | 1 + .../omapdrm/displays/panel-tpo-td028ttec1.c | 1 + .../omapdrm/displays/panel-tpo-td043mtea1.c | 1 + drivers/gpu/drm/omapdrm/dss/base.c | 2 +- drivers/gpu/drm/omapdrm/dss/dpi.c | 19 +++++++++---------- drivers/gpu/drm/omapdrm/dss/dsi.c | 1 + drivers/gpu/drm/omapdrm/dss/hdmi4.c | 1 + drivers/gpu/drm/omapdrm/dss/hdmi5.c | 1 + drivers/gpu/drm/omapdrm/dss/omapdss.h | 4 ++-- drivers/gpu/drm/omapdrm/dss/sdi.c | 2 +- drivers/gpu/drm/omapdrm/dss/venc.c | 1 + 22 files changed, 31 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index e416e9e1566a..5b151b88e1c1 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -169,6 +169,7 @@ static int tvc_probe(struct platform_device *pdev) dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index a35d39f6cca9..14f7941eb83a 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -389,6 +389,7 @@ static int dvic_probe(struct platform_device *pdev) dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index dd6e67f9ae47..005b1b0cbc46 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -349,6 +349,7 @@ static int hdmic_probe(struct platform_device *pdev) dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); 
omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 41a51364c985..d74b90961c68 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -167,6 +167,7 @@ static int opa362_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->output_type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(1) | BIT(0); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index cadb8f52aa88..2bb1af8f815f 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -190,7 +190,7 @@ static int tfp410_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->output_type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; - dssdev->port_num = 1; + dssdev->of_ports = BIT(1) | BIT(0); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 4f671dc272a0..e33f73f06e00 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -299,7 +299,7 @@ static int tpd_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; - dssdev->port_num = 1; + dssdev->of_ports = BIT(1) | BIT(0); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index c03986d7df73..c8cd2f663ddf 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -217,6 +217,7 @@ static int panel_dpi_probe(struct platform_device *pdev) dssdev->driver = &panel_dpi_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index aeeec81f8fcf..febb20961dc5 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1324,6 +1324,7 @@ static int dsicm_probe(struct platform_device *pdev) dssdev->driver = &dsicm_ops; dssdev->type = OMAP_DISPLAY_TYPE_DSI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index d4a7925e3f16..52e30bd1ed4a 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -269,6 +269,7 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev->driver = &lb035q02_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index b0fe60aba729..3f88407fe3c0 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ 
b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -292,6 +292,7 @@ static int nec_8048_probe(struct spi_device *spi) dssdev->driver = &nec_8048_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index d96eb1a1397f..08576ae94d55 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -268,6 +268,7 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev->driver = &sharp_ls_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 65068eea0647..181c3c271918 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -797,6 +797,7 @@ static int acx565akm_probe(struct spi_device *spi) dssdev->driver = &acx565akm_ops; dssdev->type = OMAP_DISPLAY_TYPE_SDI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 96e2e30dd999..39234f5db144 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -393,6 +393,7 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev->driver = &td028ttec1_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 5845f9e6c6ba..61fcf2286387 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -535,6 +535,7 @@ static int tpo_td043_probe(struct spi_device *spi) dssdev->driver = &tpo_td043_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 272858972496..67086cbb3e24 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -118,7 +118,7 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, struct omap_dss_device *dssdev; list_for_each_entry(dssdev, &omapdss_devices_list, list) { - if (dssdev->dev->of_node == src && dssdev->port_num == port) + if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port)) return omapdss_device_get(dssdev); } diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 372f1070bd69..0c11d17f23a4 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -39,6 +39,7 @@ struct dpi_data { struct platform_device *pdev; enum dss_model dss_model; struct dss_device *dss; + unsigned int id; struct regulator *vdds_dsi_reg; enum dss_clk_source clk_src; @@ -413,7 +414,7 @@ static int 
dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - r = dss_dpi_select_source(dpi->dss, out->port_num, out->dispc_channel); + r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel); if (r) goto err_src_sel; @@ -609,7 +610,7 @@ static void dpi_init_pll(struct dpi_data *dpi) * the channel in some more dynamic manner, or get the channel as a user * parameter. */ -static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num) +static enum omap_channel dpi_get_channel(struct dpi_data *dpi) { switch (dpi->dss_model) { case DSS_MODEL_OMAP2: @@ -617,7 +618,7 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num) return OMAP_DSS_CHANNEL_LCD; case DSS_MODEL_DRA7: - switch (port_num) { + switch (dpi->id) { case 2: return OMAP_DSS_CHANNEL_LCD3; case 1: @@ -690,12 +691,10 @@ static const struct omap_dss_device_ops dpi_ops = { static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) { struct omap_dss_device *out = &dpi->output; - int r; - u32 port_num; + u32 port_num = 0; - r = of_property_read_u32(port, "reg", &port_num); - if (r) - port_num = 0; + of_property_read_u32(port, "reg", &port_num); + dpi->id = port_num <= 2 ? port_num : 0; switch (port_num) { case 2: @@ -713,8 +712,8 @@ static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) out->dev = &dpi->pdev->dev; out->id = OMAP_DSS_OUTPUT_DPI; out->output_type = OMAP_DISPLAY_TYPE_DPI; - out->dispc_channel = dpi_get_channel(dpi, port_num); - out->port_num = port_num; + out->dispc_channel = dpi_get_channel(dpi); + out->of_ports = BIT(port_num); out->ops = &dpi_ops; out->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index e061816e5381..ee260353b9ca 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4994,6 +4994,7 @@ static void dsi_init_output(struct dsi_data *dsi) out->dispc_channel = dsi_get_channel(dsi); out->ops = &dsi_ops; out->owner = THIS_MODULE; + out->of_ports = BIT(0); omapdss_device_register(out); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index fa818033f3f3..bf800cede2ad 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -564,6 +564,7 @@ static void hdmi_init_output(struct omap_hdmi *hdmi) out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &hdmi_ops; out->owner = THIS_MODULE; + out->of_ports = BIT(0); omapdss_device_register(out); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index ef4a48f397d2..e5d23dd19f99 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -555,6 +555,7 @@ static void hdmi_init_output(struct omap_hdmi *hdmi) out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &hdmi_ops; out->owner = THIS_MODULE; + out->of_ports = BIT(0); omapdss_device_register(out); } diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 121bc953ba31..c2d9ebdec3d1 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -423,8 +423,8 @@ struct omap_dss_device { /* output instance */ enum omap_dss_output_id id; - /* the port number in the DT node */ - int port_num; + /* bitmask of port numbers in DT */ + unsigned int of_ports; }; struct omap_dss_driver { diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 2f39e1f47990..4b70a3044dd6 100644 
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -327,7 +327,7 @@ static void sdi_init_output(struct sdi_device *sdi) out->name = "sdi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_LCD; /* We have SDI only on OMAP3, where it's on port 1 */ - out->port_num = 1; + out->of_ports = BIT(1); out->ops = &sdi_ops; out->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 70418cf4eea2..dd9af335dba0 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -767,6 +767,7 @@ static void venc_init_output(struct venc_device *venc) out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &venc_ops; out->owner = THIS_MODULE; + out->of_ports = BIT(0); omapdss_device_register(out); } -- GitLab From eaaedaf6a1de3be2f62feefc40fa6a711382f1ca Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 21:11:06 +0200 Subject: [PATCH 0924/1692] drm/omap: dss: Extend omapdss_of_find_source_for_first_ep() to sinks The omapdss_of_find_source_for_first_ep() function locates the source corresponding to the first endpoint of the first port of a device node. We can easily extend it to locate sinks as well by passing the port number as a parameter. This will be useful to find sinks in encoders drivers. Extend the function and rename it to omapdss_of_find_connected_device() to reflect its new extended purpose. Additionally, it is useful to differentiate between failures to return the connected device because no link exists in the device tree for the requested port, or because the connected device as described in the device tree is invalid or not probed yet. Return NULL in the first case and an error code in the second case, and update the callers accordingly. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-analog-tv.c | 6 +++--- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 6 +++--- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 6 +++--- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 2 +- .../gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 6 +++--- drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 6 +++--- .../omapdrm/displays/panel-lgphilips-lb035q02.c | 6 +++--- .../drm/omapdrm/displays/panel-nec-nl8048hl11.c | 6 +++--- .../drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 6 +++--- .../drm/omapdrm/displays/panel-sony-acx565akm.c | 6 +++--- .../drm/omapdrm/displays/panel-tpo-td028ttec1.c | 6 +++--- .../drm/omapdrm/displays/panel-tpo-td043mtea1.c | 6 +++--- drivers/gpu/drm/omapdrm/dss/dss-of.c | 14 +++++++------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 +- 16 files changed, 44 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 5b151b88e1c1..15921b361672 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -47,10 +47,10 @@ static int tvc_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(ddata->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(ddata->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? 
PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 14f7941eb83a..f8510cd7b166 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -61,10 +61,10 @@ static int dvic_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 005b1b0cbc46..3452925ed54d 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -57,10 +57,10 @@ static int hdmic_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(ddata->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(ddata->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index d74b90961c68..939e259d601d 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -37,7 +37,7 @@ static int opa362_connect(struct omap_dss_device *dssdev, struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); return PTR_ERR(src); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 2bb1af8f815f..55549c5a5af2 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -33,7 +33,7 @@ static int tfp410_connect(struct omap_dss_device *dssdev, struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); return PTR_ERR(src); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index e33f73f06e00..58a831c3f74c 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -42,7 +42,7 @@ static int tpd_connect(struct omap_dss_device *dssdev, struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); if (IS_ERR(src)) { dev_err(dssdev->dev, "failed to find video source\n"); return PTR_ERR(src); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index c8cd2f663ddf..8c17ad4ddf84 100644 --- 
a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -39,10 +39,10 @@ static int panel_dpi_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index febb20961dc5..501c47f95130 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -763,10 +763,10 @@ static int dsicm_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 52e30bd1ed4a..73416b1c7386 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -121,10 +121,10 @@ static int lb035q02_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 3f88407fe3c0..cf5d9e1522a8 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -116,10 +116,10 @@ static int nec_8048_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 08576ae94d55..1c3180495dfd 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -62,10 +62,10 @@ static int sharp_ls_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? 
PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 181c3c271918..d91ab8dab4d9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -511,10 +511,10 @@ static int acx565akm_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 39234f5db144..a57daf44d421 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -170,10 +170,10 @@ static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 61fcf2286387..719c298d3996 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -341,10 +341,10 @@ static int tpo_td043_connect(struct omap_dss_device *dssdev) struct omap_dss_device *src; int r; - src = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(src)) { + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); + if (IS_ERR_OR_NULL(src)) { dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); + return src ? PTR_ERR(src) : -EINVAL; } r = omapdss_device_connect(dssdev->dss, src, dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index 771b20db2d98..0422597ac6b0 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -47,7 +47,7 @@ dss_of_port_get_parent_device(struct device_node *port) } struct omap_dss_device * -omapdss_of_find_source_for_first_ep(struct device_node *node) +omapdss_of_find_connected_device(struct device_node *node, unsigned int port) { struct device_node *src_node; struct device_node *src_port; @@ -56,27 +56,27 @@ omapdss_of_find_source_for_first_ep(struct device_node *node) u32 port_number = 0; /* Get the endpoint... */ - ep = of_graph_get_endpoint_by_regs(node, 0, 0); + ep = of_graph_get_endpoint_by_regs(node, port, 0); if (!ep) - return ERR_PTR(-EINVAL); + return NULL; /* ... and its remote port... */ src_port = of_graph_get_remote_port(ep); of_node_put(ep); if (!src_port) - return ERR_PTR(-EINVAL); + return NULL; /* ... and the remote port's number and parent... 
*/ of_property_read_u32(src_port, "reg", &port_number); src_node = dss_of_port_get_parent_device(src_port); of_node_put(src_port); if (!src_node) - return NULL; + return ERR_PTR(-EINVAL); - /* ... and finally the source. */ + /* ... and finally the connected device. */ src = omapdss_find_device_by_port(src_node, port_number); of_node_put(src_node); return src ? src : ERR_PTR(-EPROBE_DEFER); } -EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep); +EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index c2d9ebdec3d1..dc2f8167f61b 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -532,7 +532,7 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) } struct omap_dss_device * -omapdss_of_find_source_for_first_ep(struct device_node *node); +omapdss_of_find_connected_device(struct device_node *node, unsigned int port); enum dss_writeback_channel { DSS_WB_LCD1_MGR = 0, -- GitLab From a25edf0ea6de6780ef9e8dc489a5c14599c48326 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 22:05:41 +0200 Subject: [PATCH 0925/1692] drm/omap: displays: Don't cast dssdev to panel data unnecessarily The connect handle of the analog TV and HDMI connectors casts the dssdev to panel data only to then access fields of the panel data that are also present in the dssdev. Remove the cast and use dssdev directly. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 5 ++--- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 15921b361672..cda6c312ad05 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -43,13 +43,12 @@ static const struct videomode tvc_pal_vm = { static int tvc_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src; int r; - src = omapdss_of_find_connected_device(ddata->dev->of_node, 0); + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); if (IS_ERR_OR_NULL(src)) { - dev_err(ddata->dev, "failed to find video source\n"); + dev_err(dssdev->dev, "failed to find video source\n"); return src ? PTR_ERR(src) : -EINVAL; } diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 3452925ed54d..6eb4c24d6aa7 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -53,13 +53,12 @@ struct panel_drv_data { static int hdmic_connect(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src; int r; - src = omapdss_of_find_connected_device(ddata->dev->of_node, 0); + src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); if (IS_ERR_OR_NULL(src)) { - dev_err(ddata->dev, "failed to find video source\n"); + dev_err(dssdev->dev, "failed to find video source\n"); return src ? 
PTR_ERR(src) : -EINVAL; } -- GitLab From 66aacfe22d53137eab511b3f4d674ddd40a7b1ac Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 21:38:21 +0200 Subject: [PATCH 0926/1692] drm/omap: dss: Cleanup error paths in output init functions Rename the jump labels according to the cleanup they perform, not the location they're accessed from, and move functions from error checks to cleanup paths, and move reference handling to simplify cleanup. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dpi.c | 10 ++-------- drivers/gpu/drm/omapdrm/dss/dsi.c | 9 ++++----- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 7 ++++--- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 7 ++++--- drivers/gpu/drm/omapdrm/dss/sdi.c | 7 ++----- drivers/gpu/drm/omapdrm/dss/venc.c | 7 +++---- 6 files changed, 19 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 0c11d17f23a4..5839009f272e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -745,15 +745,14 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, return 0; r = of_property_read_u32(ep, "data-lines", &datalines); + of_node_put(ep); if (r) { DSSERR("failed to parse datalines\n"); - goto err_datalines; + return r; } dpi->data_lines = datalines; - of_node_put(ep); - dpi->pdev = pdev; dpi->dss_model = dss_model; dpi->dss = dss; @@ -764,11 +763,6 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, dpi_init_output_port(dpi, port); return 0; - -err_datalines: - of_node_put(ep); - - return r; } void dpi_uninit_port(struct device_node *port) diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index ee260353b9ca..173c05a54550 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -5360,7 +5360,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) r = dsi_runtime_get(dsi); if (r) - goto err_runtime_get; + goto err_pm_disable; rev = dsi_read_reg(dsi, DSI_REVISION); dev_dbg(dev, "OMAP DSI rev %d.%d\n", @@ -5381,7 +5381,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) r = dsi_probe_of(dsi); if (r) { DSSERR("Invalid DSI DT data\n"); - goto err_probe_of; + goto err_uninit_output; } r = of_platform_populate(dev->of_node, NULL, NULL, dev); @@ -5404,11 +5404,10 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) return 0; -err_probe_of: +err_uninit_output: dsi_uninit_output(dsi); dsi_runtime_put(dsi); - -err_runtime_get: +err_pm_disable: pm_runtime_disable(dev); return r; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index bf800cede2ad..1d1f2e0b2b2a 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -780,9 +780,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data) r = hdmi_audio_register(hdmi); if (r) { DSSERR("Registering HDMI audio failed\n"); - hdmi_uninit_output(hdmi); - pm_runtime_disable(&pdev->dev); - return r; + goto err_uninit_output; } hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, @@ -790,6 +788,9 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data) return 0; +err_uninit_output: + hdmi_uninit_output(hdmi); + pm_runtime_disable(&pdev->dev); err_pll: hdmi_pll_uninit(&hdmi->pll); err_free: diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c 
b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index e5d23dd19f99..92ae561bf974 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -773,9 +773,7 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data) r = hdmi_audio_register(hdmi); if (r) { DSSERR("Registering HDMI audio failed %d\n", r); - hdmi_uninit_output(hdmi); - pm_runtime_disable(&pdev->dev); - return r; + goto err_uninit_output; } hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, @@ -783,6 +781,9 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data) return 0; +err_uninit_output: + hdmi_uninit_output(hdmi); + pm_runtime_disable(&pdev->dev); err_pll: hdmi_pll_uninit(&hdmi->pll); err_free: diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 4b70a3044dd6..e9b280784264 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -358,16 +358,15 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, } r = of_property_read_u32(ep, "datapairs", &datapairs); + of_node_put(ep); if (r) { DSSERR("failed to parse datapairs\n"); - goto err_datapairs; + goto err_free; } sdi->datapairs = datapairs; sdi->dss = dss; - of_node_put(ep); - sdi->pdev = pdev; port->data = sdi; @@ -375,8 +374,6 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, return 0; -err_datapairs: - of_node_put(ep); err_free: kfree(sdi); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index dd9af335dba0..93c3e5250a63 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -867,7 +867,7 @@ static int venc_bind(struct device *dev, struct device *master, void *data) r = venc_runtime_get(venc); if (r) - goto err_runtime_get; + goto err_pm_disable; rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff); dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id); @@ -877,7 +877,7 @@ static int venc_bind(struct device *dev, struct device *master, void *data) r = venc_probe_of(venc); if (r) { DSSERR("Invalid DT data\n"); - goto err_probe_of; + goto err_pm_disable; } venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs, @@ -887,8 +887,7 @@ static int venc_bind(struct device *dev, struct device *master, void *data) return 0; -err_probe_of: -err_runtime_get: +err_pm_disable: pm_runtime_disable(&pdev->dev); err_free: kfree(venc); -- GitLab From edb715dffdee71bb8216ee4d71c0714d932e9acf Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 3 Mar 2018 18:52:59 +0200 Subject: [PATCH 0927/1692] drm/omap: dss: dsi: Move initialization code from bind to probe There's no reason to delay initialization of most of the driver (such as mapping memory I/O or enabling runtime PM) to the component bind handler. Perform as much of the initialization as possible at probe time, initializing at bind time only the parts that depends on the DSS. The cleanup code is moved from unbind to remove in a similar way. 
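For reference, the split that this and the following patches converge on
looks roughly as follows. This is a sketch only: bodies are abbreviated
and error unwinding is elided, but all names are taken from the driver.

	/* probe: everything that does not depend on the DSS */
	static int dsi_probe(struct platform_device *pdev)
	{
		struct dsi_data *dsi;

		dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
		dev_set_drvdata(&pdev->dev, dsi);

		/* map registers, request the IRQ, parse the DT port */

		pm_runtime_enable(&pdev->dev);
		dsi_init_output(dsi);

		return component_add(&pdev->dev, &dsi_component_ops);
	}

	/* bind: only the parts that need the DSS */
	static int dsi_bind(struct device *dev, struct device *master, void *data)
	{
		struct dsi_data *dsi = dev_get_drvdata(dev);

		dsi->dss = dss_get_device(master);
		dsi_init_pll_data(dsi->dss, dsi);
		/* read the revision, create the debugfs files */

		return 0;
	}

dsi_remove() and dsi_unbind() mirror these, undoing the probe-time and
bind-time initialization respectively.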
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dsi.c | 301 ++++++++++++++++-------------- 1 file changed, 161 insertions(+), 140 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 173c05a54550..ab0426fab22e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4981,85 +4981,9 @@ static const struct omap_dss_device_ops dsi_ops = { }, }; -static void dsi_init_output(struct dsi_data *dsi) -{ - struct omap_dss_device *out = &dsi->output; - - out->dev = dsi->dev; - out->id = dsi->module_id == 0 ? - OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; - - out->output_type = OMAP_DISPLAY_TYPE_DSI; - out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1"; - out->dispc_channel = dsi_get_channel(dsi); - out->ops = &dsi_ops; - out->owner = THIS_MODULE; - out->of_ports = BIT(0); - - omapdss_device_register(out); -} - -static void dsi_uninit_output(struct dsi_data *dsi) -{ - struct omap_dss_device *out = &dsi->output; - - omapdss_device_unregister(out); -} - -static int dsi_probe_of(struct dsi_data *dsi) -{ - struct device_node *node = dsi->dev->of_node; - struct property *prop; - u32 lane_arr[10]; - int len, num_pins; - int r, i; - struct device_node *ep; - struct omap_dsi_pin_config pin_cfg; - - ep = of_graph_get_endpoint_by_regs(node, 0, 0); - if (!ep) - return 0; - - prop = of_find_property(ep, "lanes", &len); - if (prop == NULL) { - dev_err(dsi->dev, "failed to find lane data\n"); - r = -EINVAL; - goto err; - } - - num_pins = len / sizeof(u32); - - if (num_pins < 4 || num_pins % 2 != 0 || - num_pins > dsi->num_lanes_supported * 2) { - dev_err(dsi->dev, "bad number of lanes\n"); - r = -EINVAL; - goto err; - } - - r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins); - if (r) { - dev_err(dsi->dev, "failed to read lane data\n"); - goto err; - } - - pin_cfg.num_pins = num_pins; - for (i = 0; i < num_pins; ++i) - pin_cfg.pins[i] = (int)lane_arr[i]; - - r = dsi_configure_pins(&dsi->output, &pin_cfg); - if (r) { - dev_err(dsi->dev, "failed to configure pins"); - goto err; - } - - of_node_put(ep); - - return 0; - -err: - of_node_put(ep); - return r; -} +/* ----------------------------------------------------------------------------- + * PLL + */ static const struct dss_pll_ops dsi_pll_ops = { .enable = dsi_pll_enable, @@ -5174,7 +5098,153 @@ static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi) return 0; } -/* DSI1 HW IP initialisation */ +/* ----------------------------------------------------------------------------- + * Component Bind & Unbind + */ + +static int dsi_bind(struct device *dev, struct device *master, void *data) +{ + struct dss_device *dss = dss_get_device(master); + struct dsi_data *dsi = dev_get_drvdata(dev); + char name[10]; + u32 rev; + int r; + + dsi->dss = dss; + + dsi_init_pll_data(dss, dsi); + + r = dsi_runtime_get(dsi); + if (r) + return r; + + rev = dsi_read_reg(dsi, DSI_REVISION); + dev_dbg(dev, "OMAP DSI rev %d.%d\n", + FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + dsi->line_buffer_size = dsi_get_line_buf_size(dsi); + + dsi_runtime_put(dsi); + + snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); + dsi->debugfs.regs = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_regs, &dsi); +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); + dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_irqs, &dsi); +#endif + 
snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); + dsi->debugfs.clks = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_clocks, &dsi); + + return 0; +} + +static void dsi_unbind(struct device *dev, struct device *master, void *data) +{ + struct dsi_data *dsi = dev_get_drvdata(dev); + + dss_debugfs_remove_file(dsi->debugfs.clks); + dss_debugfs_remove_file(dsi->debugfs.irqs); + dss_debugfs_remove_file(dsi->debugfs.regs); + + of_platform_depopulate(dev); + + WARN_ON(dsi->scp_clk_refcount > 0); + + dss_pll_unregister(&dsi->pll); +} + +static const struct component_ops dsi_component_ops = { + .bind = dsi_bind, + .unbind = dsi_unbind, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove, Suspend & Resume + */ + +static void dsi_init_output(struct dsi_data *dsi) +{ + struct omap_dss_device *out = &dsi->output; + + out->dev = dsi->dev; + out->id = dsi->module_id == 0 ? + OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; + + out->output_type = OMAP_DISPLAY_TYPE_DSI; + out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1"; + out->dispc_channel = dsi_get_channel(dsi); + out->ops = &dsi_ops; + out->owner = THIS_MODULE; + out->of_ports = BIT(0); + + omapdss_device_register(out); +} + +static void dsi_uninit_output(struct dsi_data *dsi) +{ + struct omap_dss_device *out = &dsi->output; + + omapdss_device_unregister(out); +} + +static int dsi_probe_of(struct dsi_data *dsi) +{ + struct device_node *node = dsi->dev->of_node; + struct property *prop; + u32 lane_arr[10]; + int len, num_pins; + int r, i; + struct device_node *ep; + struct omap_dsi_pin_config pin_cfg; + + ep = of_graph_get_endpoint_by_regs(node, 0, 0); + if (!ep) + return 0; + + prop = of_find_property(ep, "lanes", &len); + if (prop == NULL) { + dev_err(dsi->dev, "failed to find lane data\n"); + r = -EINVAL; + goto err; + } + + num_pins = len / sizeof(u32); + + if (num_pins < 4 || num_pins % 2 != 0 || + num_pins > dsi->num_lanes_supported * 2) { + dev_err(dsi->dev, "bad number of lanes\n"); + r = -EINVAL; + goto err; + } + + r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins); + if (r) { + dev_err(dsi->dev, "failed to read lane data\n"); + goto err; + } + + pin_cfg.num_pins = num_pins; + for (i = 0; i < num_pins; ++i) + pin_cfg.pins[i] = (int)lane_arr[i]; + + r = dsi_configure_pins(&dsi->output, &pin_cfg); + if (r) { + dev_err(dsi->dev, "failed to configure pins"); + goto err; + } + + of_node_put(ep); + + return 0; + +err: + of_node_put(ep); + return r; +} + static const struct dsi_of_data dsi_of_data_omap34xx = { .model = DSI_MODEL_OMAP3, .pll_hw = &dss_omap3_dsi_pll_hw, @@ -5240,24 +5310,21 @@ static const struct soc_device_attribute dsi_soc_devices[] = { { /* sentinel */ } }; -static int dsi_bind(struct device *dev, struct device *master, void *data) +static int dsi_probe(struct platform_device *pdev) { - struct platform_device *pdev = to_platform_device(dev); - struct dss_device *dss = dss_get_device(master); const struct soc_device_attribute *soc; const struct dsi_module_id_data *d; - u32 rev; - int r, i; + struct device *dev = &pdev->dev; struct dsi_data *dsi; struct resource *dsi_mem; struct resource *res; - char name[10]; + unsigned int i; + int r; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; - dsi->dss = dss; dsi->dev = dev; dev_set_drvdata(dev, dsi); @@ -5354,18 +5421,8 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) if (r) return r; - dsi_init_pll_data(dss, dsi); - pm_runtime_enable(dev); - r = 
dsi_runtime_get(dsi); - if (r) - goto err_pm_disable; - - rev = dsi_read_reg(dsi, DSI_REVISION); - dev_dbg(dev, "OMAP DSI rev %d.%d\n", - FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); - /* DSI on OMAP3 doesn't have register DSI_GNQ, set number * of data to 3 by default */ if (dsi->data->quirks & DSI_QUIRK_GNQ) @@ -5374,8 +5431,6 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) else dsi->num_lanes_supported = 3; - dsi->line_buffer_size = dsi_get_line_buf_size(dsi); - dsi_init_output(dsi); r = dsi_probe_of(dsi); @@ -5388,67 +5443,33 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) if (r) DSSERR("Failed to populate DSI child devices: %d\n", r); - dsi_runtime_put(dsi); - - snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); - dsi->debugfs.regs = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_regs, &dsi); -#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS - snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); - dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_irqs, &dsi); -#endif - snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); - dsi->debugfs.clks = dss_debugfs_create_file(dss, name, - dsi_dump_dsi_clocks, &dsi); + r = component_add(&pdev->dev, &dsi_component_ops); + if (r) + goto err_uninit_output; return 0; err_uninit_output: dsi_uninit_output(dsi); - dsi_runtime_put(dsi); -err_pm_disable: pm_runtime_disable(dev); return r; } -static void dsi_unbind(struct device *dev, struct device *master, void *data) +static int dsi_remove(struct platform_device *pdev) { - struct dsi_data *dsi = dev_get_drvdata(dev); + struct dsi_data *dsi = platform_get_drvdata(pdev); - dss_debugfs_remove_file(dsi->debugfs.clks); - dss_debugfs_remove_file(dsi->debugfs.irqs); - dss_debugfs_remove_file(dsi->debugfs.regs); - - of_platform_depopulate(dev); - - WARN_ON(dsi->scp_clk_refcount > 0); - - dss_pll_unregister(&dsi->pll); + component_del(&pdev->dev, &dsi_component_ops); dsi_uninit_output(dsi); - pm_runtime_disable(dev); + pm_runtime_disable(&pdev->dev); if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { regulator_disable(dsi->vdds_dsi_reg); dsi->vdds_dsi_enabled = false; } -} -static const struct component_ops dsi_component_ops = { - .bind = dsi_bind, - .unbind = dsi_unbind, -}; - -static int dsi_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &dsi_component_ops); -} - -static int dsi_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &dsi_component_ops); return 0; } -- GitLab From 5fc15d98a06833dd6b4247533fa50e15ada34c5e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 3 Mar 2018 18:52:59 +0200 Subject: [PATCH 0928/1692] drm/omap: dss: hdmi4: Move initialization code from bind to probe There's no reason to delay initialization of most of the driver (such as mapping memory I/O or enabling runtime PM) to the component bind handler. Perform as much of the initialization as possible at probe time, initializing at bind time only the parts that depends on the DSS. The cleanup code is moved from unbind to remove in a similar way. 
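The same probe/bind split is applied to hdmi4. A welcome side effect is
that the bind error path becomes a conventional reverse-order unwind;
condensed from the patch below (message and debugfs details elided):

	static int hdmi4_bind(struct device *dev, struct device *master, void *data)
	{
		struct omap_hdmi *hdmi = dev_get_drvdata(dev);
		int r;

		hdmi->dss = dss_get_device(master);

		r = hdmi_pll_init(hdmi->dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
		if (r)
			return r;		/* nothing to undo yet */

		r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
		if (r)
			goto err_pll_uninit;

		r = hdmi_audio_register(hdmi);
		if (r)
			goto err_cec_uninit;	/* undo in reverse order */

		return 0;

	err_cec_uninit:
		hdmi4_cec_uninit(&hdmi->core);
	err_pll_uninit:
		hdmi_pll_uninit(&hdmi->pll);
		return r;
	}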
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 218 +++++++++++++++------------- 1 file changed, 117 insertions(+), 101 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 1d1f2e0b2b2a..89fdce02278c 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -553,53 +553,10 @@ static const struct omap_dss_device_ops hdmi_ops = { }, }; -static void hdmi_init_output(struct omap_hdmi *hdmi) -{ - struct omap_dss_device *out = &hdmi->output; - - out->dev = &hdmi->pdev->dev; - out->id = OMAP_DSS_OUTPUT_HDMI; - out->output_type = OMAP_DISPLAY_TYPE_HDMI; - out->name = "hdmi.0"; - out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->ops = &hdmi_ops; - out->owner = THIS_MODULE; - out->of_ports = BIT(0); - - omapdss_device_register(out); -} - -static void hdmi_uninit_output(struct omap_hdmi *hdmi) -{ - struct omap_dss_device *out = &hdmi->output; - - omapdss_device_unregister(out); -} - -static int hdmi_probe_of(struct omap_hdmi *hdmi) -{ - struct platform_device *pdev = hdmi->pdev; - struct device_node *node = pdev->dev.of_node; - struct device_node *ep; - int r; - - ep = of_graph_get_endpoint_by_regs(node, 0, 0); - if (!ep) - return 0; - - r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy); - if (r) - goto err; - - of_node_put(ep); - return 0; - -err: - of_node_put(ep); - return r; -} +/* ----------------------------------------------------------------------------- + * Audio Callbacks + */ -/* Audio callbacks */ static int hdmi_audio_startup(struct device *dev, void (*abort_cb)(struct device *dev)) { @@ -714,27 +671,123 @@ static int hdmi_audio_register(struct omap_hdmi *hdmi) return 0; } -/* HDMI HW IP initialisation */ +/* ----------------------------------------------------------------------------- + * Component Bind & Unbind + */ + static int hdmi4_bind(struct device *dev, struct device *master, void *data) { - struct platform_device *pdev = to_platform_device(dev); struct dss_device *dss = dss_get_device(master); - struct omap_hdmi *hdmi; + struct omap_hdmi *hdmi = dev_get_drvdata(dev); int r; + + hdmi->dss = dss; + + r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp); + if (r) + return r; + + r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp); + if (r) + goto err_pll_uninit; + + r = hdmi_audio_register(hdmi); + if (r) { + DSSERR("Registering HDMI audio failed\n"); + goto err_cec_uninit; + } + + hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, + hdmi); + + return 0; + +err_cec_uninit: + hdmi4_cec_uninit(&hdmi->core); +err_pll_uninit: + hdmi_pll_uninit(&hdmi->pll); + return r; +} + +static void hdmi4_unbind(struct device *dev, struct device *master, void *data) +{ + struct omap_hdmi *hdmi = dev_get_drvdata(dev); + + dss_debugfs_remove_file(hdmi->debugfs); + + if (hdmi->audio_pdev) + platform_device_unregister(hdmi->audio_pdev); + + hdmi4_cec_uninit(&hdmi->core); + hdmi_pll_uninit(&hdmi->pll); +} + +static const struct component_ops hdmi4_component_ops = { + .bind = hdmi4_bind, + .unbind = hdmi4_unbind, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove, Suspend & Resume + */ + +static void hdmi4_init_output(struct omap_hdmi *hdmi) +{ + struct omap_dss_device *out = &hdmi->output; + + out->dev = &hdmi->pdev->dev; + out->id = OMAP_DSS_OUTPUT_HDMI; + out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->name = "hdmi.0"; + out->dispc_channel = 
OMAP_DSS_CHANNEL_DIGIT; + out->ops = &hdmi_ops; + out->owner = THIS_MODULE; + out->of_ports = BIT(0); + + omapdss_device_register(out); +} + +static void hdmi4_uninit_output(struct omap_hdmi *hdmi) +{ + struct omap_dss_device *out = &hdmi->output; + + omapdss_device_unregister(out); +} + +static int hdmi4_probe_of(struct omap_hdmi *hdmi) +{ + struct platform_device *pdev = hdmi->pdev; + struct device_node *node = pdev->dev.of_node; + struct device_node *ep; + int r; + + ep = of_graph_get_endpoint_by_regs(node, 0, 0); + if (!ep) + return 0; + + r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy); + of_node_put(ep); + return r; +} + +static int hdmi4_probe(struct platform_device *pdev) +{ + struct omap_hdmi *hdmi; int irq; + int r; hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return -ENOMEM; hdmi->pdev = pdev; - hdmi->dss = dss; + dev_set_drvdata(&pdev->dev, hdmi); mutex_init(&hdmi->lock); spin_lock_init(&hdmi->audio_playing_lock); - r = hdmi_probe_of(hdmi); + r = hdmi4_probe_of(hdmi); if (r) goto err_free; @@ -742,27 +795,19 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data) if (r) goto err_free; - r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp); - if (r) - goto err_free; - r = hdmi_phy_init(pdev, &hdmi->phy, 4); if (r) - goto err_pll; + goto err_free; r = hdmi4_core_init(pdev, &hdmi->core); if (r) - goto err_pll; - - r = hdmi4_cec_init(pdev, &hdmi->core, &hdmi->wp); - if (r) - goto err_pll; + goto err_free; irq = platform_get_irq(pdev, 0); if (irq < 0) { DSSERR("platform_get_irq failed\n"); r = -ENODEV; - goto err_pll; + goto err_free; } r = devm_request_threaded_irq(&pdev->dev, irq, @@ -770,67 +815,38 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data) IRQF_ONESHOT, "OMAP HDMI", hdmi); if (r) { DSSERR("HDMI IRQ request failed\n"); - goto err_pll; + goto err_free; } pm_runtime_enable(&pdev->dev); - hdmi_init_output(hdmi); + hdmi4_init_output(hdmi); - r = hdmi_audio_register(hdmi); - if (r) { - DSSERR("Registering HDMI audio failed\n"); + r = component_add(&pdev->dev, &hdmi4_component_ops); + if (r) goto err_uninit_output; - } - - hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, - hdmi); return 0; err_uninit_output: - hdmi_uninit_output(hdmi); + hdmi4_uninit_output(hdmi); pm_runtime_disable(&pdev->dev); -err_pll: - hdmi_pll_uninit(&hdmi->pll); err_free: kfree(hdmi); return r; } -static void hdmi4_unbind(struct device *dev, struct device *master, void *data) +static int hdmi4_remove(struct platform_device *pdev) { - struct omap_hdmi *hdmi = dev_get_drvdata(dev); + struct omap_hdmi *hdmi = platform_get_drvdata(pdev); - dss_debugfs_remove_file(hdmi->debugfs); - - if (hdmi->audio_pdev) - platform_device_unregister(hdmi->audio_pdev); - - hdmi_uninit_output(hdmi); - - hdmi4_cec_uninit(&hdmi->core); + component_del(&pdev->dev, &hdmi4_component_ops); - hdmi_pll_uninit(&hdmi->pll); + hdmi4_uninit_output(hdmi); - pm_runtime_disable(dev); + pm_runtime_disable(&pdev->dev); kfree(hdmi); -} - -static const struct component_ops hdmi4_component_ops = { - .bind = hdmi4_bind, - .unbind = hdmi4_unbind, -}; - -static int hdmi4_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &hdmi4_component_ops); -} - -static int hdmi4_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &hdmi4_component_ops); return 0; } -- GitLab From 5f031b4717349849e4d88edd09c9ec06a4729cfb Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 3 Mar 2018 18:52:59 +0200 Subject: [PATCH 0929/1692] drm/omap: dss: 
hdmi5: Move initialization code from bind to probe There's no reason to delay initialization of most of the driver (such as mapping memory I/O or enabling runtime PM) to the component bind handler. Perform as much of the initialization as possible at probe time, initializing at bind time only the parts that depends on the DSS. The cleanup code is moved from unbind to remove in a similar way. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 205 +++++++++++++++------------- 1 file changed, 110 insertions(+), 95 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 92ae561bf974..64b45a612439 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -544,53 +544,10 @@ static const struct omap_dss_device_ops hdmi_ops = { }, }; -static void hdmi_init_output(struct omap_hdmi *hdmi) -{ - struct omap_dss_device *out = &hdmi->output; - - out->dev = &hdmi->pdev->dev; - out->id = OMAP_DSS_OUTPUT_HDMI; - out->output_type = OMAP_DISPLAY_TYPE_HDMI; - out->name = "hdmi.0"; - out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->ops = &hdmi_ops; - out->owner = THIS_MODULE; - out->of_ports = BIT(0); - - omapdss_device_register(out); -} - -static void hdmi_uninit_output(struct omap_hdmi *hdmi) -{ - struct omap_dss_device *out = &hdmi->output; - - omapdss_device_unregister(out); -} - -static int hdmi_probe_of(struct omap_hdmi *hdmi) -{ - struct platform_device *pdev = hdmi->pdev; - struct device_node *node = pdev->dev.of_node; - struct device_node *ep; - int r; - - ep = of_graph_get_endpoint_by_regs(node, 0, 0); - if (!ep) - return 0; - - r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy); - if (r) - goto err; - - of_node_put(ep); - return 0; - -err: - of_node_put(ep); - return r; -} +/* ----------------------------------------------------------------------------- + * Audio Callbacks + */ -/* Audio callbacks */ static int hdmi_audio_startup(struct device *dev, void (*abort_cb)(struct device *dev)) { @@ -711,27 +668,116 @@ static int hdmi_audio_register(struct omap_hdmi *hdmi) return 0; } -/* HDMI HW IP initialisation */ +/* ----------------------------------------------------------------------------- + * Component Bind & Unbind + */ + static int hdmi5_bind(struct device *dev, struct device *master, void *data) { - struct platform_device *pdev = to_platform_device(dev); struct dss_device *dss = dss_get_device(master); - struct omap_hdmi *hdmi; + struct omap_hdmi *hdmi = dev_get_drvdata(dev); int r; + + hdmi->dss = dss; + + r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp); + if (r) + return r; + + r = hdmi_audio_register(hdmi); + if (r) { + DSSERR("Registering HDMI audio failed %d\n", r); + goto err_pll_uninit; + } + + hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, + hdmi); + + return 0; + +err_pll_uninit: + hdmi_pll_uninit(&hdmi->pll); + return r; +} + +static void hdmi5_unbind(struct device *dev, struct device *master, void *data) +{ + struct omap_hdmi *hdmi = dev_get_drvdata(dev); + + dss_debugfs_remove_file(hdmi->debugfs); + + if (hdmi->audio_pdev) + platform_device_unregister(hdmi->audio_pdev); + + hdmi_pll_uninit(&hdmi->pll); +} + +static const struct component_ops hdmi5_component_ops = { + .bind = hdmi5_bind, + .unbind = hdmi5_unbind, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove, Suspend & Resume + */ + +static void hdmi5_init_output(struct 
omap_hdmi *hdmi) +{ + struct omap_dss_device *out = &hdmi->output; + + out->dev = &hdmi->pdev->dev; + out->id = OMAP_DSS_OUTPUT_HDMI; + out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->name = "hdmi.0"; + out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; + out->ops = &hdmi_ops; + out->owner = THIS_MODULE; + out->of_ports = BIT(0); + + omapdss_device_register(out); +} + +static void hdmi5_uninit_output(struct omap_hdmi *hdmi) +{ + struct omap_dss_device *out = &hdmi->output; + + omapdss_device_unregister(out); +} + +static int hdmi5_probe_of(struct omap_hdmi *hdmi) +{ + struct platform_device *pdev = hdmi->pdev; + struct device_node *node = pdev->dev.of_node; + struct device_node *ep; + int r; + + ep = of_graph_get_endpoint_by_regs(node, 0, 0); + if (!ep) + return 0; + + r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy); + of_node_put(ep); + return r; +} + +static int hdmi5_probe(struct platform_device *pdev) +{ + struct omap_hdmi *hdmi; int irq; + int r; hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return -ENOMEM; hdmi->pdev = pdev; - hdmi->dss = dss; + dev_set_drvdata(&pdev->dev, hdmi); mutex_init(&hdmi->lock); spin_lock_init(&hdmi->audio_playing_lock); - r = hdmi_probe_of(hdmi); + r = hdmi5_probe_of(hdmi); if (r) goto err_free; @@ -739,23 +785,19 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data) if (r) goto err_free; - r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp); - if (r) - goto err_free; - r = hdmi_phy_init(pdev, &hdmi->phy, 5); if (r) - goto err_pll; + goto err_free; r = hdmi5_core_init(pdev, &hdmi->core); if (r) - goto err_pll; + goto err_free; irq = platform_get_irq(pdev, 0); if (irq < 0) { DSSERR("platform_get_irq failed\n"); r = -ENODEV; - goto err_pll; + goto err_free; } r = devm_request_threaded_irq(&pdev->dev, irq, @@ -763,65 +805,38 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data) IRQF_ONESHOT, "OMAP HDMI", hdmi); if (r) { DSSERR("HDMI IRQ request failed\n"); - goto err_pll; + goto err_free; } pm_runtime_enable(&pdev->dev); - hdmi_init_output(hdmi); + hdmi5_init_output(hdmi); - r = hdmi_audio_register(hdmi); - if (r) { - DSSERR("Registering HDMI audio failed %d\n", r); + r = component_add(&pdev->dev, &hdmi5_component_ops); + if (r) goto err_uninit_output; - } - - hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, - hdmi); return 0; err_uninit_output: - hdmi_uninit_output(hdmi); + hdmi5_uninit_output(hdmi); pm_runtime_disable(&pdev->dev); -err_pll: - hdmi_pll_uninit(&hdmi->pll); err_free: kfree(hdmi); return r; } -static void hdmi5_unbind(struct device *dev, struct device *master, void *data) +static int hdmi5_remove(struct platform_device *pdev) { - struct omap_hdmi *hdmi = dev_get_drvdata(dev); - - dss_debugfs_remove_file(hdmi->debugfs); + struct omap_hdmi *hdmi = platform_get_drvdata(pdev); - if (hdmi->audio_pdev) - platform_device_unregister(hdmi->audio_pdev); - - hdmi_uninit_output(hdmi); + component_del(&pdev->dev, &hdmi5_component_ops); - hdmi_pll_uninit(&hdmi->pll); + hdmi5_uninit_output(hdmi); - pm_runtime_disable(dev); + pm_runtime_disable(&pdev->dev); kfree(hdmi); -} - -static const struct component_ops hdmi5_component_ops = { - .bind = hdmi5_bind, - .unbind = hdmi5_unbind, -}; - -static int hdmi5_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &hdmi5_component_ops); -} - -static int hdmi5_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &hdmi5_component_ops); return 0; } -- GitLab From c87193267d247c58f4517081d9cd04c8dc6302b8 Mon Sep 17 
00:00:00 2001 From: Laurent Pinchart Date: Sat, 3 Mar 2018 18:52:59 +0200 Subject: [PATCH 0930/1692] drm/omap: dss: venc: Move initialization code from bind to probe There's no reason to delay initialization of most of the driver (such as mapping memory I/O or enabling runtime PM) to the component bind handler. Perform as much of the initialization as possible at probe time, initializing at bind time only the parts that depends on the DSS. The cleanup code is moved from unbind to remove in a similar way. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/venc.c | 104 +++++++++++++++++------------ 1 file changed, 60 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 93c3e5250a63..5adf8510d67b 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -756,6 +756,50 @@ static const struct omap_dss_device_ops venc_ops = { .set_timings = venc_set_timings, }; +/* ----------------------------------------------------------------------------- + * Component Bind & Unbind + */ + +static int venc_bind(struct device *dev, struct device *master, void *data) +{ + struct dss_device *dss = dss_get_device(master); + struct venc_device *venc = dev_get_drvdata(dev); + u8 rev_id; + int r; + + venc->dss = dss; + + r = venc_runtime_get(venc); + if (r) + return r; + + rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff); + dev_dbg(dev, "OMAP VENC rev %d\n", rev_id); + + venc_runtime_put(venc); + + venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs, + venc); + + return 0; +} + +static void venc_unbind(struct device *dev, struct device *master, void *data) +{ + struct venc_device *venc = dev_get_drvdata(dev); + + dss_debugfs_remove_file(venc->debugfs); +} + +static const struct component_ops venc_component_ops = { + .bind = venc_bind, + .unbind = venc_unbind, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove, Suspend & Resume + */ + static void venc_init_output(struct venc_device *venc) { struct omap_dss_device *out = &venc->output; @@ -820,19 +864,15 @@ static int venc_probe_of(struct venc_device *venc) return r; } -/* VENC HW IP initialisation */ static const struct soc_device_attribute venc_soc_devices[] = { { .machine = "OMAP3[45]*" }, { .machine = "AM35*" }, { /* sentinel */ } }; -static int venc_bind(struct device *dev, struct device *master, void *data) +static int venc_probe(struct platform_device *pdev) { - struct platform_device *pdev = to_platform_device(dev); - struct dss_device *dss = dss_get_device(master); struct venc_device *venc; - u8 rev_id; struct resource *venc_mem; int r; @@ -841,8 +881,8 @@ static int venc_bind(struct device *dev, struct device *master, void *data) return -ENOMEM; venc->pdev = pdev; - venc->dss = dss; - dev_set_drvdata(dev, venc); + + platform_set_drvdata(pdev, venc); /* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. 
*/ if (soc_device_match(venc_soc_devices)) @@ -863,63 +903,39 @@ static int venc_bind(struct device *dev, struct device *master, void *data) if (r) goto err_free; - pm_runtime_enable(&pdev->dev); - - r = venc_runtime_get(venc); - if (r) - goto err_pm_disable; - - rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff); - dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id); - - venc_runtime_put(venc); - r = venc_probe_of(venc); - if (r) { - DSSERR("Invalid DT data\n"); - goto err_pm_disable; - } + if (r) + goto err_free; - venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs, - venc); + pm_runtime_enable(&pdev->dev); venc_init_output(venc); + r = component_add(&pdev->dev, &venc_component_ops); + if (r) + goto err_uninit_output; + return 0; -err_pm_disable: +err_uninit_output: + venc_uninit_output(venc); pm_runtime_disable(&pdev->dev); err_free: kfree(venc); return r; } -static void venc_unbind(struct device *dev, struct device *master, void *data) +static int venc_remove(struct platform_device *pdev) { - struct venc_device *venc = dev_get_drvdata(dev); + struct venc_device *venc = platform_get_drvdata(pdev); - dss_debugfs_remove_file(venc->debugfs); + component_del(&pdev->dev, &venc_component_ops); venc_uninit_output(venc); - pm_runtime_disable(dev); + pm_runtime_disable(&pdev->dev); kfree(venc); -} - -static const struct component_ops venc_component_ops = { - .bind = venc_bind, - .unbind = venc_unbind, -}; - -static int venc_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &venc_component_ops); -} - -static int venc_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &venc_component_ops); return 0; } -- GitLab From 27d624527d99265c2df999af3615ff71c29d06f4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 2 Mar 2018 22:13:06 +0200 Subject: [PATCH 0931/1692] drm/omap: dss: Acquire next dssdev at probe time Look up the next dssdev at probe time based on device tree links for all DSS outputs and encoders. This will be used to reverse the order of the dssdev connect and disconnect call chains. 
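Every output and encoder gains the same lookup at probe time. The
recurring idiom is shown below as a sketch; only the port index varies
per device:

	out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
	if (IS_ERR(out->next)) {
		if (PTR_ERR(out->next) != -EPROBE_DEFER)
			dev_err(out->dev, "failed to find video sink\n");
		return PTR_ERR(out->next);	/* may be -EPROBE_DEFER */
	}

	omapdss_device_register(out);

with the matching release on the remove path:

	if (out->next)
		omapdss_device_put(out->next);
	omapdss_device_unregister(out);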
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 9 +++++++++ .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 9 +++++++++ .../drm/omapdrm/displays/encoder-tpd12s015.c | 9 +++++++++ drivers/gpu/drm/omapdrm/dss/dpi.c | 17 +++++++++++++---- drivers/gpu/drm/omapdrm/dss/dsi.c | 18 ++++++++++++++++-- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 18 ++++++++++++++++-- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 18 ++++++++++++++++-- drivers/gpu/drm/omapdrm/dss/omapdss.h | 1 + drivers/gpu/drm/omapdrm/dss/sdi.c | 17 +++++++++++++++-- drivers/gpu/drm/omapdrm/dss/venc.c | 18 ++++++++++++++++-- 10 files changed, 120 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 939e259d601d..f661ba8f3fa0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -169,6 +169,13 @@ static int opa362_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); + dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); + if (IS_ERR(dssdev->next)) { + if (PTR_ERR(dssdev->next) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to find video sink\n"); + return PTR_ERR(dssdev->next); + } + omapdss_device_register(dssdev); return 0; @@ -179,6 +186,8 @@ static int __exit opa362_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; + if (dssdev->next) + omapdss_device_put(dssdev->next); omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 55549c5a5af2..3be35ba564da 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -192,6 +192,13 @@ static int tfp410_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); + dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); + if (IS_ERR(dssdev->next)) { + if (PTR_ERR(dssdev->next) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to find video sink\n"); + return PTR_ERR(dssdev->next); + } + omapdss_device_register(dssdev); return 0; @@ -202,6 +209,8 @@ static int __exit tfp410_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; + if (dssdev->next) + omapdss_device_put(dssdev->next); omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 58a831c3f74c..cee53346f6fc 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -301,6 +301,13 @@ static int tpd_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); + dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); + if (IS_ERR(dssdev->next)) { + if (PTR_ERR(dssdev->next) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to find video sink\n"); + return PTR_ERR(dssdev->next); + } + omapdss_device_register(dssdev); return 0; @@ -311,6 +318,8 @@ static int __exit tpd_remove(struct platform_device *pdev) 
struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; + if (dssdev->next) + omapdss_device_put(dssdev->next); omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 5839009f272e..ae35aa1bf2c5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -688,7 +688,7 @@ static const struct omap_dss_device_ops dpi_ops = { .set_timings = dpi_set_timings, }; -static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) +static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) { struct omap_dss_device *out = &dpi->output; u32 port_num = 0; @@ -717,7 +717,16 @@ static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) out->ops = &dpi_ops; out->owner = THIS_MODULE; + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + omapdss_device_register(out); + + return 0; } static void dpi_uninit_output_port(struct device_node *port) @@ -725,6 +734,8 @@ static void dpi_uninit_output_port(struct device_node *port) struct dpi_data *dpi = port->data; struct omap_dss_device *out = &dpi->output; + if (out->next) + omapdss_device_put(out->next); omapdss_device_unregister(out); } @@ -760,9 +771,7 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, mutex_init(&dpi->lock); - dpi_init_output_port(dpi, port); - - return 0; + return dpi_init_output_port(dpi, port); } void dpi_uninit_port(struct device_node *port) diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index ab0426fab22e..631bf5805649 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -5165,7 +5165,7 @@ static const struct component_ops dsi_component_ops = { * Probe & Remove, Suspend & Resume */ -static void dsi_init_output(struct dsi_data *dsi) +static int dsi_init_output(struct dsi_data *dsi) { struct omap_dss_device *out = &dsi->output; @@ -5180,13 +5180,24 @@ static void dsi_init_output(struct dsi_data *dsi) out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + omapdss_device_register(out); + + return 0; } static void dsi_uninit_output(struct dsi_data *dsi) { struct omap_dss_device *out = &dsi->output; + if (out->next) + omapdss_device_put(out->next); omapdss_device_unregister(out); } @@ -5431,7 +5442,9 @@ static int dsi_probe(struct platform_device *pdev) else dsi->num_lanes_supported = 3; - dsi_init_output(dsi); + r = dsi_init_output(dsi); + if (r) + goto err_pm_disable; r = dsi_probe_of(dsi); if (r) { @@ -5451,6 +5464,7 @@ static int dsi_probe(struct platform_device *pdev) err_uninit_output: dsi_uninit_output(dsi); +err_pm_disable: pm_runtime_disable(dev); return r; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 89fdce02278c..118c015624b9 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -731,7 +731,7 @@ static const struct component_ops hdmi4_component_ops = { * Probe & Remove, Suspend & Resume */ -static void 
hdmi4_init_output(struct omap_hdmi *hdmi) +static int hdmi4_init_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; @@ -744,13 +744,24 @@ static void hdmi4_init_output(struct omap_hdmi *hdmi) out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + omapdss_device_register(out); + + return 0; } static void hdmi4_uninit_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; + if (out->next) + omapdss_device_put(out->next); omapdss_device_unregister(out); } @@ -820,7 +831,9 @@ static int hdmi4_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); - hdmi4_init_output(hdmi); + r = hdmi4_init_output(hdmi); + if (r) + goto err_pm_disable; r = component_add(&pdev->dev, &hdmi4_component_ops); if (r) @@ -830,6 +843,7 @@ static int hdmi4_probe(struct platform_device *pdev) err_uninit_output: hdmi4_uninit_output(hdmi); +err_pm_disable: pm_runtime_disable(&pdev->dev); err_free: kfree(hdmi); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 64b45a612439..7af60ca4e7b2 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -721,7 +721,7 @@ static const struct component_ops hdmi5_component_ops = { * Probe & Remove, Suspend & Resume */ -static void hdmi5_init_output(struct omap_hdmi *hdmi) +static int hdmi5_init_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; @@ -734,13 +734,24 @@ static void hdmi5_init_output(struct omap_hdmi *hdmi) out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + omapdss_device_register(out); + + return 0; } static void hdmi5_uninit_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; + if (out->next) + omapdss_device_put(out->next); omapdss_device_unregister(out); } @@ -810,7 +821,9 @@ static int hdmi5_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); - hdmi5_init_output(hdmi); + r = hdmi5_init_output(hdmi); + if (r) + goto err_pm_disable; r = component_add(&pdev->dev, &hdmi5_component_ops); if (r) @@ -820,6 +833,7 @@ static int hdmi5_probe(struct platform_device *pdev) err_uninit_output: hdmi5_uninit_output(hdmi); +err_pm_disable: pm_runtime_disable(&pdev->dev); err_free: kfree(hdmi); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index dc2f8167f61b..5d3e4ced73d1 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -394,6 +394,7 @@ struct omap_dss_device { struct dss_device *dss; struct omap_dss_device *src; struct omap_dss_device *dst; + struct omap_dss_device *next; struct list_head list; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index e9b280784264..fd7c11ebda5d 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -317,7 +317,7 @@ static const struct omap_dss_device_ops sdi_ops = { .set_timings = sdi_set_timings, }; -static void sdi_init_output(struct sdi_device *sdi) +static int sdi_init_output(struct sdi_device *sdi) { struct omap_dss_device *out = &sdi->output; @@ 
-331,11 +331,22 @@ static void sdi_init_output(struct sdi_device *sdi) out->ops = &sdi_ops; out->owner = THIS_MODULE; + out->next = omapdss_of_find_connected_device(out->dev->of_node, 1); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + omapdss_device_register(out); + + return 0; } static void sdi_uninit_output(struct sdi_device *sdi) { + if (sdi->output.next) + omapdss_device_put(sdi->output.next); omapdss_device_unregister(&sdi->output); } @@ -370,7 +381,9 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, sdi->pdev = pdev; port->data = sdi; - sdi_init_output(sdi); + r = sdi_init_output(sdi); + if (r) + goto err_free; return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 5adf8510d67b..298e86cc9e14 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -800,7 +800,7 @@ static const struct component_ops venc_component_ops = { * Probe & Remove, Suspend & Resume */ -static void venc_init_output(struct venc_device *venc) +static int venc_init_output(struct venc_device *venc) { struct omap_dss_device *out = &venc->output; @@ -813,11 +813,22 @@ static void venc_init_output(struct venc_device *venc) out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + omapdss_device_register(out); + + return 0; } static void venc_uninit_output(struct venc_device *venc) { + if (venc->output.next) + omapdss_device_put(venc->output.next); omapdss_device_unregister(&venc->output); } @@ -909,7 +920,9 @@ static int venc_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); - venc_init_output(venc); + r = venc_init_output(venc); + if (r) + goto err_pm_disable; r = component_add(&pdev->dev, &venc_component_ops); if (r) @@ -919,6 +932,7 @@ static int venc_probe(struct platform_device *pdev) err_uninit_output: venc_uninit_output(venc); +err_pm_disable: pm_runtime_disable(&pdev->dev); err_free: kfree(venc); -- GitLab From f7e376aece4636afb0c4da5ce54d5e805ce47a76 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 4 Mar 2018 22:28:25 +0200 Subject: [PATCH 0932/1692] drm/omap: dss: Add for_each_dss_output() macro Similarly to for_each_dss_display(), the for_each_dss_output() macro iterates over all the DSS connected outputs. Signed-off-by: Laurent Pinchart Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 19 +++++++++++++------ drivers/gpu/drm/omapdrm/dss/omapdss.h | 13 ++++++++++--- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 67086cbb3e24..1dbd08e6e029 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -126,12 +126,13 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, } /* - * Search for the next device starting at @from. If display_only is true, skip - * non-display devices. Release the reference to the @from device, and acquire - * a reference to the returned device if found. + * Search for the next device starting at @from. The type argument specfies + * which device types to consider when searching. Searching for multiple types + * is supported by and'ing their type flags. 
Release the reference to the @from + * device, and acquire a reference to the returned device if found. */ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, - bool display_only) + enum omap_dss_device_type type) { struct omap_dss_device *dssdev; struct list_head *list; @@ -159,8 +160,14 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, goto done; } - /* Filter out non-display entries if display_only is set. */ - if (!display_only || dssdev->driver) + /* + * Accept display entities if the display type is requested, + * and output entities if the output type is requested. + */ + if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) && dssdev->driver) + goto done; + if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id && + dssdev->next) goto done; } diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 5d3e4ced73d1..5cbbfb16369b 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -385,6 +385,11 @@ struct omap_dss_device_ops { }; }; +enum omap_dss_device_type { + OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0), + OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1), +}; + struct omap_dss_device { struct kobject kobj; struct device *dev; @@ -488,9 +493,9 @@ static inline bool omapdss_is_initialized(void) return !!omapdss_get_dss(); } -void omapdss_display_init(struct omap_dss_device *dssdev); #define for_each_dss_display(d) \ - while ((d = omapdss_device_get_next(d, true)) != NULL) + while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL) +void omapdss_display_init(struct omap_dss_device *dssdev); void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); @@ -499,7 +504,7 @@ void omapdss_device_put(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, unsigned int port); struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, - bool display_only); + enum omap_dss_device_type type); int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *src, struct omap_dss_device *dst); @@ -511,6 +516,8 @@ int omap_dss_get_num_overlay_managers(void); int omap_dss_get_num_overlays(void); +#define for_each_dss_output(d) \ + while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL) int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev); int omapdss_output_unset_device(struct omap_dss_device *out); -- GitLab From bea131966ffab271e8c3b33e37244ad340fb7876 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 5 Mar 2018 14:28:06 +0200 Subject: [PATCH 0933/1692] drm/omap: dss: Add function to retrieve display for an output Add a new omapdss_display_get() function to retrieve the omap_dss_device for a given DSS output. This will be used when reversing the direction of the DSS pipeline handling logic. 
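As a rough usage sketch (illustrative only, not part of the patch), a caller holding a DSS output could resolve the display at the end of its chain with the new helper and later drop the reference with the existing omapdss_device_put(); here 'output' stands for any registered output device:

	struct omap_dss_device *display;

	display = omapdss_display_get(output);	/* walks ->next, takes a reference */
	/* ... use the display ... */
	omapdss_device_put(display);		/* release the reference */
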
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/display.c | 9 +++++++++ drivers/gpu/drm/omapdrm/dss/omapdss.h | 1 + 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 53cb46f6503d..34b2a4ef63a4 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -49,3 +49,12 @@ void omapdss_display_init(struct omap_dss_device *dssdev) "display%u", id); } EXPORT_SYMBOL_GPL(omapdss_display_init); + +struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output) +{ + while (output->next) + output = output->next; + + return omapdss_device_get(output); +} +EXPORT_SYMBOL_GPL(omapdss_display_get); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 5cbbfb16369b..1621d1eaed42 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -496,6 +496,7 @@ static inline bool omapdss_is_initialized(void) #define for_each_dss_display(d) \ while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL) void omapdss_display_init(struct omap_dss_device *dssdev); +struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output); void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); -- GitLab From a48bc6ac2c6cd85bc079fc859ab14ea844e812cd Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 4 Mar 2018 23:55:56 +0200 Subject: [PATCH 0934/1692] drm/omap: dss: Remove duplicated parameter to dss_mgr_(dis)connect() The dss_mgr_connect() and dss_mgr_disconnect() functions take two omap_dss_device pointers as parameters, which are always set to the same value by all callers. Remove the duplicated pointer. 
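For illustration, a typical call site is simplified as follows (the dpi.c hunk below shows the real conversion); the manager helpers now derive the channel and the destination from the output device itself:

	/* before */
	r = dss_mgr_connect(&dpi->output, dssdev);
	/* after */
	r = dss_mgr_connect(dssdev);
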
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dpi.c | 8 +++----- drivers/gpu/drm/omapdrm/dss/dsi.c | 8 +++----- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 8 +++----- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 8 +++----- drivers/gpu/drm/omapdrm/dss/omapdss.h | 6 ++---- drivers/gpu/drm/omapdrm/dss/output.c | 9 ++++----- drivers/gpu/drm/omapdrm/dss/sdi.c | 8 +++----- drivers/gpu/drm/omapdrm/dss/venc.c | 8 +++----- 8 files changed, 24 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index ae35aa1bf2c5..533d87e11bf5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -652,7 +652,7 @@ static int dpi_connect(struct omap_dss_device *dssdev, dpi_init_pll(dpi); - r = dss_mgr_connect(&dpi->output, dssdev); + r = dss_mgr_connect(dssdev); if (r) return r; @@ -660,7 +660,7 @@ static int dpi_connect(struct omap_dss_device *dssdev, if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(&dpi->output, dssdev); + dss_mgr_disconnect(dssdev); return r; } @@ -670,11 +670,9 @@ static int dpi_connect(struct omap_dss_device *dssdev, static void dpi_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); - omapdss_output_unset_device(dssdev); - dss_mgr_disconnect(&dpi->output, dssdev); + dss_mgr_disconnect(dssdev); } static const struct omap_dss_device_ops dpi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 631bf5805649..d5ae01529901 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4915,7 +4915,7 @@ static int dsi_connect(struct omap_dss_device *dssdev, if (r) return r; - r = dss_mgr_connect(&dsi->output, dssdev); + r = dss_mgr_connect(dssdev); if (r) return r; @@ -4923,7 +4923,7 @@ static int dsi_connect(struct omap_dss_device *dssdev, if (r) { DSSERR("failed to connect output to new device: %s\n", dssdev->name); - dss_mgr_disconnect(&dsi->output, dssdev); + dss_mgr_disconnect(dssdev); return r; } @@ -4933,11 +4933,9 @@ static int dsi_connect(struct omap_dss_device *dssdev, static void dsi_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct dsi_data *dsi = to_dsi_data(dssdev); - omapdss_output_unset_device(dssdev); - dss_mgr_disconnect(&dsi->output, dssdev); + dss_mgr_disconnect(dssdev); } static const struct omap_dss_device_ops dsi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 118c015624b9..5216c5554741 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -458,7 +458,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev, if (r) return r; - r = dss_mgr_connect(&hdmi->output, dssdev); + r = dss_mgr_connect(dssdev); if (r) return r; @@ -466,7 +466,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev, if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(&hdmi->output, dssdev); + dss_mgr_disconnect(dssdev); return r; } @@ -476,11 +476,9 @@ static int hdmi_connect(struct omap_dss_device *dssdev, static void hdmi_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - omapdss_output_unset_device(dssdev); - dss_mgr_disconnect(&hdmi->output, dssdev); + dss_mgr_disconnect(dssdev); } static int 
hdmi_read_edid(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 7af60ca4e7b2..363bc5843e0f 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -461,7 +461,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev, if (r) return r; - r = dss_mgr_connect(&hdmi->output, dssdev); + r = dss_mgr_connect(dssdev); if (r) return r; @@ -469,7 +469,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev, if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(&hdmi->output, dssdev); + dss_mgr_disconnect(dssdev); return r; } @@ -479,11 +479,9 @@ static int hdmi_connect(struct omap_dss_device *dssdev, static void hdmi_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - omapdss_output_unset_device(dssdev); - dss_mgr_disconnect(&hdmi->output, dssdev); + dss_mgr_disconnect(dssdev); } static int hdmi_read_edid(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 1621d1eaed42..5d03e9066a33 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -587,10 +587,8 @@ int dss_install_mgr_ops(struct dss_device *dss, struct omap_drm_private *priv); void dss_uninstall_mgr_ops(struct dss_device *dss); -int dss_mgr_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); -void dss_mgr_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); +int dss_mgr_connect(struct omap_dss_device *dssdev); +void dss_mgr_disconnect(struct omap_dss_device *dssdev); void dss_mgr_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm); void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 191b2e801257..2f7a019d059e 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -109,18 +109,17 @@ void dss_uninstall_mgr_ops(struct dss_device *dss) } EXPORT_SYMBOL(dss_uninstall_mgr_ops); -int dss_mgr_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) +int dss_mgr_connect(struct omap_dss_device *dssdev) { return dssdev->dss->mgr_ops->connect(dssdev->dss->mgr_ops_priv, - dssdev->dispc_channel, dst); + dssdev->dispc_channel, dssdev); } EXPORT_SYMBOL(dss_mgr_connect); -void dss_mgr_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +void dss_mgr_disconnect(struct omap_dss_device *dssdev) { dssdev->dss->mgr_ops->disconnect(dssdev->dss->mgr_ops_priv, - dssdev->dispc_channel, dst); + dssdev->dispc_channel, dssdev); } EXPORT_SYMBOL(dss_mgr_disconnect); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index fd7c11ebda5d..4b3a24d98065 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -281,7 +281,7 @@ static int sdi_connect(struct omap_dss_device *dssdev, if (r) return r; - r = dss_mgr_connect(&sdi->output, dssdev); + r = dss_mgr_connect(dssdev); if (r) return r; @@ -289,7 +289,7 @@ static int sdi_connect(struct omap_dss_device *dssdev, if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(&sdi->output, dssdev); + dss_mgr_disconnect(dssdev); return r; } @@ -299,11 +299,9 @@ static int sdi_connect(struct omap_dss_device *dssdev, static void sdi_disconnect(struct 
omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct sdi_device *sdi = dssdev_to_sdi(dssdev); - omapdss_output_unset_device(dssdev); - dss_mgr_disconnect(&sdi->output, dssdev); + dss_mgr_disconnect(dssdev); } static const struct omap_dss_device_ops sdi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 298e86cc9e14..24a2b9a41864 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -720,7 +720,7 @@ static int venc_connect(struct omap_dss_device *dssdev, if (r) return r; - r = dss_mgr_connect(&venc->output, dssdev); + r = dss_mgr_connect(dssdev); if (r) return r; @@ -728,7 +728,7 @@ static int venc_connect(struct omap_dss_device *dssdev, if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(&venc->output, dssdev); + dss_mgr_disconnect(dssdev); return r; } @@ -738,11 +738,9 @@ static int venc_connect(struct omap_dss_device *dssdev, static void venc_disconnect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct venc_device *venc = dssdev_to_venc(dssdev); - omapdss_output_unset_device(dssdev); - dss_mgr_disconnect(&venc->output, dssdev); + dss_mgr_disconnect(dssdev); } static const struct omap_dss_device_ops venc_ops = { -- GitLab From 8a36357ae3b2d1b4647d20bc806d524c21132572 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 5 Mar 2018 00:10:55 +0200 Subject: [PATCH 0935/1692] drm/omap: dss: Get regulators at probe time Regulators for the DPI, DSI, HDMI, SDI and VENC outputs are all looked up when connecting the output omap_dss_device. There's no need to delay regulator handling to that time, get the regulators at probe time. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dpi.c | 69 ++++++++++++++--------------- drivers/gpu/drm/omapdrm/dss/dsi.c | 36 +++------------ drivers/gpu/drm/omapdrm/dss/hdmi4.c | 33 ++++---------- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 31 ++++--------- drivers/gpu/drm/omapdrm/dss/sdi.c | 32 ++++--------- drivers/gpu/drm/omapdrm/dss/venc.c | 32 ++++--------- 6 files changed, 72 insertions(+), 161 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 533d87e11bf5..35d63c686393 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -551,38 +551,6 @@ static int dpi_verify_pll(struct dss_pll *pll) return 0; } -static const struct soc_device_attribute dpi_soc_devices[] = { - { .machine = "OMAP3[456]*" }, - { .machine = "[AD]M37*" }, - { /* sentinel */ } -}; - -static int dpi_init_regulator(struct dpi_data *dpi) -{ - struct regulator *vdds_dsi; - - /* - * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and - * DM37xx only. 
- */ - if (!soc_device_match(dpi_soc_devices)) - return 0; - - if (dpi->vdds_dsi_reg) - return 0; - - vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi"); - if (IS_ERR(vdds_dsi)) { - if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) - DSSERR("can't get VDDS_DSI regulator\n"); - return PTR_ERR(vdds_dsi); - } - - dpi->vdds_dsi_reg = vdds_dsi; - - return 0; -} - static void dpi_init_pll(struct dpi_data *dpi) { struct dss_pll *pll; @@ -646,10 +614,6 @@ static int dpi_connect(struct omap_dss_device *dssdev, struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); int r; - r = dpi_init_regulator(dpi); - if (r) - return r; - dpi_init_pll(dpi); r = dss_mgr_connect(dssdev); @@ -737,6 +701,35 @@ static void dpi_uninit_output_port(struct device_node *port) omapdss_device_unregister(out); } +static const struct soc_device_attribute dpi_soc_devices[] = { + { .machine = "OMAP3[456]*" }, + { .machine = "[AD]M37*" }, + { /* sentinel */ } +}; + +static int dpi_init_regulator(struct dpi_data *dpi) +{ + struct regulator *vdds_dsi; + + /* + * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and + * DM37xx only. + */ + if (!soc_device_match(dpi_soc_devices)) + return 0; + + vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi"); + if (IS_ERR(vdds_dsi)) { + if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) + DSSERR("can't get VDDS_DSI regulator\n"); + return PTR_ERR(vdds_dsi); + } + + dpi->vdds_dsi_reg = vdds_dsi; + + return 0; +} + int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, struct device_node *port, enum dss_model dss_model) { @@ -769,6 +762,10 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, mutex_init(&dpi->lock); + r = dpi_init_regulator(dpi); + if (r) + return r; + return dpi_init_output_port(dpi, port); } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index d5ae01529901..41a98021d5bf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1137,26 +1137,6 @@ static void dsi_runtime_put(struct dsi_data *dsi) WARN_ON(r < 0 && r != -ENOSYS); } -static int dsi_regulator_init(struct dsi_data *dsi) -{ - struct regulator *vdds_dsi; - - if (dsi->vdds_dsi_reg != NULL) - return 0; - - vdds_dsi = devm_regulator_get(dsi->dev, "vdd"); - - if (IS_ERR(vdds_dsi)) { - if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) - DSSERR("can't get DSI VDD regulator\n"); - return PTR_ERR(vdds_dsi); - } - - dsi->vdds_dsi_reg = vdds_dsi; - - return 0; -} - static void _dsi_print_reset_status(struct dsi_data *dsi) { u32 l; @@ -1353,10 +1333,6 @@ static int dsi_pll_enable(struct dss_pll *pll) DSSDBG("PLL init\n"); - r = dsi_regulator_init(dsi); - if (r) - return r; - r = dsi_runtime_get(dsi); if (r) return r; @@ -4908,13 +4884,8 @@ static int dsi_get_clocks(struct dsi_data *dsi) static int dsi_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct dsi_data *dsi = to_dsi_data(dssdev); int r; - r = dsi_regulator_init(dsi); - if (r) - return r; - r = dss_mgr_connect(dssdev); if (r) return r; @@ -5384,6 +5355,13 @@ static int dsi_probe(struct platform_device *pdev) return r; } + dsi->vdds_dsi_reg = devm_regulator_get(dev, "vdd"); + if (IS_ERR(dsi->vdds_dsi_reg)) { + if (PTR_ERR(dsi->vdds_dsi_reg) != -EPROBE_DEFER) + DSSERR("can't get DSI VDD regulator\n"); + return PTR_ERR(dsi->vdds_dsi_reg); + } + soc = soc_device_match(dsi_soc_devices); if (soc) dsi->data = soc->data; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 5216c5554741..6edb85898a7d 100644 
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -108,26 +108,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int hdmi_init_regulator(struct omap_hdmi *hdmi) -{ - struct regulator *reg; - - if (hdmi->vdda_reg != NULL) - return 0; - - reg = devm_regulator_get(&hdmi->pdev->dev, "vdda"); - - if (IS_ERR(reg)) { - if (PTR_ERR(reg) != -EPROBE_DEFER) - DSSERR("can't get VDDA regulator\n"); - return PTR_ERR(reg); - } - - hdmi->vdda_reg = reg; - - return 0; -} - static int hdmi_power_on_core(struct omap_hdmi *hdmi) { int r; @@ -451,13 +431,8 @@ void hdmi4_core_disable(struct hdmi_core_data *core) static int hdmi_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); int r; - r = hdmi_init_regulator(hdmi); - if (r) - return r; - r = dss_mgr_connect(dssdev); if (r) return r; @@ -827,6 +802,14 @@ static int hdmi4_probe(struct platform_device *pdev) goto err_free; } + hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda"); + if (IS_ERR(hdmi->vdda_reg)) { + r = PTR_ERR(hdmi->vdda_reg); + if (r != -EPROBE_DEFER) + DSSERR("can't get VDDA regulator\n"); + goto err_free; + } + pm_runtime_enable(&pdev->dev); r = hdmi4_init_output(hdmi); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 363bc5843e0f..db20a578091b 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -117,24 +117,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int hdmi_init_regulator(struct omap_hdmi *hdmi) -{ - struct regulator *reg; - - if (hdmi->vdda_reg != NULL) - return 0; - - reg = devm_regulator_get(&hdmi->pdev->dev, "vdda"); - if (IS_ERR(reg)) { - DSSERR("can't get VDDA regulator\n"); - return PTR_ERR(reg); - } - - hdmi->vdda_reg = reg; - - return 0; -} - static int hdmi_power_on_core(struct omap_hdmi *hdmi) { int r; @@ -454,13 +436,8 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi) static int hdmi_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); int r; - r = hdmi_init_regulator(hdmi); - if (r) - return r; - r = dss_mgr_connect(dssdev); if (r) return r; @@ -817,6 +794,14 @@ static int hdmi5_probe(struct platform_device *pdev) goto err_free; } + hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda"); + if (IS_ERR(hdmi->vdda_reg)) { + r = PTR_ERR(hdmi->vdda_reg); + if (r != -EPROBE_DEFER) + DSSERR("can't get VDDA regulator\n"); + goto err_free; + } + pm_runtime_enable(&pdev->dev); r = hdmi5_init_output(hdmi); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 4b3a24d98065..1fb25e2c5f87 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -252,35 +252,11 @@ static int sdi_check_timings(struct omap_dss_device *dssdev, return 0; } -static int sdi_init_regulator(struct sdi_device *sdi) -{ - struct regulator *vdds_sdi; - - if (sdi->vdds_sdi_reg) - return 0; - - vdds_sdi = devm_regulator_get(&sdi->pdev->dev, "vdds_sdi"); - if (IS_ERR(vdds_sdi)) { - if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER) - DSSERR("can't get VDDS_SDI regulator\n"); - return PTR_ERR(vdds_sdi); - } - - sdi->vdds_sdi_reg = vdds_sdi; - - return 0; -} - static int sdi_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct sdi_device *sdi = dssdev_to_sdi(dssdev); int r; - r = sdi_init_regulator(sdi); - if (r) - return r; - r = 
dss_mgr_connect(dssdev); if (r) return r; @@ -379,6 +355,14 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, sdi->pdev = pdev; port->data = sdi; + sdi->vdds_sdi_reg = devm_regulator_get(&pdev->dev, "vdds_sdi"); + if (IS_ERR(sdi->vdds_sdi_reg)) { + r = PTR_ERR(sdi->vdds_sdi_reg); + if (r != -EPROBE_DEFER) + DSSERR("can't get VDDS_SDI regulator\n"); + goto err_free; + } + r = sdi_init_output(sdi); if (r) goto err_free; diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 24a2b9a41864..7aa06b796481 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -614,25 +614,6 @@ static int venc_check_timings(struct omap_dss_device *dssdev, } } -static int venc_init_regulator(struct venc_device *venc) -{ - struct regulator *vdda_dac; - - if (venc->vdda_dac_reg != NULL) - return 0; - - vdda_dac = devm_regulator_get(&venc->pdev->dev, "vdda"); - if (IS_ERR(vdda_dac)) { - if (PTR_ERR(vdda_dac) != -EPROBE_DEFER) - DSSERR("can't get VDDA_DAC regulator\n"); - return PTR_ERR(vdda_dac); - } - - venc->vdda_dac_reg = vdda_dac; - - return 0; -} - static int venc_dump_regs(struct seq_file *s, void *p) { struct venc_device *venc = s->private; @@ -713,13 +694,8 @@ static int venc_get_clocks(struct venc_device *venc) static int venc_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) { - struct venc_device *venc = dssdev_to_venc(dssdev); int r; - r = venc_init_regulator(venc); - if (r) - return r; - r = dss_mgr_connect(dssdev); if (r) return r; @@ -908,6 +884,14 @@ static int venc_probe(struct platform_device *pdev) goto err_free; } + venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda"); + if (IS_ERR(venc->vdda_dac_reg)) { + r = PTR_ERR(venc->vdda_dac_reg); + if (r != -EPROBE_DEFER) + DSSERR("can't get VDDA_DAC regulator\n"); + goto err_free; + } + r = venc_get_clocks(venc); if (r) goto err_free; -- GitLab From f96993630445b7bf0aebc67288f804035ec46fc6 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 5 Mar 2018 14:47:47 +0200 Subject: [PATCH 0936/1692] drm/omap: Remove unneeded variable assignments in omap_modeset_init The crtc_idx and plane_idw variables in the main loop are always equal to the loop counter i, use it instead. Don't unnecessarily initialize dssdev to NULL. 
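As a small before/after illustration taken from the hunk below, the per-iteration index variables disappear in favour of the loop counter:

	/* before */
	encoder->possible_crtcs = (1 << crtc_idx);
	/* after */
	encoder->possible_crtcs = 1 << i;
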
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_drv.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index df90f82ef217..17d17a790c78 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -239,10 +239,9 @@ static int omap_modeset_init_properties(struct drm_device *dev) static int omap_modeset_init(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; - struct omap_dss_device *dssdev = NULL; int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc); int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc); - int num_crtcs, crtc_idx, plane_idx; + int num_crtcs; unsigned int i; int ret; u32 plane_crtc_mask; @@ -275,10 +274,6 @@ static int omap_modeset_init(struct drm_device *dev) /* All planes can be put to any CRTC */ plane_crtc_mask = (1 << num_crtcs) - 1; - dssdev = NULL; - - crtc_idx = 0; - plane_idx = 0; for (i = 0; i < priv->num_dssdevs; i++) { struct omap_dss_device *dssdev = priv->dssdevs[i]; struct drm_connector *connector; @@ -295,7 +290,7 @@ static int omap_modeset_init(struct drm_device *dev) if (!connector) return -ENOMEM; - plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_PRIMARY, + plane = omap_plane_init(dev, i, DRM_PLANE_TYPE_PRIMARY, plane_crtc_mask); if (IS_ERR(plane)) return PTR_ERR(plane); @@ -305,27 +300,24 @@ static int omap_modeset_init(struct drm_device *dev) return PTR_ERR(crtc); drm_connector_attach_encoder(connector, encoder); - encoder->possible_crtcs = (1 << crtc_idx); + encoder->possible_crtcs = 1 << i; priv->crtcs[priv->num_crtcs++] = crtc; priv->planes[priv->num_planes++] = plane; priv->encoders[priv->num_encoders++] = encoder; priv->connectors[priv->num_connectors++] = connector; - - plane_idx++; - crtc_idx++; } /* * Create normal planes for the remaining overlays: */ - for (; plane_idx < num_ovls; plane_idx++) { + for (; i < num_ovls; i++) { struct drm_plane *plane; if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes))) return -EINVAL; - plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_OVERLAY, + plane = omap_plane_init(dev, i, DRM_PLANE_TYPE_OVERLAY, plane_crtc_mask); if (IS_ERR(plane)) return PTR_ERR(plane); -- GitLab From ac3b13189333c224e800b3421ac89536d0109b78 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 5 Mar 2018 19:11:30 +0200 Subject: [PATCH 0937/1692] drm/omap: Create all planes before CRTCs Creating all the planes in a single location instead of creating them per-CRTC with remaining planes then created in a second step simplifies the logic. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_drv.c | 45 +++++++++++++----------------- 1 file changed, 19 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 17d17a790c78..f68948bb2847 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -271,14 +271,30 @@ static int omap_modeset_init(struct drm_device *dev) return -EINVAL; } - /* All planes can be put to any CRTC */ + /* Create all planes first. They can all be put to any CRTC. */ plane_crtc_mask = (1 << num_crtcs) - 1; + for (i = 0; i < num_ovls; i++) { + enum drm_plane_type type = i < priv->num_dssdevs + ? 
DRM_PLANE_TYPE_PRIMARY + : DRM_PLANE_TYPE_OVERLAY; + struct drm_plane *plane; + + if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes))) + return -EINVAL; + + plane = omap_plane_init(dev, i, type, plane_crtc_mask); + if (IS_ERR(plane)) + return PTR_ERR(plane); + + priv->planes[priv->num_planes++] = plane; + } + + /* Create the CRTCs, encoders and connectors. */ for (i = 0; i < priv->num_dssdevs; i++) { struct omap_dss_device *dssdev = priv->dssdevs[i]; struct drm_connector *connector; struct drm_encoder *encoder; - struct drm_plane *plane; struct drm_crtc *crtc; encoder = omap_encoder_init(dev, dssdev); @@ -290,12 +306,7 @@ static int omap_modeset_init(struct drm_device *dev) if (!connector) return -ENOMEM; - plane = omap_plane_init(dev, i, DRM_PLANE_TYPE_PRIMARY, - plane_crtc_mask); - if (IS_ERR(plane)) - return PTR_ERR(plane); - - crtc = omap_crtc_init(dev, plane, dssdev); + crtc = omap_crtc_init(dev, priv->planes[i], dssdev); if (IS_ERR(crtc)) return PTR_ERR(crtc); @@ -303,28 +314,10 @@ static int omap_modeset_init(struct drm_device *dev) encoder->possible_crtcs = 1 << i; priv->crtcs[priv->num_crtcs++] = crtc; - priv->planes[priv->num_planes++] = plane; priv->encoders[priv->num_encoders++] = encoder; priv->connectors[priv->num_connectors++] = connector; } - /* - * Create normal planes for the remaining overlays: - */ - for (; i < num_ovls; i++) { - struct drm_plane *plane; - - if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes))) - return -EINVAL; - - plane = omap_plane_init(dev, i, DRM_PLANE_TYPE_OVERLAY, - plane_crtc_mask); - if (IS_ERR(plane)) - return PTR_ERR(plane); - - priv->planes[priv->num_planes++] = plane; - } - DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n", priv->num_planes, priv->num_crtcs, priv->num_encoders, priv->num_connectors); -- GitLab From 2ee767922e1bc7ede9ceb7aed9a14141480836a7 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 5 Mar 2018 15:02:22 +0200 Subject: [PATCH 0938/1692] drm/omap: Group CRTC, encoder, connector and dssdev in a structure Create an omap_drm_pipeline structure to model display pipelines, made of a CRTC, an encoder, a connector and a DSS display device. This allows grouping related parameters together instead of storing them in independent arrays and thus improves code readability. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_crtc.c | 4 +- drivers/gpu/drm/omapdrm/omap_drv.c | 144 +++++++++++++-------------- drivers/gpu/drm/omapdrm/omap_drv.h | 20 ++-- drivers/gpu/drm/omapdrm/omap_fbdev.c | 4 +- drivers/gpu/drm/omapdrm/omap_irq.c | 4 +- 5 files changed, 84 insertions(+), 92 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index c5f1915aef67..f5bf553a862f 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -474,8 +474,8 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) * has been changed to the DRM model. 
*/ - for (i = 0; i < priv->num_encoders; ++i) { - struct drm_encoder *encoder = priv->encoders[i]; + for (i = 0; i < priv->num_pipes; ++i) { + struct drm_encoder *encoder = priv->pipes[i].encoder; if (encoder->crtc == crtc) { struct omap_dss_device *dssdev; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index f68948bb2847..f10e5053580b 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -129,9 +129,9 @@ static const struct drm_mode_config_funcs omap_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static int get_connector_type(struct omap_dss_device *dssdev) +static int get_connector_type(struct omap_dss_device *display) { - switch (dssdev->type) { + switch (display->type) { case OMAP_DISPLAY_TYPE_HDMI: return DRM_MODE_CONNECTOR_HDMIA; case OMAP_DISPLAY_TYPE_DVI: @@ -151,65 +151,65 @@ static int get_connector_type(struct omap_dss_device *dssdev) } } -static void omap_disconnect_dssdevs(struct drm_device *ddev) +static void omap_disconnect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; unsigned int i; - for (i = 0; i < priv->num_dssdevs; i++) { - struct omap_dss_device *dssdev = priv->dssdevs[i]; + for (i = 0; i < priv->num_pipes; i++) { + struct omap_dss_device *display = priv->pipes[i].display; - omapdss_device_disconnect(dssdev, NULL); - priv->dssdevs[i] = NULL; - omapdss_device_put(dssdev); + omapdss_device_disconnect(display, NULL); + priv->pipes[i].display = NULL; + omapdss_device_put(display); } - priv->num_dssdevs = 0; + priv->num_pipes = 0; } -static int omap_compare_dssdevs(const void *a, const void *b) +static int omap_compare_pipes(const void *a, const void *b) { - const struct omap_dss_device *dssdev1 = *(struct omap_dss_device **)a; - const struct omap_dss_device *dssdev2 = *(struct omap_dss_device **)b; + const struct omap_drm_pipeline *pipe1 = a; + const struct omap_drm_pipeline *pipe2 = b; - if (dssdev1->alias_id > dssdev2->alias_id) + if (pipe1->display->alias_id > pipe2->display->alias_id) return 1; - else if (dssdev1->alias_id < dssdev2->alias_id) + else if (pipe1->display->alias_id < pipe2->display->alias_id) return -1; return 0; } -static int omap_connect_dssdevs(struct drm_device *ddev) +static int omap_connect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; - struct omap_dss_device *dssdev = NULL; + struct omap_dss_device *display = NULL; int r; if (!omapdss_stack_is_ready()) return -EPROBE_DEFER; - for_each_dss_display(dssdev) { - r = omapdss_device_connect(priv->dss, dssdev, NULL); + for_each_dss_display(display) { + r = omapdss_device_connect(priv->dss, display, NULL); if (r == -EPROBE_DEFER) { - omapdss_device_put(dssdev); + omapdss_device_put(display); goto cleanup; } else if (r) { - dev_warn(dssdev->dev, "could not connect display: %s\n", - dssdev->name); + dev_warn(display->dev, "could not connect display: %s\n", + display->name); } else { - omapdss_device_get(dssdev); - priv->dssdevs[priv->num_dssdevs++] = dssdev; - if (priv->num_dssdevs == ARRAY_SIZE(priv->dssdevs)) { + omapdss_device_get(display); + priv->pipes[priv->num_pipes++].display = display; + if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) { /* To balance the 'for_each_dss_display' loop */ - omapdss_device_put(dssdev); + omapdss_device_put(display); break; } } } /* Sort the list by DT aliases */ - sort(priv->dssdevs, priv->num_dssdevs, sizeof(priv->dssdevs[0]), - omap_compare_dssdevs, NULL); + sort(priv->pipes, 
priv->num_pipes, sizeof(priv->pipes[0]), + omap_compare_pipes, NULL); return 0; @@ -218,7 +218,7 @@ static int omap_connect_dssdevs(struct drm_device *ddev) * if we are deferring probe, we disconnect the devices we previously * connected */ - omap_disconnect_dssdevs(ddev); + omap_disconnect_pipelines(ddev); return r; } @@ -241,7 +241,6 @@ static int omap_modeset_init(struct drm_device *dev) struct omap_drm_private *priv = dev->dev_private; int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc); int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc); - int num_crtcs; unsigned int i; int ret; u32 plane_crtc_mask; @@ -260,22 +259,17 @@ static int omap_modeset_init(struct drm_device *dev) * configuration does not match the expectations or exceeds * the available resources, the configuration is rejected. */ - num_crtcs = priv->num_dssdevs; - if (num_crtcs > num_mgrs || num_crtcs > num_ovls || - num_crtcs > ARRAY_SIZE(priv->crtcs) || - num_crtcs > ARRAY_SIZE(priv->planes) || - num_crtcs > ARRAY_SIZE(priv->encoders) || - num_crtcs > ARRAY_SIZE(priv->connectors)) { + if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) { dev_err(dev->dev, "%s(): Too many connected displays\n", __func__); return -EINVAL; } /* Create all planes first. They can all be put to any CRTC. */ - plane_crtc_mask = (1 << num_crtcs) - 1; + plane_crtc_mask = (1 << priv->num_pipes) - 1; for (i = 0; i < num_ovls; i++) { - enum drm_plane_type type = i < priv->num_dssdevs + enum drm_plane_type type = i < priv->num_pipes ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; struct drm_plane *plane; @@ -291,36 +285,36 @@ static int omap_modeset_init(struct drm_device *dev) } /* Create the CRTCs, encoders and connectors. */ - for (i = 0; i < priv->num_dssdevs; i++) { - struct omap_dss_device *dssdev = priv->dssdevs[i]; + for (i = 0; i < priv->num_pipes; i++) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + struct omap_dss_device *display = pipe->display; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_crtc *crtc; - encoder = omap_encoder_init(dev, dssdev); + encoder = omap_encoder_init(dev, display); if (!encoder) return -ENOMEM; connector = omap_connector_init(dev, - get_connector_type(dssdev), dssdev, encoder); + get_connector_type(display), display, encoder); if (!connector) return -ENOMEM; - crtc = omap_crtc_init(dev, priv->planes[i], dssdev); + crtc = omap_crtc_init(dev, priv->planes[i], display); if (IS_ERR(crtc)) return PTR_ERR(crtc); drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = 1 << i; - priv->crtcs[priv->num_crtcs++] = crtc; - priv->encoders[priv->num_encoders++] = encoder; - priv->connectors[priv->num_connectors++] = connector; + pipe->crtc = crtc; + pipe->encoder = encoder; + pipe->connector = connector; } - DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n", - priv->num_planes, priv->num_crtcs, priv->num_encoders, - priv->num_connectors); + DBG("registered %u planes, %u crtcs/encoders/connectors\n", + priv->num_planes, priv->num_pipes); dev->mode_config.min_width = 8; dev->mode_config.min_height = 2; @@ -355,11 +349,11 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev) struct omap_drm_private *priv = ddev->dev_private; int i; - for (i = 0; i < priv->num_dssdevs; i++) { - struct omap_dss_device *dssdev = priv->dssdevs[i]; + for (i = 0; i < priv->num_pipes; i++) { + struct omap_dss_device *display = priv->pipes[i].display; - if (dssdev->driver->enable_hpd) - dssdev->driver->enable_hpd(dssdev); + if 
(display->driver->enable_hpd) + display->driver->enable_hpd(display); } } @@ -371,11 +365,11 @@ static void omap_modeset_disable_external_hpd(struct drm_device *ddev) struct omap_drm_private *priv = ddev->dev_private; int i; - for (i = 0; i < priv->num_dssdevs; i++) { - struct omap_dss_device *dssdev = priv->dssdevs[i]; + for (i = 0; i < priv->num_pipes; i++) { + struct omap_dss_device *display = priv->pipes[i].display; - if (dssdev->driver->disable_hpd) - dssdev->driver->disable_hpd(dssdev); + if (display->driver->disable_hpd) + display->driver->disable_hpd(display); } } @@ -561,7 +555,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) omap_crtc_pre_init(priv); - ret = omap_connect_dssdevs(ddev); + ret = omap_connect_pipelines(ddev); if (ret) goto err_crtc_uninit; @@ -586,14 +580,14 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) } /* Initialize vblank handling, start with all CRTCs disabled. */ - ret = drm_vblank_init(ddev, priv->num_crtcs); + ret = drm_vblank_init(ddev, priv->num_pipes); if (ret) { dev_err(priv->dev, "could not init vblank\n"); goto err_cleanup_modeset; } - for (i = 0; i < priv->num_crtcs; i++) - drm_crtc_vblank_off(priv->crtcs[i]); + for (i = 0; i < priv->num_pipes; i++) + drm_crtc_vblank_off(priv->pipes[i].crtc); omap_fbdev_init(ddev); @@ -621,7 +615,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) err_gem_deinit: omap_gem_deinit(ddev); destroy_workqueue(priv->wq); - omap_disconnect_dssdevs(ddev); + omap_disconnect_pipelines(ddev); err_crtc_uninit: omap_crtc_pre_uninit(priv); drm_dev_unref(ddev); @@ -650,7 +644,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) destroy_workqueue(priv->wq); - omap_disconnect_dssdevs(ddev); + omap_disconnect_pipelines(ddev); omap_crtc_pre_uninit(priv); drm_dev_unref(ddev); @@ -700,17 +694,17 @@ static int omap_drm_suspend_all_displays(struct drm_device *ddev) struct omap_drm_private *priv = ddev->dev_private; int i; - for (i = 0; i < priv->num_dssdevs; i++) { - struct omap_dss_device *dssdev = priv->dssdevs[i]; + for (i = 0; i < priv->num_pipes; i++) { + struct omap_dss_device *display = priv->pipes[i].display; - if (!dssdev->driver) + if (!display->driver) continue; - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { - dssdev->driver->disable(dssdev); - dssdev->activate_after_resume = true; + if (display->state == OMAP_DSS_DISPLAY_ACTIVE) { + display->driver->disable(display); + display->activate_after_resume = true; } else { - dssdev->activate_after_resume = false; + display->activate_after_resume = false; } } @@ -722,15 +716,15 @@ static int omap_drm_resume_all_displays(struct drm_device *ddev) struct omap_drm_private *priv = ddev->dev_private; int i; - for (i = 0; i < priv->num_dssdevs; i++) { - struct omap_dss_device *dssdev = priv->dssdevs[i]; + for (i = 0; i < priv->num_pipes; i++) { + struct omap_dss_device *display = priv->pipes[i].display; - if (!dssdev->driver) + if (!display->driver) continue; - if (dssdev->activate_after_resume) { - dssdev->driver->enable(dssdev); - dssdev->activate_after_resume = false; + if (display->activate_after_resume) { + display->driver->enable(display); + display->activate_after_resume = false; } } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 006c868c528d..bc9b954fcc31 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -45,6 +45,13 @@ struct omap_drm_usergart; +struct omap_drm_pipeline { + struct drm_crtc *crtc; + 
struct drm_encoder *encoder; + struct drm_connector *connector; + struct omap_dss_device *display; +}; + struct omap_drm_private { struct drm_device *ddev; struct device *dev; @@ -54,21 +61,12 @@ struct omap_drm_private { struct dispc_device *dispc; const struct dispc_ops *dispc_ops; - unsigned int num_dssdevs; - struct omap_dss_device *dssdevs[8]; - - unsigned int num_crtcs; - struct drm_crtc *crtcs[8]; + unsigned int num_pipes; + struct omap_drm_pipeline pipes[8]; unsigned int num_planes; struct drm_plane *planes[8]; - unsigned int num_encoders; - struct drm_encoder *encoders[8]; - - unsigned int num_connectors; - struct drm_connector *connectors[8]; - struct drm_fb_helper *fbdev; struct workqueue_struct *wq; diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index d958cc813a94..b445309b0143 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -243,7 +243,7 @@ void omap_fbdev_init(struct drm_device *dev) struct drm_fb_helper *helper; int ret = 0; - if (!priv->num_crtcs || !priv->num_connectors) + if (!priv->num_pipes) return; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); @@ -256,7 +256,7 @@ void omap_fbdev_init(struct drm_device *dev) drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs); - ret = drm_fb_helper_init(dev, helper, priv->num_connectors); + ret = drm_fb_helper_init(dev, helper, priv->num_pipes); if (ret) goto fail; diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index c85115049f86..329ad26d6d50 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -206,8 +206,8 @@ static irqreturn_t omap_irq_handler(int irq, void *arg) VERB("irqs: %08x", irqstatus); - for (id = 0; id < priv->num_crtcs; id++) { - struct drm_crtc *crtc = priv->crtcs[id]; + for (id = 0; id < priv->num_pipes; id++) { + struct drm_crtc *crtc = priv->pipes[id].crtc; enum omap_channel channel = omap_crtc_channel(crtc); if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) { -- GitLab From 511afb44d72aa7b6b871fa71f829afaaa27e84f0 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 4 Mar 2018 23:42:36 +0200 Subject: [PATCH 0939/1692] drm/omap: Reverse direction of DSS device (dis)connect operations The omapdrm and omapdss drivers are architectured based on display pipelines made of multiple components handled from sink (display) to source (DSS output). This is incompatible with the DRM bridge and panel APIs that handle components from source to sink. To reconcile the omapdrm and omapdss drivers with the DRM bridge and panel model, we need to reverse the direction of the DSS device operations. Start with the connect and disconnect operations. 
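As a rough sketch of the new calling convention (modeled on the opa362/tfp410 conversions below; the function names are illustrative), an encoder in the middle of the pipeline now receives the source and destination devices and forwards the connection towards the sink through its ->next pointer:

	static int example_encoder_connect(struct omap_dss_device *src,
					   struct omap_dss_device *dst)
	{
		/* Propagate the connect from source to sink via the next device. */
		return omapdss_device_connect(dst->dss, dst, dst->next);
	}

	static void example_encoder_disconnect(struct omap_dss_device *src,
					       struct omap_dss_device *dst)
	{
		omapdss_device_disconnect(dst, dst->next);
	}
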
Signed-off-by: Laurent Pinchart Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 27 ++---------- .../gpu/drm/omapdrm/displays/connector-dvi.c | 27 ++---------- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 27 ++---------- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 34 ++++----------- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 34 ++++----------- .../drm/omapdrm/displays/encoder-tpd12s015.c | 32 +++++--------- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 27 ++---------- .../gpu/drm/omapdrm/displays/panel-dsi-cm.c | 42 +++++-------------- .../displays/panel-lgphilips-lb035q02.c | 28 +++---------- .../omapdrm/displays/panel-nec-nl8048hl11.c | 27 ++---------- .../displays/panel-sharp-ls037v7dw01.c | 27 ++---------- .../omapdrm/displays/panel-sony-acx565akm.c | 27 ++---------- .../omapdrm/displays/panel-tpo-td028ttec1.c | 27 ++---------- .../omapdrm/displays/panel-tpo-td043mtea1.c | 27 ++---------- drivers/gpu/drm/omapdrm/dss/base.c | 32 +++++++------- drivers/gpu/drm/omapdrm/dss/dpi.c | 32 +++++++++----- drivers/gpu/drm/omapdrm/dss/dsi.c | 32 +++++++++----- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 30 ++++++++----- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 30 ++++++++----- drivers/gpu/drm/omapdrm/dss/omapdss.h | 6 ++- drivers/gpu/drm/omapdrm/dss/sdi.c | 30 ++++++++----- drivers/gpu/drm/omapdrm/dss/venc.c | 30 ++++++++----- drivers/gpu/drm/omapdrm/omap_drv.c | 35 +++++++++------- drivers/gpu/drm/omapdrm/omap_drv.h | 1 + 24 files changed, 239 insertions(+), 432 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index cda6c312ad05..d59b4f2e22dc 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -41,33 +41,15 @@ static const struct videomode tvc_pal_vm = { #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int tvc_connect(struct omap_dss_device *dssdev) +static int tvc_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? 
PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void tvc_disconnect(struct omap_dss_device *dssdev) +static void tvc_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int tvc_enable(struct omap_dss_device *dssdev) @@ -184,7 +166,6 @@ static int __exit tvc_remove(struct platform_device *pdev) omapdss_device_unregister(&ddata->dssdev); tvc_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index f8510cd7b166..39e7d0be887f 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -56,33 +56,15 @@ struct panel_drv_data { #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int dvic_connect(struct omap_dss_device *dssdev) +static int dvic_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void dvic_disconnect(struct omap_dss_device *dssdev) +static void dvic_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int dvic_enable(struct omap_dss_device *dssdev) @@ -405,7 +387,6 @@ static int __exit dvic_remove(struct platform_device *pdev) omapdss_device_unregister(&ddata->dssdev); dvic_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); i2c_put_adapter(ddata->i2c_adapter); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 6eb4c24d6aa7..e9878da5bfdb 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -51,33 +51,15 @@ struct panel_drv_data { #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int hdmic_connect(struct omap_dss_device *dssdev) +static int hdmic_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? 
PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void hdmic_disconnect(struct omap_dss_device *dssdev) +static void hdmic_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int hdmic_enable(struct omap_dss_device *dssdev) @@ -364,7 +346,6 @@ static int __exit hdmic_remove(struct platform_device *pdev) omapdss_device_unregister(&ddata->dssdev); hdmic_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index f661ba8f3fa0..3243e5f9bd06 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -31,36 +31,16 @@ struct panel_drv_data { #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int opa362_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int opa362_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } -static void opa362_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void opa362_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, &ddata->dssdev); - - omapdss_device_put(src); + omapdss_device_disconnect(dst, dst->next); } static int opa362_enable(struct omap_dss_device *dssdev) @@ -196,7 +176,7 @@ static int __exit opa362_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(NULL, dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 3be35ba564da..7114ea672e69 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -27,36 +27,16 @@ struct panel_drv_data { #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int tfp410_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int tfp410_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } -static void tfp410_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void tfp410_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct 
panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, &ddata->dssdev); - - omapdss_device_put(src); + omapdss_device_disconnect(dst, dst->next); } static int tfp410_enable(struct omap_dss_device *dssdev) @@ -219,7 +199,7 @@ static int __exit tfp410_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(NULL, dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index cee53346f6fc..c99e55487d38 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -35,24 +35,15 @@ struct panel_drv_data { #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int tpd_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int tpd_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src; + struct panel_drv_data *ddata = to_panel_data(dst); int r; - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(src); - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); + r = omapdss_device_connect(dst->dss, dst, dst->next); + if (r) return r; - } gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1); gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1); @@ -63,18 +54,15 @@ static int tpd_connect(struct omap_dss_device *dssdev, return 0; } -static void tpd_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void tpd_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct panel_drv_data *ddata = to_panel_data(dst); gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0); gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0); - omapdss_device_disconnect(src, &ddata->dssdev); - - omapdss_device_put(src); + omapdss_device_disconnect(dst, dst->next); } static int tpd_enable(struct omap_dss_device *dssdev) @@ -328,7 +316,7 @@ static int __exit tpd_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(NULL, dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 8c17ad4ddf84..91f99c95c4c4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -34,33 +34,15 @@ struct panel_drv_data { #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) -static int panel_dpi_connect(struct omap_dss_device *dssdev) +static int panel_dpi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? 
PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void panel_dpi_disconnect(struct omap_dss_device *dssdev) +static void panel_dpi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int panel_dpi_enable(struct omap_dss_device *dssdev) @@ -233,7 +215,6 @@ static int __exit panel_dpi_remove(struct platform_device *pdev) omapdss_device_unregister(dssdev); panel_dpi_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 501c47f95130..e30f0ab315f5 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -756,57 +756,35 @@ static int dsicm_panel_reset(struct panel_drv_data *ddata) return dsicm_power_on(ddata); } -static int dsicm_connect(struct omap_dss_device *dssdev) +static int dsicm_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); + struct panel_drv_data *ddata = to_panel_data(dst); struct device *dev = &ddata->pdev->dev; - struct omap_dss_device *src; int r; - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - dev_err(dev, "Failed to connect to video source\n"); - goto err_connect; - } - r = src->ops->dsi.request_vc(src, &ddata->channel); if (r) { dev_err(dev, "failed to get virtual channel\n"); - goto err_req_vc; + return r; } r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH); if (r) { dev_err(dev, "failed to set VC_ID\n"); - goto err_vc_id; + src->ops->dsi.release_vc(src, ddata->channel); + return r; } return 0; - -err_vc_id: - src->ops->dsi.release_vc(src, ddata->channel); -err_req_vc: - omapdss_device_disconnect(src, dssdev); -err_connect: - omapdss_device_put(src); - return r; } -static void dsicm_disconnect(struct omap_dss_device *dssdev) +static void dsicm_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct panel_drv_data *ddata = to_panel_data(dst); src->ops->dsi.release_vc(src, ddata->channel); - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int dsicm_enable(struct omap_dss_device *dssdev) @@ -1404,7 +1382,7 @@ static int __exit dsicm_remove(struct platform_device *pdev) omapdss_device_unregister(dssdev); dsicm_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); + omapdss_device_disconnect(dssdev->src, dssdev); sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 73416b1c7386..66763a12fc3d 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -115,36 +115,19 @@ static void init_lb035q02_panel(struct spi_device *spi) lb035q02_write_reg(spi, 0x3b, 0x0806); } -static int lb035q02_connect(struct omap_dss_device *dssdev) +static int 
lb035q02_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } + struct panel_drv_data *ddata = to_panel_data(dst); init_lb035q02_panel(ddata->spi); return 0; } -static void lb035q02_disconnect(struct omap_dss_device *dssdev) +static void lb035q02_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int lb035q02_enable(struct omap_dss_device *dssdev) @@ -285,7 +268,6 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); lb035q02_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index cf5d9e1522a8..b4dba55b678b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -111,33 +111,15 @@ static int init_nec_8048_wvga_lcd(struct spi_device *spi) return 0; } -static int nec_8048_connect(struct omap_dss_device *dssdev) +static int nec_8048_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void nec_8048_disconnect(struct omap_dss_device *dssdev) +static void nec_8048_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int nec_8048_enable(struct omap_dss_device *dssdev) @@ -310,7 +292,6 @@ static int nec_8048_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); nec_8048_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 1c3180495dfd..7fbdf3ec0113 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -57,33 +57,15 @@ static const struct videomode sharp_ls_vm = { #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) -static int sharp_ls_connect(struct omap_dss_device *dssdev) +static int sharp_ls_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? 
PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void sharp_ls_disconnect(struct omap_dss_device *dssdev) +static void sharp_ls_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int sharp_ls_enable(struct omap_dss_device *dssdev) @@ -284,7 +266,6 @@ static int __exit sharp_ls_remove(struct platform_device *pdev) omapdss_device_unregister(dssdev); sharp_ls_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index d91ab8dab4d9..036fd8e57074 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -506,33 +506,15 @@ static const struct attribute_group bldev_attr_group = { .attrs = bldev_attrs, }; -static int acx565akm_connect(struct omap_dss_device *dssdev) +static int acx565akm_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void acx565akm_disconnect(struct omap_dss_device *dssdev) +static void acx565akm_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) @@ -822,7 +804,6 @@ static int acx565akm_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); acx565akm_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index a57daf44d421..fc08f71b95a0 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -165,33 +165,15 @@ enum jbt_register { #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) -static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) +static int td028ttec1_panel_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? 
PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev) +static void td028ttec1_panel_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) @@ -411,7 +393,6 @@ static int td028ttec1_panel_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); td028ttec1_panel_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 719c298d3996..cb6f19f8a0da 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -336,33 +336,15 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata) ddata->powered_on = 0; } -static int tpo_td043_connect(struct omap_dss_device *dssdev) +static int tpo_td043_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src; - int r; - - src = omapdss_of_find_connected_device(dssdev->dev->of_node, 0); - if (IS_ERR_OR_NULL(src)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return src ? PTR_ERR(src) : -EINVAL; - } - - r = omapdss_device_connect(dssdev->dss, src, dssdev); - if (r) { - omapdss_device_put(src); - return r; - } - return 0; } -static void tpo_td043_disconnect(struct omap_dss_device *dssdev) +static void tpo_td043_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_dss_device *src = dssdev->src; - - omapdss_device_disconnect(src, dssdev); - - omapdss_device_put(src); } static int tpo_td043_enable(struct omap_dss_device *dssdev) @@ -553,7 +535,6 @@ static int tpo_td043_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); tpo_td043_disable(dssdev); - omapdss_device_disconnect(dssdev, NULL); sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 1dbd08e6e029..c3e451440d4b 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -190,24 +190,24 @@ int omapdss_device_connect(struct dss_device *dss, { int ret; - dev_dbg(src->dev, "connect\n"); + dev_dbg(dst->dev, "connect\n"); - if (omapdss_device_is_connected(src)) + if (omapdss_device_is_connected(dst)) return -EBUSY; - src->dss = dss; + dst->dss = dss; - if (src->driver) - ret = src->driver->connect(src); + if (dst->driver) + ret = dst->driver->connect(src, dst); else - ret = src->ops->connect(src, dst); + ret = dst->ops->connect(src, dst); if (ret < 0) { - src->dss = NULL; + dst->dss = NULL; return ret; } - if (dst) { + if (src) { dst->src = src; src->dst = dst; } @@ -219,14 +219,14 @@ EXPORT_SYMBOL_GPL(omapdss_device_connect); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dev_dbg(src->dev, "disconnect\n"); + dev_dbg(dst->dev, "disconnect\n"); - if (!src->id && !omapdss_device_is_connected(src)) { - WARN_ON(!src->driver); + if (!dst->id && !omapdss_device_is_connected(dst)) { + WARN_ON(!dst->driver); return; } - if (dst) { + if (src) { if (WARN_ON(dst != src->dst)) return; @@ -234,12 +234,12 @@ void omapdss_device_disconnect(struct 
omap_dss_device *src, src->dst = NULL; } - if (src->driver) - src->driver->disconnect(src); + if (dst->driver) + dst->driver->disconnect(src, dst); else - src->ops->disconnect(src, dst); + dst->ops->disconnect(src, dst); - src->dss = NULL; + dst->dss = NULL; } EXPORT_SYMBOL_GPL(omapdss_device_disconnect); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 35d63c686393..6bd0fd12883e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -608,35 +608,45 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi) } } -static int dpi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int dpi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + struct dpi_data *dpi = dpi_get_data_from_dssdev(dst); int r; dpi_init_pll(dpi); - r = dss_mgr_connect(dssdev); + r = dss_mgr_connect(dst); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); + r = omapdss_output_set_device(dst, dst->next); if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(dssdev); - return r; + goto err_mgr_disconnect; } + r = omapdss_device_connect(dst->dss, dst, dst->next); + if (r) + goto err_output_unset; + return 0; + +err_output_unset: + omapdss_output_unset_device(dst); +err_mgr_disconnect: + dss_mgr_disconnect(dst); + return r; } -static void dpi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void dpi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - omapdss_output_unset_device(dssdev); + omapdss_device_disconnect(dst, dst->next); + omapdss_output_unset_device(dst); - dss_mgr_disconnect(dssdev); + dss_mgr_disconnect(dst); } static const struct omap_dss_device_ops dpi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 41a98021d5bf..0e88ae1178f7 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4881,32 +4881,42 @@ static int dsi_get_clocks(struct dsi_data *dsi) return 0; } -static int dsi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int dsi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { int r; - r = dss_mgr_connect(dssdev); + r = dss_mgr_connect(dst); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); + r = omapdss_output_set_device(dst, dst->next); if (r) { DSSERR("failed to connect output to new device: %s\n", - dssdev->name); - dss_mgr_disconnect(dssdev); - return r; + dst->name); + goto err_mgr_disconnect; } + r = omapdss_device_connect(dst->dss, dst, dst->next); + if (r) + goto err_output_unset; + return 0; + +err_output_unset: + omapdss_output_unset_device(dst); +err_mgr_disconnect: + dss_mgr_disconnect(dst); + return r; } -static void dsi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void dsi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - omapdss_output_unset_device(dssdev); + omapdss_device_disconnect(dst, dst->next); + omapdss_output_unset_device(dst); - dss_mgr_disconnect(dssdev); + dss_mgr_disconnect(dst); } static const struct omap_dss_device_ops dsi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 6edb85898a7d..9f883669e71b 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -428,32 +428,42 @@ void 
hdmi4_core_disable(struct hdmi_core_data *core) mutex_unlock(&hdmi->lock); } -static int hdmi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int hdmi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { int r; - r = dss_mgr_connect(dssdev); + r = dss_mgr_connect(dst); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); + r = omapdss_output_set_device(dst, dst->next); if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(dssdev); - return r; + goto err_mgr_disconnect; } + r = omapdss_device_connect(dst->dss, dst, dst->next); + if (r) + goto err_output_unset; + return 0; + +err_output_unset: + omapdss_output_unset_device(dst); +err_mgr_disconnect: + dss_mgr_disconnect(dst); + return r; } -static void hdmi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void hdmi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - omapdss_output_unset_device(dssdev); + omapdss_device_disconnect(dst, dst->next); + omapdss_output_unset_device(dst); - dss_mgr_disconnect(dssdev); + dss_mgr_disconnect(dst); } static int hdmi_read_edid(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index db20a578091b..beb70b1fab94 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -433,32 +433,42 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi) mutex_unlock(&hdmi->lock); } -static int hdmi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int hdmi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { int r; - r = dss_mgr_connect(dssdev); + r = dss_mgr_connect(dst); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); + r = omapdss_output_set_device(dst, dst->next); if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(dssdev); - return r; + goto err_mgr_disconnect; } + r = omapdss_device_connect(dst->dss, dst, dst->next); + if (r) + goto err_output_unset; + return 0; + +err_output_unset: + omapdss_output_unset_device(dst); +err_mgr_disconnect: + dss_mgr_disconnect(dst); + return r; } -static void hdmi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void hdmi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - omapdss_output_unset_device(dssdev); + omapdss_device_disconnect(dst, dst->next); + omapdss_output_unset_device(dst); - dss_mgr_disconnect(dssdev); + dss_mgr_disconnect(dst); } static int hdmi_read_edid(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 5d03e9066a33..80c4c2ae306a 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -437,8 +437,10 @@ struct omap_dss_driver { int (*probe)(struct omap_dss_device *); void (*remove)(struct omap_dss_device *); - int (*connect)(struct omap_dss_device *dssdev); - void (*disconnect)(struct omap_dss_device *dssdev); + int (*connect)(struct omap_dss_device *src, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *src, + struct omap_dss_device *dst); int (*enable)(struct omap_dss_device *display); void (*disable)(struct omap_dss_device *display); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 1fb25e2c5f87..c32e8ed2a96f 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ 
b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -252,32 +252,42 @@ static int sdi_check_timings(struct omap_dss_device *dssdev, return 0; } -static int sdi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int sdi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { int r; - r = dss_mgr_connect(dssdev); + r = dss_mgr_connect(dst); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); + r = omapdss_output_set_device(dst, dst); if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(dssdev); - return r; + goto err_mgr_disconnect; } + r = omapdss_device_connect(dst->dss, dst, dst->next); + if (r) + goto err_output_unset; + return 0; + +err_output_unset: + omapdss_output_unset_device(dst); +err_mgr_disconnect: + dss_mgr_disconnect(dst); + return r; } -static void sdi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void sdi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - omapdss_output_unset_device(dssdev); + omapdss_device_disconnect(dst, dst->next); + omapdss_output_unset_device(dst); - dss_mgr_disconnect(dssdev); + dss_mgr_disconnect(dst); } static const struct omap_dss_device_ops sdi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 7aa06b796481..db0aa8f1ff7c 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -691,32 +691,42 @@ static int venc_get_clocks(struct venc_device *venc) return 0; } -static int venc_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int venc_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { int r; - r = dss_mgr_connect(dssdev); + r = dss_mgr_connect(dst); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); + r = omapdss_output_set_device(dst, dst->next); if (r) { DSSERR("failed to connect output to new device: %s\n", dst->name); - dss_mgr_disconnect(dssdev); - return r; + goto err_mgr_disconnect; } + r = omapdss_device_connect(dst->dss, dst, dst->next); + if (r) + goto err_output_unset; + return 0; + +err_output_unset: + omapdss_output_unset_device(dst); +err_mgr_disconnect: + dss_mgr_disconnect(dst); + return r; } -static void venc_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void venc_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - omapdss_output_unset_device(dssdev); + omapdss_device_disconnect(dst, dst->next); + omapdss_output_unset_device(dst); - dss_mgr_disconnect(dssdev); + dss_mgr_disconnect(dst); } static const struct omap_dss_device_ops venc_ops = { diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index f10e5053580b..0052f151bf7a 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -157,11 +157,14 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) unsigned int i; for (i = 0; i < priv->num_pipes; i++) { - struct omap_dss_device *display = priv->pipes[i].display; + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + + omapdss_device_disconnect(NULL, pipe->output); - omapdss_device_disconnect(display, NULL); - priv->pipes[i].display = NULL; - omapdss_device_put(display); + omapdss_device_put(pipe->output); + omapdss_device_put(pipe->display); + pipe->output = NULL; + pipe->display = NULL; } priv->num_pipes = 0; @@ -182,26 +185,30 @@ static int omap_compare_pipes(const void *a, const void *b) static int 
omap_connect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; - struct omap_dss_device *display = NULL; + struct omap_dss_device *output = NULL; int r; if (!omapdss_stack_is_ready()) return -EPROBE_DEFER; - for_each_dss_display(display) { - r = omapdss_device_connect(priv->dss, display, NULL); + for_each_dss_output(output) { + r = omapdss_device_connect(priv->dss, NULL, output); if (r == -EPROBE_DEFER) { - omapdss_device_put(display); + omapdss_device_put(output); goto cleanup; } else if (r) { - dev_warn(display->dev, "could not connect display: %s\n", - display->name); + dev_warn(output->dev, "could not connect output %s\n", + output->name); } else { - omapdss_device_get(display); - priv->pipes[priv->num_pipes++].display = display; + struct omap_drm_pipeline *pipe; + + pipe = &priv->pipes[priv->num_pipes++]; + pipe->output = omapdss_device_get(output); + pipe->display = omapdss_display_get(output); + if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) { - /* To balance the 'for_each_dss_display' loop */ - omapdss_device_put(display); + /* To balance the 'for_each_dss_output' loop */ + omapdss_device_put(output); break; } } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index bc9b954fcc31..a38d07d4d6ea 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -49,6 +49,7 @@ struct omap_drm_pipeline { struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; + struct omap_dss_device *output; struct omap_dss_device *display; }; -- GitLab From 79ddb2f0c348e991edca106a0e5ab414a822ccfc Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 26 May 2018 20:25:10 +0300 Subject: [PATCH 0940/1692] drm/omap: dss: Move connection checks to omapdss_device_(dis)connect When a DSS output is (dis)connected the omapdss_output_(un)set_device() function performs a sanity check to ensure that the output isn't already (dis)connected. The check is unnecessary as those situations should never happen, but can nonetheless be useful to catch driver bugs. To prepare for removal of the omapdss_output_(un)set_device() functions move the connection check to the omapdss_device_connect() function. The omapdss_device_disconnect() already contains a corresponding check. 
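As an illustration, the connect side now carries the check shown in the condensed sketch below. This is not the full omapdss_device_connect() body, only the linking step this patch touches, using the field names from the hunk that follows:

    /*
     * Connect path after this patch: warn if the source already has a
     * destination before linking the two devices. The WARN_ON() only
     * flags the driver bug, it does not prevent the connection.
     */
    if (src) {
            WARN_ON(src->dst);
            dst->src = src;
            src->dst = dst;
    }

The disconnect path keeps its existing WARN_ON(dst != src->dst), so both directions of the link are now sanity checked.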
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 1 + drivers/gpu/drm/omapdrm/dss/output.c | 29 ++-------------------------- 2 files changed, 3 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index c3e451440d4b..02c6ed97d632 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -208,6 +208,7 @@ int omapdss_device_connect(struct dss_device *dss, } if (src) { + WARN_ON(src->dst); dst->src = src; src->dst = dst; } diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 2f7a019d059e..96d74218cf91 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -29,61 +29,36 @@ static DEFINE_MUTEX(output_lock); int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev) { - int r; + int r = 0; mutex_lock(&output_lock); - if (out->dst) { - dev_err(out->dev, - "output already has device %s connected to it\n", - out->dst->name); - r = -EINVAL; - goto err; - } - if (out->output_type != dssdev->type) { dev_err(out->dev, "output type and display type don't match\n"); r = -EINVAL; - goto err; } mutex_unlock(&output_lock); - return 0; -err: - mutex_unlock(&output_lock); - return r; } EXPORT_SYMBOL(omapdss_output_set_device); int omapdss_output_unset_device(struct omap_dss_device *out) { - int r; + int r = 0; mutex_lock(&output_lock); - if (!out->dst) { - dev_err(out->dev, - "output doesn't have a device connected to it\n"); - r = -EINVAL; - goto err; - } - if (out->dst->state != OMAP_DSS_DISPLAY_DISABLED) { dev_err(out->dev, "device %s is not disabled, cannot unset device\n", out->dst->name); r = -EINVAL; - goto err; } mutex_unlock(&output_lock); - return 0; -err: - mutex_unlock(&output_lock); - return r; } EXPORT_SYMBOL(omapdss_output_unset_device); -- GitLab From 713165561b7e372cd21f34bfeb82188361569f74 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 6 Mar 2018 01:25:13 +0200 Subject: [PATCH 0941/1692] drm/omap: dss: Move display type validation to initialization time The display type is validated when the display is connected to the DSS output. We already have all the information we need for validation when initializing the outputs. Move validation to output initialization to simplify pipeline connection handling. 
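To make the new flow concrete, every output driver gains the same pattern in its *_init_output() (or *_init_output_port()) function; the condensed sketch below is taken from the hunks that follow, using the names they introduce:

    /*
     * Validate the output/display type pairing once, at output
     * initialisation, instead of at connection time. On a mismatch the
     * reference to the next device in the pipeline is dropped.
     */
    r = omapdss_output_validate(out);
    if (r) {
            omapdss_device_put(out->next);
            out->next = NULL;
            return r;
    }

    omapdss_device_register(out);

omapdss_output_validate() itself reduces to a single comparison of out->output_type against out->next->type, as shown in the output.c hunk.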
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dpi.c | 25 +++++++++++-------------- drivers/gpu/drm/omapdrm/dss/dsi.c | 25 +++++++++++-------------- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 25 +++++++++++-------------- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 25 +++++++++++-------------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 3 +-- drivers/gpu/drm/omapdrm/dss/output.c | 17 +++++------------ drivers/gpu/drm/omapdrm/dss/sdi.c | 25 +++++++++++-------------- drivers/gpu/drm/omapdrm/dss/venc.c | 25 +++++++++++-------------- 8 files changed, 72 insertions(+), 98 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 6bd0fd12883e..eeeea936f889 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -620,24 +620,13 @@ static int dpi_connect(struct omap_dss_device *src, if (r) return r; - r = omapdss_output_set_device(dst, dst->next); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - goto err_mgr_disconnect; + dss_mgr_disconnect(dst); + return r; } - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - goto err_output_unset; - return 0; - -err_output_unset: - omapdss_output_unset_device(dst); -err_mgr_disconnect: - dss_mgr_disconnect(dst); - return r; } static void dpi_disconnect(struct omap_dss_device *src, @@ -664,6 +653,7 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) { struct omap_dss_device *out = &dpi->output; u32 port_num = 0; + int r; of_property_read_u32(port, "reg", &port_num); dpi->id = port_num <= 2 ? port_num : 0; @@ -696,6 +686,13 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) return PTR_ERR(out->next); } + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + omapdss_device_register(out); return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 0e88ae1178f7..9c617e35efd1 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4890,24 +4890,13 @@ static int dsi_connect(struct omap_dss_device *src, if (r) return r; - r = omapdss_output_set_device(dst, dst->next); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - goto err_mgr_disconnect; + dss_mgr_disconnect(dst); + return r; } - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - goto err_output_unset; - return 0; - -err_output_unset: - omapdss_output_unset_device(dst); -err_mgr_disconnect: - dss_mgr_disconnect(dst); - return r; } static void dsi_disconnect(struct omap_dss_device *src, @@ -5147,6 +5136,7 @@ static const struct component_ops dsi_component_ops = { static int dsi_init_output(struct dsi_data *dsi) { struct omap_dss_device *out = &dsi->output; + int r; out->dev = dsi->dev; out->id = dsi->module_id == 0 ? 
@@ -5166,6 +5156,13 @@ static int dsi_init_output(struct dsi_data *dsi) return PTR_ERR(out->next); } + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + omapdss_device_register(out); return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 9f883669e71b..22f8b74f5bf5 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -437,24 +437,13 @@ static int hdmi_connect(struct omap_dss_device *src, if (r) return r; - r = omapdss_output_set_device(dst, dst->next); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - goto err_mgr_disconnect; + dss_mgr_disconnect(dst); + return r; } - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - goto err_output_unset; - return 0; - -err_output_unset: - omapdss_output_unset_device(dst); -err_mgr_disconnect: - dss_mgr_disconnect(dst); - return r; } static void hdmi_disconnect(struct omap_dss_device *src, @@ -717,6 +706,7 @@ static const struct component_ops hdmi4_component_ops = { static int hdmi4_init_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; + int r; out->dev = &hdmi->pdev->dev; out->id = OMAP_DSS_OUTPUT_HDMI; @@ -734,6 +724,13 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi) return PTR_ERR(out->next); } + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + omapdss_device_register(out); return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index beb70b1fab94..d8592d02a58d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -442,24 +442,13 @@ static int hdmi_connect(struct omap_dss_device *src, if (r) return r; - r = omapdss_output_set_device(dst, dst->next); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - goto err_mgr_disconnect; + dss_mgr_disconnect(dst); + return r; } - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - goto err_output_unset; - return 0; - -err_output_unset: - omapdss_output_unset_device(dst); -err_mgr_disconnect: - dss_mgr_disconnect(dst); - return r; } static void hdmi_disconnect(struct omap_dss_device *src, @@ -709,6 +698,7 @@ static const struct component_ops hdmi5_component_ops = { static int hdmi5_init_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; + int r; out->dev = &hdmi->pdev->dev; out->id = OMAP_DSS_OUTPUT_HDMI; @@ -726,6 +716,13 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi) return PTR_ERR(out->next); } + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + omapdss_device_register(out); return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 80c4c2ae306a..600ac7c25724 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -521,8 +521,7 @@ int omap_dss_get_num_overlays(void); #define for_each_dss_output(d) \ while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL) -int omapdss_output_set_device(struct omap_dss_device *out, - struct omap_dss_device *dssdev); +int omapdss_output_validate(struct omap_dss_device *out); int omapdss_output_unset_device(struct omap_dss_device *out); 
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 96d74218cf91..be544dd48bf4 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -26,23 +26,16 @@ static DEFINE_MUTEX(output_lock); -int omapdss_output_set_device(struct omap_dss_device *out, - struct omap_dss_device *dssdev) +int omapdss_output_validate(struct omap_dss_device *out) { - int r = 0; - - mutex_lock(&output_lock); - - if (out->output_type != dssdev->type) { + if (out->next && out->output_type != out->next->type) { dev_err(out->dev, "output type and display type don't match\n"); - r = -EINVAL; + return -EINVAL; } - mutex_unlock(&output_lock); - - return r; + return 0; } -EXPORT_SYMBOL(omapdss_output_set_device); +EXPORT_SYMBOL(omapdss_output_validate); int omapdss_output_unset_device(struct omap_dss_device *out) { diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index c32e8ed2a96f..2101a697a08a 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -261,24 +261,13 @@ static int sdi_connect(struct omap_dss_device *src, if (r) return r; - r = omapdss_output_set_device(dst, dst); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - goto err_mgr_disconnect; + dss_mgr_disconnect(dst); + return r; } - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - goto err_output_unset; - return 0; - -err_output_unset: - omapdss_output_unset_device(dst); -err_mgr_disconnect: - dss_mgr_disconnect(dst); - return r; } static void sdi_disconnect(struct omap_dss_device *src, @@ -304,6 +293,7 @@ static const struct omap_dss_device_ops sdi_ops = { static int sdi_init_output(struct sdi_device *sdi) { struct omap_dss_device *out = &sdi->output; + int r; out->dev = &sdi->pdev->dev; out->id = OMAP_DSS_OUTPUT_SDI; @@ -322,6 +312,13 @@ static int sdi_init_output(struct sdi_device *sdi) return PTR_ERR(out->next); } + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + omapdss_device_register(out); return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index db0aa8f1ff7c..e673f3e78c69 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -700,24 +700,13 @@ static int venc_connect(struct omap_dss_device *src, if (r) return r; - r = omapdss_output_set_device(dst, dst->next); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - goto err_mgr_disconnect; + dss_mgr_disconnect(dst); + return r; } - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - goto err_output_unset; - return 0; - -err_output_unset: - omapdss_output_unset_device(dst); -err_mgr_disconnect: - dss_mgr_disconnect(dst); - return r; } static void venc_disconnect(struct omap_dss_device *src, @@ -787,6 +776,7 @@ static const struct component_ops venc_component_ops = { static int venc_init_output(struct venc_device *venc) { struct omap_dss_device *out = &venc->output; + int r; out->dev = &venc->pdev->dev; out->id = OMAP_DSS_OUTPUT_VENC; @@ -804,6 +794,13 @@ static int venc_init_output(struct venc_device *venc) return PTR_ERR(out->next); } + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + 
omapdss_device_register(out); return 0; -- GitLab From 3be0f15bd6e94aa17a571020704bde413342e8eb Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 6 Mar 2018 01:51:31 +0200 Subject: [PATCH 0942/1692] drm/omap: dss: Merge two disconnection helpers To simplify the pipeline disconnection handling merge the omapdss_device_disconnect() and omapdss_output_unset_device() functions. The device state check is now called for every device in the pipeline, extending this sanity check coverage. There is no need to return an error from omapdss_device_disconnect() when the check fails, as omapdss_output_unset_device() used to do, given that we can't prevent disconnection due to device unbinding (the return value of omapdss_output_unset_device() is never checked in the current code for that reason). Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 2 ++ drivers/gpu/drm/omapdrm/dss/dpi.c | 1 - drivers/gpu/drm/omapdrm/dss/dsi.c | 1 - drivers/gpu/drm/omapdrm/dss/hdmi4.c | 1 - drivers/gpu/drm/omapdrm/dss/hdmi5.c | 1 - drivers/gpu/drm/omapdrm/dss/omapdss.h | 1 - drivers/gpu/drm/omapdrm/dss/output.c | 21 --------------------- drivers/gpu/drm/omapdrm/dss/sdi.c | 1 - drivers/gpu/drm/omapdrm/dss/venc.c | 1 - 9 files changed, 2 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 02c6ed97d632..89472715ee8f 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -235,6 +235,8 @@ void omapdss_device_disconnect(struct omap_dss_device *src, src->dst = NULL; } + WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED); + if (dst->driver) dst->driver->disconnect(src, dst); else diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index eeeea936f889..178b463c2d60 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -633,7 +633,6 @@ static void dpi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { omapdss_device_disconnect(dst, dst->next); - omapdss_output_unset_device(dst); dss_mgr_disconnect(dst); } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 9c617e35efd1..948e3b873523 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4903,7 +4903,6 @@ static void dsi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { omapdss_device_disconnect(dst, dst->next); - omapdss_output_unset_device(dst); dss_mgr_disconnect(dst); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 22f8b74f5bf5..6616530d5fe6 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -450,7 +450,6 @@ static void hdmi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { omapdss_device_disconnect(dst, dst->next); - omapdss_output_unset_device(dst); dss_mgr_disconnect(dst); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index d8592d02a58d..f7e15edc05fc 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -455,7 +455,6 @@ static void hdmi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { omapdss_device_disconnect(dst, dst->next); - omapdss_output_unset_device(dst); dss_mgr_disconnect(dst); } diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 
600ac7c25724..8f9538e17ea4 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -522,7 +522,6 @@ int omap_dss_get_num_overlays(void); #define for_each_dss_output(d) \ while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL) int omapdss_output_validate(struct omap_dss_device *out); -int omapdss_output_unset_device(struct omap_dss_device *out); typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index be544dd48bf4..2da480be918d 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -24,8 +24,6 @@ #include "dss.h" #include "omapdss.h" -static DEFINE_MUTEX(output_lock); - int omapdss_output_validate(struct omap_dss_device *out) { if (out->next && out->output_type != out->next->type) { @@ -37,25 +35,6 @@ int omapdss_output_validate(struct omap_dss_device *out) } EXPORT_SYMBOL(omapdss_output_validate); -int omapdss_output_unset_device(struct omap_dss_device *out) -{ - int r = 0; - - mutex_lock(&output_lock); - - if (out->dst->state != OMAP_DSS_DISPLAY_DISABLED) { - dev_err(out->dev, - "device %s is not disabled, cannot unset device\n", - out->dst->name); - r = -EINVAL; - } - - mutex_unlock(&output_lock); - - return r; -} -EXPORT_SYMBOL(omapdss_output_unset_device); - int dss_install_mgr_ops(struct dss_device *dss, const struct dss_mgr_ops *mgr_ops, struct omap_drm_private *priv) diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 2101a697a08a..764299cafbe2 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -274,7 +274,6 @@ static void sdi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { omapdss_device_disconnect(dst, dst->next); - omapdss_output_unset_device(dst); dss_mgr_disconnect(dst); } diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index e673f3e78c69..c2811c425195 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -713,7 +713,6 @@ static void venc_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { omapdss_device_disconnect(dst, dst->next); - omapdss_output_unset_device(dst); dss_mgr_disconnect(dst); } -- GitLab From 00b30e794ffc3bd8f4c6dc357fe7e881ae6e5373 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 6 Mar 2018 23:37:25 +0200 Subject: [PATCH 0943/1692] drm/omap: Pass pipe pointer to omap_crtc_init() Replace the dss display device pointer by a pipe pointer that will allow the omap_crtc_init() function to access both the display and the DSS output. As a result we can remove the omapdss_device_get_dispc_channel() function that is now unneeded. 
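For reference, the relevant parts of the new interface, condensed from the hunks below (function bodies are not reproduced in full):

    /* omap_crtc_init() now receives the whole pipeline... */
    struct drm_crtc *omap_crtc_init(struct drm_device *dev,
                                    struct omap_drm_pipeline *pipe,
                                    struct drm_plane *plane);

    /* ...so the DISPC channel comes straight from the output, */
    channel = pipe->output->dispc_channel;

    /* and the caller in omap_drv.c simply passes the pipe. */
    crtc = omap_crtc_init(dev, pipe, priv->planes[i]);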
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 9 --------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 1 - drivers/gpu/drm/omapdrm/omap_crtc.c | 7 ++++--- drivers/gpu/drm/omapdrm/omap_crtc.h | 4 +++- drivers/gpu/drm/omapdrm/omap_drv.c | 2 +- 5 files changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 89472715ee8f..2051bab30484 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -246,15 +246,6 @@ void omapdss_device_disconnect(struct omap_dss_device *src, } EXPORT_SYMBOL_GPL(omapdss_device_disconnect); -enum omap_channel omapdss_device_get_dispc_channel(struct omap_dss_device *dssdev) -{ - while (dssdev->src) - dssdev = dssdev->src; - - return dssdev->dispc_channel; -} -EXPORT_SYMBOL(omapdss_device_get_dispc_channel); - /* ----------------------------------------------------------------------------- * Components Handling */ diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 8f9538e17ea4..a732a4a0dc36 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -513,7 +513,6 @@ int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *dst); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst); -enum omap_channel omapdss_device_get_dispc_channel(struct omap_dss_device *dssdev); int omap_dss_get_num_overlay_managers(void); diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index f5bf553a862f..f5bdb8de98f4 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -693,7 +693,8 @@ void omap_crtc_pre_uninit(struct omap_drm_private *priv) /* initialize crtc */ struct drm_crtc *omap_crtc_init(struct drm_device *dev, - struct drm_plane *plane, struct omap_dss_device *dssdev) + struct omap_drm_pipeline *pipe, + struct drm_plane *plane) { struct omap_drm_private *priv = dev->dev_private; struct drm_crtc *crtc = NULL; @@ -701,7 +702,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, enum omap_channel channel; int ret; - channel = omapdss_device_get_dispc_channel(dssdev); + channel = pipe->output->dispc_channel; DBG("%s", channel_names[channel]); @@ -724,7 +725,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, &omap_crtc_funcs, NULL); if (ret < 0) { dev_err(dev->dev, "%s(): could not init crtc for: %s\n", - __func__, dssdev->name); + __func__, pipe->display->name); kfree(omap_crtc); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h index 1c6530703855..d9de437ba9dd 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.h +++ b/drivers/gpu/drm/omapdrm/omap_crtc.h @@ -27,6 +27,7 @@ enum omap_channel; struct drm_crtc; struct drm_device; struct drm_plane; +struct omap_drm_pipeline; struct omap_dss_device; struct videomode; @@ -35,7 +36,8 @@ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); void omap_crtc_pre_init(struct omap_drm_private *priv); void omap_crtc_pre_uninit(struct omap_drm_private *priv); struct drm_crtc *omap_crtc_init(struct drm_device *dev, - struct drm_plane *plane, struct omap_dss_device *dssdev); + struct omap_drm_pipeline *pipe, + struct drm_plane *plane); int omap_crtc_wait_pending(struct drm_crtc *crtc); void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus); void omap_crtc_vblank_irq(struct 
drm_crtc *crtc); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 0052f151bf7a..bb9ee2c93eca 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -308,7 +308,7 @@ static int omap_modeset_init(struct drm_device *dev) if (!connector) return -ENOMEM; - crtc = omap_crtc_init(dev, priv->planes[i], display); + crtc = omap_crtc_init(dev, pipe, priv->planes[i]); if (IS_ERR(crtc)) return PTR_ERR(crtc); -- GitLab From e48f9f16a16a6ee1befda6d8e5486234ac3a5162 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 7 Mar 2018 00:01:33 +0200 Subject: [PATCH 0944/1692] drm/omap: Store CRTC lookup by channel table in omap_drm_private The omap_crtcs global array is used to store pointers to omap_crtc indexed by DISPC channel number, in order to look them up in the dss_mgr operations. Store the information in the omap_drm_private structure in the form of an array of omap_drm_pipeline pointers. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_crtc.c | 22 +++++++++------------- drivers/gpu/drm/omapdrm/omap_drv.c | 19 +++++++++++++++++++ drivers/gpu/drm/omapdrm/omap_drv.h | 1 + 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index f5bdb8de98f4..9742d9f49a7c 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -109,7 +109,6 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc) */ /* ovl-mgr-id -> crtc */ -static struct omap_crtc *omap_crtcs[8]; static struct omap_dss_device *omap_crtc_output[8]; /* we can probably ignore these until we support command-mode panels: */ @@ -215,7 +214,8 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) static int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel) { - struct omap_crtc *omap_crtc = omap_crtcs[channel]; + struct drm_crtc *crtc = priv->channels[channel]->crtc; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel, &omap_crtc->vm); @@ -227,7 +227,8 @@ static int omap_crtc_dss_enable(struct omap_drm_private *priv, static void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel) { - struct omap_crtc *omap_crtc = omap_crtcs[channel]; + struct drm_crtc *crtc = priv->channels[channel]->crtc; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); omap_crtc_set_enabled(&omap_crtc->base, false); } @@ -236,7 +237,9 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv, enum omap_channel channel, const struct videomode *vm) { - struct omap_crtc *omap_crtc = omap_crtcs[channel]; + struct drm_crtc *crtc = priv->channels[channel]->crtc; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + DBG("%s", omap_crtc->name); omap_crtc->vm = *vm; } @@ -245,7 +248,8 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv, enum omap_channel channel, const struct dss_lcd_mgr_config *config) { - struct omap_crtc *omap_crtc = omap_crtcs[channel]; + struct drm_crtc *crtc = priv->channels[channel]->crtc; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); DBG("%s", omap_crtc->name); priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel, @@ -681,8 +685,6 @@ static const char *channel_names[] = { void omap_crtc_pre_init(struct omap_drm_private *priv) { - memset(omap_crtcs, 0, sizeof(omap_crtcs)); - dss_install_mgr_ops(priv->dss, 
&mgr_ops, priv); } @@ -706,10 +708,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, DBG("%s", channel_names[channel]); - /* Multiple displays on same channel is not allowed */ - if (WARN_ON(omap_crtcs[channel] != NULL)) - return ERR_PTR(-EINVAL); - omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL); if (!omap_crtc) return ERR_PTR(-ENOMEM); @@ -748,7 +746,5 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, omap_plane_install_properties(crtc->primary, &crtc->base); - omap_crtcs[channel] = omap_crtc; - return crtc; } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index bb9ee2c93eca..f2a69cfb6ebf 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -167,6 +167,8 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) pipe->display = NULL; } + memset(&priv->channels, 0, sizeof(priv->channels)); + priv->num_pipes = 0; } @@ -186,6 +188,7 @@ static int omap_connect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; struct omap_dss_device *output = NULL; + unsigned int i; int r; if (!omapdss_stack_is_ready()) @@ -218,6 +221,22 @@ static int omap_connect_pipelines(struct drm_device *ddev) sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]), omap_compare_pipes, NULL); + /* + * Populate the pipeline lookup table by DISPC channel. Only one display + * is allowed per channel. + */ + for (i = 0; i < priv->num_pipes; ++i) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + enum omap_channel channel = pipe->output->dispc_channel; + + if (WARN_ON(priv->channels[channel] != NULL)) { + r = -EINVAL; + goto cleanup; + } + + priv->channels[channel] = pipe; + } + return 0; cleanup: diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index a38d07d4d6ea..bd7f2c227a25 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -64,6 +64,7 @@ struct omap_drm_private { unsigned int num_pipes; struct omap_drm_pipeline pipes[8]; + struct omap_drm_pipeline *channels[8]; unsigned int num_planes; struct drm_plane *planes[8]; -- GitLab From 67dfd2d3d0c24217e12d82909f88dfadb04f34bb Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 6 Mar 2018 23:38:21 +0200 Subject: [PATCH 0945/1692] drm/omap: Remove omap_crtc_output global array The omap_crtc_output global array is used to look up the DSS output device by channel. We can replace that by accessing the output device from the pipeline if we store the pipeline pointer in the omap_crtc structure. The global array is also used to protect against double connection of an output. This can't happen with the connection handling mechanism going from DSS outputs to displays. We can thus drop that check, allowing removal of the global array. 
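The resulting lookup, condensed from the hunks below: omap_crtc stores the pipeline pointer at init time and dereferences it where the output used to be fetched from the global array (only the members relevant here are shown):

    struct omap_crtc {
            struct drm_crtc base;
            const char *name;
            struct omap_drm_pipeline *pipe; /* new: set in omap_crtc_init() */
            enum omap_channel channel;
            /* ... remaining members unchanged ... */
    };

    /* e.g. in omap_crtc_set_enabled(): */
    if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) {
            priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
            omap_crtc->enabled = enable;
            return;
    }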
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_crtc.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 9742d9f49a7c..5a56c8e02179 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -41,6 +41,7 @@ struct omap_crtc { struct drm_crtc base; const char *name; + struct omap_drm_pipeline *pipe; enum omap_channel channel; struct videomode vm; @@ -108,9 +109,6 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc) * job of sequencing the setup of the video pipe in the proper order */ -/* ovl-mgr-id -> crtc */ -static struct omap_dss_device *omap_crtc_output[8]; - /* we can probably ignore these until we support command-mode panels: */ static int omap_crtc_dss_connect(struct omap_drm_private *priv, enum omap_channel channel, @@ -119,13 +117,9 @@ static int omap_crtc_dss_connect(struct omap_drm_private *priv, const struct dispc_ops *dispc_ops = priv->dispc_ops; struct dispc_device *dispc = priv->dispc; - if (omap_crtc_output[channel]) - return -EINVAL; - if (!(dispc_ops->mgr_get_supported_outputs(dispc, channel) & dst->id)) return -EINVAL; - omap_crtc_output[channel] = dst; dst->dispc_channel_connected = true; return 0; @@ -135,7 +129,6 @@ static void omap_crtc_dss_disconnect(struct omap_drm_private *priv, enum omap_channel channel, struct omap_dss_device *dst) { - omap_crtc_output[channel] = NULL; dst->dispc_channel_connected = false; } @@ -158,7 +151,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) if (WARN_ON(omap_crtc->enabled == enable)) return; - if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) { + if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) { priv->dispc_ops->mgr_enable(priv->dispc, channel, enable); omap_crtc->enabled = enable; return; @@ -716,6 +709,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, init_waitqueue_head(&omap_crtc->pending_wait); + omap_crtc->pipe = pipe; omap_crtc->channel = channel; omap_crtc->name = channel_names[channel]; -- GitLab From d25a7d67465faa28062323d46a1d755d3ae6abc6 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 7 Mar 2018 00:22:10 +0200 Subject: [PATCH 0946/1692] drm/omap: Remove supported output check in CRTC connect handler The CRTC connect handler checks whether the DSS output supports the DISPC channel assigned to it. As the channel is assigned to the output by the output driver a failure there could only result from a driver bug. All the output drivers have been verified and they are always assigned a DISPC channel that is supported on the SoC they run on. The check can thus be removed. 
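With the check gone, the dss_mgr connect hook reduces to the stub shown below (condensed from the hunk that follows); the next patch in the series then moves even the dispc_channel_connected assignment into the output drivers:

    static int omap_crtc_dss_connect(struct omap_drm_private *priv,
                                     enum omap_channel channel,
                                     struct omap_dss_device *dst)
    {
            dst->dispc_channel_connected = true;

            return 0;
    }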
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dispc.c | 8 -------- drivers/gpu/drm/omapdrm/dss/dss.c | 6 ------ drivers/gpu/drm/omapdrm/dss/dss.h | 2 -- drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 -- drivers/gpu/drm/omapdrm/omap_crtc.c | 6 ------ 5 files changed, 24 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 84f274c4a4cb..da95dbfdf790 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -2904,13 +2904,6 @@ static int dispc_ovl_enable(struct dispc_device *dispc, return 0; } -static enum omap_dss_output_id -dispc_mgr_get_supported_outputs(struct dispc_device *dispc, - enum omap_channel channel) -{ - return dss_get_supported_outputs(dispc->dss, channel); -} - static void dispc_lcd_enable_signal_polarity(struct dispc_device *dispc, bool act_high) { @@ -4742,7 +4735,6 @@ static const struct dispc_ops dispc_ops = { .mgr_set_lcd_config = dispc_mgr_set_lcd_config, .mgr_set_timings = dispc_mgr_set_timings, .mgr_setup = dispc_mgr_setup, - .mgr_get_supported_outputs = dispc_mgr_get_supported_outputs, .mgr_gamma_size = dispc_mgr_gamma_size, .mgr_set_gamma = dispc_mgr_set_gamma, diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 7245dd3423e6..b473aff466d7 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -678,12 +678,6 @@ unsigned long dss_get_max_fck_rate(struct dss_device *dss) return dss->feat->fck_freq_max; } -enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss, - enum omap_channel channel) -{ - return dss->feat->outputs[channel]; -} - static int dss_setup_default_clock(struct dss_device *dss) { unsigned long max_dss_fck, prate; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 0305eaf2c30c..ee06051933c5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -317,8 +317,6 @@ void dss_runtime_put(struct dss_device *dss); unsigned long dss_get_dispc_clk_rate(struct dss_device *dss); unsigned long dss_get_max_fck_rate(struct dss_device *dss); -enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss, - enum omap_channel channel); int dss_dpi_select_source(struct dss_device *dss, int port, enum omap_channel channel); void dss_select_hdmi_venc_clk_source(struct dss_device *dss, diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index a732a4a0dc36..8ef0ac3d1d44 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -640,8 +640,6 @@ struct dispc_ops { const struct videomode *vm); void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel, const struct omap_overlay_manager_info *info); - enum omap_dss_output_id (*mgr_get_supported_outputs)( - struct dispc_device *dispc, enum omap_channel channel); u32 (*mgr_gamma_size)(struct dispc_device *dispc, enum omap_channel channel); void (*mgr_set_gamma)(struct dispc_device *dispc, diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 5a56c8e02179..90917d040ddb 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -114,12 +114,6 @@ static int omap_crtc_dss_connect(struct omap_drm_private *priv, enum omap_channel channel, struct omap_dss_device *dst) { - const struct dispc_ops *dispc_ops = priv->dispc_ops; - struct dispc_device 
*dispc = priv->dispc; - - if (!(dispc_ops->mgr_get_supported_outputs(dispc, channel) & dst->id)) - return -EINVAL; - dst->dispc_channel_connected = true; return 0; -- GitLab From 0f37938c7c432c7737d85940475bcbd3c362447e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 7 Mar 2018 00:28:18 +0200 Subject: [PATCH 0947/1692] drm/omap: Set dispc_channel_connect from DSS output connect handlers The omap_dss_device.dispc_channel_connect field is used by DSS outputs to fail the .enable() operation if they're not connected. Set the field directly from the (dis)connect handlers of the DSS outputs instead of going through the CRTC dss_mgr operations. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dpi.c | 3 +++ drivers/gpu/drm/omapdrm/dss/dsi.c | 3 +++ drivers/gpu/drm/omapdrm/dss/hdmi4.c | 3 +++ drivers/gpu/drm/omapdrm/dss/hdmi5.c | 3 +++ drivers/gpu/drm/omapdrm/dss/sdi.c | 3 +++ drivers/gpu/drm/omapdrm/dss/venc.c | 3 +++ drivers/gpu/drm/omapdrm/omap_crtc.c | 3 --- 7 files changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 178b463c2d60..5b04cc514c58 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -626,12 +626,15 @@ static int dpi_connect(struct omap_dss_device *src, return r; } + dst->dispc_channel_connected = true; return 0; } static void dpi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { + dst->dispc_channel_connected = false; + omapdss_device_disconnect(dst, dst->next); dss_mgr_disconnect(dst); diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 948e3b873523..921e794aec0d 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4896,12 +4896,15 @@ static int dsi_connect(struct omap_dss_device *src, return r; } + dst->dispc_channel_connected = true; return 0; } static void dsi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { + dst->dispc_channel_connected = false; + omapdss_device_disconnect(dst, dst->next); dss_mgr_disconnect(dst); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 6616530d5fe6..1e025a8b99c9 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -443,12 +443,15 @@ static int hdmi_connect(struct omap_dss_device *src, return r; } + dst->dispc_channel_connected = true; return 0; } static void hdmi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { + dst->dispc_channel_connected = false; + omapdss_device_disconnect(dst, dst->next); dss_mgr_disconnect(dst); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index f7e15edc05fc..d5860438ddd9 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -448,12 +448,15 @@ static int hdmi_connect(struct omap_dss_device *src, return r; } + dst->dispc_channel_connected = true; return 0; } static void hdmi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { + dst->dispc_channel_connected = false; + omapdss_device_disconnect(dst, dst->next); dss_mgr_disconnect(dst); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 764299cafbe2..b74188458e91 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -267,12 +267,15 @@ static int sdi_connect(struct omap_dss_device *src, 
return r; } + dst->dispc_channel_connected = true; return 0; } static void sdi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { + dst->dispc_channel_connected = false; + omapdss_device_disconnect(dst, dst->next); dss_mgr_disconnect(dst); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index c2811c425195..00421e2a8eb6 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -706,12 +706,15 @@ static int venc_connect(struct omap_dss_device *src, return r; } + dst->dispc_channel_connected = true; return 0; } static void venc_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { + dst->dispc_channel_connected = false; + omapdss_device_disconnect(dst, dst->next); dss_mgr_disconnect(dst); diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 90917d040ddb..7f837697e76c 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -114,8 +114,6 @@ static int omap_crtc_dss_connect(struct omap_drm_private *priv, enum omap_channel channel, struct omap_dss_device *dst) { - dst->dispc_channel_connected = true; - return 0; } @@ -123,7 +121,6 @@ static void omap_crtc_dss_disconnect(struct omap_drm_private *priv, enum omap_channel channel, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; } static void omap_crtc_dss_start_update(struct omap_drm_private *priv, -- GitLab From 43f7078f6b6f8fed8edfbbdeff83e276306e5e6e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 6 Mar 2018 23:34:53 +0200 Subject: [PATCH 0948/1692] drm/omap: dss: Remove the dss_mgr_(dis)connect() operations The dss_mgr .connect() and .disconnect() are implemented as no-op in omapdrm. The operations are unneeded, remove them. 
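A standalone model of the control-flow change this makes in every output driver's .connect handler is sketched below; the ex_* names are invented and only the shape mirrors the real code shown in the hunks that follow.

    struct ex_device { int connected; };

    /* What the dss_mgr hooks amounted to in omapdrm: */
    static int  ex_mgr_connect(struct ex_device *d)    { (void)d; return 0; }
    static void ex_mgr_disconnect(struct ex_device *d) { (void)d; }

    static int ex_device_connect(struct ex_device *d)  { d->connected = 1; return 0; }

    /* Before: bounce through the no-op manager ops and undo on failure. */
    static int ex_connect_before(struct ex_device *d)
    {
            int r = ex_mgr_connect(d);

            if (r)
                    return r;

            r = ex_device_connect(d);
            if (r)
                    ex_mgr_disconnect(d);
            return r;
    }

    /* After: the round trip disappears. */
    static int ex_connect_after(struct ex_device *d)
    {
            return ex_device_connect(d);
    }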
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dpi.c | 10 +--------- drivers/gpu/drm/omapdrm/dss/dsi.c | 10 +--------- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 10 +--------- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 10 +--------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 9 --------- drivers/gpu/drm/omapdrm/dss/output.c | 14 -------------- drivers/gpu/drm/omapdrm/dss/sdi.c | 10 +--------- drivers/gpu/drm/omapdrm/dss/venc.c | 10 +--------- drivers/gpu/drm/omapdrm/omap_crtc.c | 15 --------------- 9 files changed, 6 insertions(+), 92 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 5b04cc514c58..f61e3e3186b4 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -616,15 +616,9 @@ static int dpi_connect(struct omap_dss_device *src, dpi_init_pll(dpi); - r = dss_mgr_connect(dst); - if (r) - return r; - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) { - dss_mgr_disconnect(dst); + if (r) return r; - } dst->dispc_channel_connected = true; return 0; @@ -636,8 +630,6 @@ static void dpi_disconnect(struct omap_dss_device *src, dst->dispc_channel_connected = false; omapdss_device_disconnect(dst, dst->next); - - dss_mgr_disconnect(dst); } static const struct omap_dss_device_ops dpi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 921e794aec0d..0afefac4bf65 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4886,15 +4886,9 @@ static int dsi_connect(struct omap_dss_device *src, { int r; - r = dss_mgr_connect(dst); - if (r) - return r; - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) { - dss_mgr_disconnect(dst); + if (r) return r; - } dst->dispc_channel_connected = true; return 0; @@ -4906,8 +4900,6 @@ static void dsi_disconnect(struct omap_dss_device *src, dst->dispc_channel_connected = false; omapdss_device_disconnect(dst, dst->next); - - dss_mgr_disconnect(dst); } static const struct omap_dss_device_ops dsi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 1e025a8b99c9..c4fcdc9ed62d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -433,15 +433,9 @@ static int hdmi_connect(struct omap_dss_device *src, { int r; - r = dss_mgr_connect(dst); - if (r) - return r; - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) { - dss_mgr_disconnect(dst); + if (r) return r; - } dst->dispc_channel_connected = true; return 0; @@ -453,8 +447,6 @@ static void hdmi_disconnect(struct omap_dss_device *src, dst->dispc_channel_connected = false; omapdss_device_disconnect(dst, dst->next); - - dss_mgr_disconnect(dst); } static int hdmi_read_edid(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index d5860438ddd9..889c31745492 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -438,15 +438,9 @@ static int hdmi_connect(struct omap_dss_device *src, { int r; - r = dss_mgr_connect(dst); - if (r) - return r; - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) { - dss_mgr_disconnect(dst); + if (r) return r; - } dst->dispc_channel_connected = true; return 0; @@ -458,8 +452,6 @@ static void hdmi_disconnect(struct omap_dss_device *src, dst->dispc_channel_connected = false; omapdss_device_disconnect(dst, dst->next); - - 
dss_mgr_disconnect(dst); } static int hdmi_read_edid(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 8ef0ac3d1d44..c00572ecb9d6 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -554,13 +554,6 @@ enum dss_writeback_channel { }; struct dss_mgr_ops { - int (*connect)(struct omap_drm_private *priv, - enum omap_channel channel, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_drm_private *priv, - enum omap_channel channel, - struct omap_dss_device *dst); - void (*start_update)(struct omap_drm_private *priv, enum omap_channel channel); int (*enable)(struct omap_drm_private *priv, @@ -586,8 +579,6 @@ int dss_install_mgr_ops(struct dss_device *dss, struct omap_drm_private *priv); void dss_uninstall_mgr_ops(struct dss_device *dss); -int dss_mgr_connect(struct omap_dss_device *dssdev); -void dss_mgr_disconnect(struct omap_dss_device *dssdev); void dss_mgr_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm); void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 2da480be918d..18505bc70f7e 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -56,20 +56,6 @@ void dss_uninstall_mgr_ops(struct dss_device *dss) } EXPORT_SYMBOL(dss_uninstall_mgr_ops); -int dss_mgr_connect(struct omap_dss_device *dssdev) -{ - return dssdev->dss->mgr_ops->connect(dssdev->dss->mgr_ops_priv, - dssdev->dispc_channel, dssdev); -} -EXPORT_SYMBOL(dss_mgr_connect); - -void dss_mgr_disconnect(struct omap_dss_device *dssdev) -{ - dssdev->dss->mgr_ops->disconnect(dssdev->dss->mgr_ops_priv, - dssdev->dispc_channel, dssdev); -} -EXPORT_SYMBOL(dss_mgr_disconnect); - void dss_mgr_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index b74188458e91..8d46f44bcb7d 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -257,15 +257,9 @@ static int sdi_connect(struct omap_dss_device *src, { int r; - r = dss_mgr_connect(dst); - if (r) - return r; - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) { - dss_mgr_disconnect(dst); + if (r) return r; - } dst->dispc_channel_connected = true; return 0; @@ -277,8 +271,6 @@ static void sdi_disconnect(struct omap_dss_device *src, dst->dispc_channel_connected = false; omapdss_device_disconnect(dst, dst->next); - - dss_mgr_disconnect(dst); } static const struct omap_dss_device_ops sdi_ops = { diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 00421e2a8eb6..100a02a9447f 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -696,15 +696,9 @@ static int venc_connect(struct omap_dss_device *src, { int r; - r = dss_mgr_connect(dst); - if (r) - return r; - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) { - dss_mgr_disconnect(dst); + if (r) return r; - } dst->dispc_channel_connected = true; return 0; @@ -716,8 +710,6 @@ static void venc_disconnect(struct omap_dss_device *src, dst->dispc_channel_connected = false; omapdss_device_disconnect(dst, dst->next); - - dss_mgr_disconnect(dst); } static const struct omap_dss_device_ops venc_ops = { diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 7f837697e76c..80498dcde6d7 100644 --- 
a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -110,19 +110,6 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc) */ /* we can probably ignore these until we support command-mode panels: */ -static int omap_crtc_dss_connect(struct omap_drm_private *priv, - enum omap_channel channel, - struct omap_dss_device *dst) -{ - return 0; -} - -static void omap_crtc_dss_disconnect(struct omap_drm_private *priv, - enum omap_channel channel, - struct omap_dss_device *dst) -{ -} - static void omap_crtc_dss_start_update(struct omap_drm_private *priv, enum omap_channel channel) { @@ -254,8 +241,6 @@ static void omap_crtc_dss_unregister_framedone( } static const struct dss_mgr_ops mgr_ops = { - .connect = omap_crtc_dss_connect, - .disconnect = omap_crtc_dss_disconnect, .start_update = omap_crtc_dss_start_update, .enable = omap_crtc_dss_enable, .disable = omap_crtc_dss_disable, -- GitLab From e553ea09e26810221707e16842e8c7e05297ac75 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 29 Mar 2018 14:08:12 +0300 Subject: [PATCH 0949/1692] drm/omap: dss: Remove unused omap_dss_driver operations The .probe(), .remove(), .run_test(), .get_rotate() and .set_rotate() omap_dss_driver operations are not used. Remove them. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/omapdss.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index c00572ecb9d6..01ba919e34df 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -434,9 +434,6 @@ struct omap_dss_device { }; struct omap_dss_driver { - int (*probe)(struct omap_dss_device *); - void (*remove)(struct omap_dss_device *); - int (*connect)(struct omap_dss_device *src, struct omap_dss_device *dst); void (*disconnect)(struct omap_dss_device *src, @@ -444,7 +441,6 @@ struct omap_dss_driver { int (*enable)(struct omap_dss_device *display); void (*disable)(struct omap_dss_device *display); - int (*run_test)(struct omap_dss_device *display, int test); int (*update)(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h); @@ -453,9 +449,6 @@ struct omap_dss_driver { int (*enable_te)(struct omap_dss_device *dssdev, bool enable); int (*get_te)(struct omap_dss_device *dssdev); - u8 (*get_rotate)(struct omap_dss_device *dssdev); - int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate); - bool (*get_mirror)(struct omap_dss_device *dssdev); int (*set_mirror)(struct omap_dss_device *dssdev, bool enable); -- GitLab From 6f7ae8c29242df34386ba9cbbc77ba21f69ac18e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 29 Mar 2018 14:08:12 +0300 Subject: [PATCH 0950/1692] drm/omap: dss: Remove omap_dss_driver .[gs]et_mirror operations The .get_mirror() and .set_mirror() omap_dss_driver operations are implemented by the panel-tpo-td043mtea1 driver but are never used. Remove them. 
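This and the preceding patch drop operations that no caller ever invokes. A short standalone illustration (invented ex_* names) of why such removals are behaviour-neutral: an operation can only influence behaviour through a call site, and none exist for the removed members, so deleting both the member and its lone implementation changes nothing.

    struct ex_driver_ops {
            int (*enable)(void *dev);             /* has callers: kept */
            int (*set_mirror)(void *dev, int on); /* implemented by one panel,
                                                     never called: removable */
    };

    /* The only path through which an op matters looks like this; no such
     * path exists for the operations removed here. */
    static int ex_call_enable(const struct ex_driver_ops *ops, void *dev)
    {
            return ops->enable ? ops->enable(dev) : -1;
    }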
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/panel-tpo-td043mtea1.c | 25 ++----------------- drivers/gpu/drm/omapdrm/dss/omapdss.h | 3 --- 2 files changed, 2 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index cb6f19f8a0da..34531169c166 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -62,7 +62,6 @@ struct panel_drv_data { int nreset_gpio; u16 gamma[12]; u32 mode; - u32 hmirror:1; u32 vmirror:1; u32 powered_on:1; u32 spi_suspended:1; @@ -151,22 +150,6 @@ static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v) return tpo_td043_write(spi, 4, reg4); } -static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev); - - ddata->hmirror = enable; - return tpo_td043_write_mirror(ddata->spi, ddata->hmirror, - ddata->vmirror); -} - -static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev); - - return ddata->hmirror; -} - static ssize_t tpo_td043_vmirror_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -188,7 +171,7 @@ static ssize_t tpo_td043_vmirror_store(struct device *dev, val = !!val; - ret = tpo_td043_write_mirror(ddata->spi, ddata->hmirror, val); + ret = tpo_td043_write_mirror(ddata->spi, false, val); if (ret < 0) return ret; @@ -307,8 +290,7 @@ static int tpo_td043_power_on(struct panel_drv_data *ddata) tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL); tpo_td043_write(ddata->spi, 0x20, 0xf0); tpo_td043_write(ddata->spi, 0x21, 0xf0); - tpo_td043_write_mirror(ddata->spi, ddata->hmirror, - ddata->vmirror); + tpo_td043_write_mirror(ddata->spi, false, ddata->vmirror); tpo_td043_write_gamma(ddata->spi, ddata->gamma); ddata->powered_on = 1; @@ -435,9 +417,6 @@ static const struct omap_dss_driver tpo_td043_ops = { .set_timings = tpo_td043_set_timings, .get_timings = tpo_td043_get_timings, .check_timings = tpo_td043_check_timings, - - .set_mirror = tpo_td043_set_hmirror, - .get_mirror = tpo_td043_get_hmirror, }; static int tpo_td043_probe_of(struct spi_device *spi) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 01ba919e34df..c29633765898 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -449,9 +449,6 @@ struct omap_dss_driver { int (*enable_te)(struct omap_dss_device *dssdev, bool enable); int (*get_te)(struct omap_dss_device *dssdev); - bool (*get_mirror)(struct omap_dss_device *dssdev); - int (*set_mirror)(struct omap_dss_device *dssdev, bool enable); - int (*memory_read)(struct omap_dss_device *dssdev, void *buf, size_t size, u16 x, u16 y, u16 w, u16 h); -- GitLab From a4e9ecf4bb19f13bbd346fae646e7d9ce33db3b8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 29 Mar 2018 14:51:04 +0300 Subject: [PATCH 0951/1692] drm/omap: Remove unnecessary display output sanity checks The omapdrm driver checks at suspend and resume time whether the displays it operates on have their driver operations set. This check is unneeded, as all display drivers set the driver operations field at probe time and never touch it afterwards. This is furthermore proven by the dereferencing of the driver field without checking it first in several locations. 
The omapdss driver performs a similar check at shutdown time. This is unneeded as well, as the for_each_dss_display() macro it uses to iterate over displays locates the displays by checking the driver field internally. As those checks are unnecessary, remove them. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dss.c | 3 --- drivers/gpu/drm/omapdrm/omap_drv.c | 6 ------ 2 files changed, 9 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index b473aff466d7..14ffe23b5ecf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1552,9 +1552,6 @@ static void dss_shutdown(struct platform_device *pdev) DSSDBG("shutdown\n"); for_each_dss_display(dssdev) { - if (!dssdev->driver) - continue; - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) dssdev->driver->disable(dssdev); } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index f2a69cfb6ebf..d0f6929857bb 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -723,9 +723,6 @@ static int omap_drm_suspend_all_displays(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_dss_device *display = priv->pipes[i].display; - if (!display->driver) - continue; - if (display->state == OMAP_DSS_DISPLAY_ACTIVE) { display->driver->disable(display); display->activate_after_resume = true; @@ -745,9 +742,6 @@ static int omap_drm_resume_all_displays(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_dss_device *display = priv->pipes[i].display; - if (!display->driver) - continue; - if (display->activate_after_resume) { display->driver->enable(display); display->activate_after_resume = false; -- GitLab From 1298977f0c8a06743dad0bf64d9a9e5a1e863fa3 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 27 May 2018 22:01:24 +0300 Subject: [PATCH 0952/1692] drm/omap: Check omap_dss_device type based on the output_type field Various functions that need to differentiate between omap_dss_device instances corresponding to displays and to internal encoders use the omap_dss_device.driver field, which is only set for display instances. This gets in the way of the omap_dss_device operations refactoring. Replace that with a check based on the output_type field which is set for all omap_dss_device instances but displays. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/base.c | 5 +++-- drivers/gpu/drm/omapdrm/dss/omapdss.h | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 2051bab30484..614331b7d702 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -164,7 +164,8 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, * Accept display entities if the display type is requested, * and output entities if the output type is requested. 
*/ - if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) && dssdev->driver) + if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) && + !dssdev->output_type) goto done; if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id && dssdev->next) @@ -223,7 +224,7 @@ void omapdss_device_disconnect(struct omap_dss_device *src, dev_dbg(dst->dev, "disconnect\n"); if (!dst->id && !omapdss_device_is_connected(dst)) { - WARN_ON(!dst->driver); + WARN_ON(dst->output_type); return; } diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index c29633765898..6d22b38f2ce5 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -406,6 +406,12 @@ struct omap_dss_device { unsigned int alias_id; enum omap_display_type type; + /* + * DSS output type that this device generates (for DSS internal devices) + * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE + * for display devices (connectors and panels) and to non-zero value for + * all other devices. + */ enum omap_display_type output_type; const char *name; -- GitLab From ede880e1825bfe267088afcf1c096ec62713f005 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 28 May 2018 17:18:26 +0300 Subject: [PATCH 0953/1692] drm/omap: connector-hdmi: Convert to the GPIO descriptors API The GPIO descriptor API is favoured over the plain GPIO API for consumer drivers. Using it simplifies the driver code. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 57 +++++++------------ 1 file changed, 20 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index e9878da5bfdb..d39480b8cf6b 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -10,12 +10,10 @@ */ #include -#include #include -#include -#include -#include #include +#include +#include #include @@ -46,7 +44,7 @@ struct panel_drv_data { struct videomode vm; - int hpd_gpio; + struct gpio_desc *hpd_gpio; }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) @@ -143,8 +141,8 @@ static bool hdmic_detect(struct omap_dss_device *dssdev) struct omap_dss_device *src = dssdev->src; bool connected; - if (gpio_is_valid(ddata->hpd_gpio)) - connected = gpio_get_value_cansleep(ddata->hpd_gpio); + if (ddata->hpd_gpio) + connected = gpiod_get_value_cansleep(ddata->hpd_gpio); else connected = src->ops->hdmi.detect(src); if (!connected && src->ops->hdmi.lost_hotplug) @@ -160,7 +158,7 @@ static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - if (gpio_is_valid(ddata->hpd_gpio)) { + if (ddata->hpd_gpio) { mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = cb; ddata->hpd_cb_data = cb_data; @@ -178,7 +176,7 @@ static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - if (gpio_is_valid(ddata->hpd_gpio)) { + if (ddata->hpd_gpio) { mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = NULL; ddata->hpd_cb_data = NULL; @@ -193,7 +191,7 @@ static void hdmic_enable_hpd(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - if (gpio_is_valid(ddata->hpd_gpio)) { + if (ddata->hpd_gpio) { mutex_lock(&ddata->hpd_lock); 
ddata->hpd_enabled = true; mutex_unlock(&ddata->hpd_lock); @@ -207,7 +205,7 @@ static void hdmic_disable_hpd(struct omap_dss_device *dssdev) struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - if (gpio_is_valid(ddata->hpd_gpio)) { + if (ddata->hpd_gpio) { mutex_lock(&ddata->hpd_lock); ddata->hpd_enabled = false; mutex_unlock(&ddata->hpd_lock); @@ -272,26 +270,11 @@ static irqreturn_t hdmic_hpd_isr(int irq, void *data) return IRQ_HANDLED; } -static int hdmic_probe_of(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct device_node *node = pdev->dev.of_node; - int gpio; - - /* HPD GPIO */ - gpio = of_get_named_gpio(node, "hpd-gpios", 0); - if (gpio_is_valid(gpio)) - ddata->hpd_gpio = gpio; - else - ddata->hpd_gpio = -ENODEV; - - return 0; -} - static int hdmic_probe(struct platform_device *pdev) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; + struct gpio_desc *gpio; int r; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); @@ -301,20 +284,20 @@ static int hdmic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->dev = &pdev->dev; - r = hdmic_probe_of(pdev); - if (r) - return r; - mutex_init(&ddata->hpd_lock); - if (gpio_is_valid(ddata->hpd_gpio)) { - r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, - GPIOF_DIR_IN, "hdmi_hpd"); - if (r) - return r; + /* HPD GPIO */ + gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); + if (IS_ERR(gpio)) { + dev_err(&pdev->dev, "failed to parse HPD gpio\n"); + return PTR_ERR(gpio); + } + + ddata->hpd_gpio = gpio; + if (ddata->hpd_gpio) { r = devm_request_threaded_irq(&pdev->dev, - gpio_to_irq(ddata->hpd_gpio), + gpiod_to_irq(ddata->hpd_gpio), NULL, hdmic_hpd_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, -- GitLab From ac2d1fcbebd6e9ff3a5ef645f88611a6ba9b4ece Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 28 May 2018 17:18:26 +0300 Subject: [PATCH 0954/1692] drm/omap: encoder-tfp410: Convert to the GPIO descriptors API The GPIO descriptor API is favoured over the plain GPIO API for consumer drivers. Using it simplifies the driver code. As the descriptor API handles the active-low flag internally we need to invert the polarity of all GPIO operations in the driver. 
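The conversion pattern, sketched standalone below, is shared by the GPIO patches in this series: descriptor values are logical ("1" means asserted), so for a powerdown line annotated GPIO_ACTIVE_LOW in the device tree the numeric values flip relative to the legacy, physical-level calls. The gpiod_* and devm_gpiod_* calls are the real consumer API; the ex_* wrappers and the assumption that the line is active-low are illustrative only, not a claim about any particular board's device tree.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int ex_pd_probe(struct device *dev, struct gpio_desc **out)
    {
            struct gpio_desc *pd;

            /* Request with powerdown asserted (logical 1); for an active-low
             * line the core drives the pin low, matching the legacy
             * GPIOF_OUT_INIT_LOW request. Returns NULL when the property is
             * absent, ERR_PTR() on real errors. */
            pd = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
            if (IS_ERR(pd))
                    return PTR_ERR(pd);

            *out = pd;
            return 0;
    }

    static void ex_enable(struct gpio_desc *pd)
    {
            if (pd)
                    gpiod_set_value_cansleep(pd, 0); /* deassert powerdown */
    }

    static void ex_disable(struct gpio_desc *pd)
    {
            if (pd)
                    gpiod_set_value_cansleep(pd, 1); /* assert powerdown */
    }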
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 51 +++++-------------- 1 file changed, 13 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 7114ea672e69..29bda16afbdc 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -13,14 +13,13 @@ #include #include #include -#include #include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; - int pd_gpio; + struct gpio_desc *pd_gpio; struct videomode vm; }; @@ -57,8 +56,8 @@ static int tfp410_enable(struct omap_dss_device *dssdev) if (r) return r; - if (gpio_is_valid(ddata->pd_gpio)) - gpio_set_value_cansleep(ddata->pd_gpio, 1); + if (ddata->pd_gpio) + gpiod_set_value_cansleep(ddata->pd_gpio, 0); dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; @@ -73,8 +72,8 @@ static void tfp410_disable(struct omap_dss_device *dssdev) if (!omapdss_device_is_enabled(dssdev)) return; - if (gpio_is_valid(ddata->pd_gpio)) - gpio_set_value_cansleep(ddata->pd_gpio, 0); + if (ddata->pd_gpio) + gpiod_set_value_cansleep(ddata->pd_gpio, 0); src->ops->disable(src); @@ -119,30 +118,11 @@ static const struct omap_dss_device_ops tfp410_ops = { .set_timings = tfp410_set_timings, }; -static int tfp410_probe_of(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct device_node *node = pdev->dev.of_node; - int gpio; - - gpio = of_get_named_gpio(node, "powerdown-gpios", 0); - - if (gpio_is_valid(gpio) || gpio == -ENOENT) { - ddata->pd_gpio = gpio; - } else { - if (gpio != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to parse PD gpio\n"); - return gpio; - } - - return 0; -} - static int tfp410_probe(struct platform_device *pdev) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; - int r; + struct gpio_desc *gpio; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) @@ -150,20 +130,15 @@ static int tfp410_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); - r = tfp410_probe_of(pdev); - if (r) - return r; - - if (gpio_is_valid(ddata->pd_gpio)) { - r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, - GPIOF_OUT_INIT_LOW, "tfp410 PD"); - if (r) { - dev_err(&pdev->dev, "Failed to request PD GPIO %d\n", - ddata->pd_gpio); - return r; - } + /* Powerdown GPIO */ + gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) { + dev_err(&pdev->dev, "failed to parse powerdown gpio\n"); + return PTR_ERR(gpio); } + ddata->pd_gpio = gpio; + dssdev = &ddata->dssdev; dssdev->ops = &tfp410_ops; dssdev->dev = &pdev->dev; -- GitLab From 57e0478a29cf280c8ea26d06d393994ff336eeb6 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 28 May 2018 17:18:26 +0300 Subject: [PATCH 0955/1692] drm/omap: panel-nec-nl8048hl11: Convert to the GPIO descriptors API The GPIO descriptor API is favoured over the plain GPIO API for consumer drivers. Using it simplifies the driver code. The reset GPIO is mandatory, so drop conditional tests through the driver. The qvga GPIO is unused, so drop it completely. 
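For a mandatory line the non-optional request is used instead; a minimal sketch is below, where the devm_gpiod_get() call and the "reset" con_id follow the probe hunk in this patch and only the ex_* wrapper is invented. A successful request never yields NULL, which is what lets the driver drop its gpio_is_valid() guards, and ERR_PTR() failures (including -EPROBE_DEFER) are simply propagated.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int ex_reset_probe(struct device *dev, struct gpio_desc **reset)
    {
            *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(*reset)) /* missing property, -EPROBE_DEFER, ... */
                    return PTR_ERR(*reset);

            /* Unlike the optional variant, success guarantees a usable
             * descriptor, so the enable/disable paths call
             * gpiod_set_value_cansleep() with no validity check. */
            return 0;
    }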
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/panel-nec-nl8048hl11.c | 54 ++++--------------- 1 file changed, 11 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index b4dba55b678b..767ffd2fa0f4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -11,11 +11,10 @@ * (at your option) any later version. */ -#include #include -#include #include -#include +#include +#include #include "../dss/omapdss.h" @@ -24,8 +23,7 @@ struct panel_drv_data { struct videomode vm; - int res_gpio; - int qvga_gpio; + struct gpio_desc *res_gpio; struct spi_device *spi; }; @@ -140,8 +138,7 @@ static int nec_8048_enable(struct omap_dss_device *dssdev) if (r) return r; - if (gpio_is_valid(ddata->res_gpio)) - gpio_set_value_cansleep(ddata->res_gpio, 1); + gpiod_set_value_cansleep(ddata->res_gpio, 1); dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; @@ -156,8 +153,7 @@ static void nec_8048_disable(struct omap_dss_device *dssdev) if (!omapdss_device_is_enabled(dssdev)) return; - if (gpio_is_valid(ddata->res_gpio)) - gpio_set_value_cansleep(ddata->res_gpio, 0); + gpiod_set_value_cansleep(ddata->res_gpio, 0); src->ops->disable(src); @@ -203,29 +199,11 @@ static const struct omap_dss_driver nec_8048_ops = { .check_timings = nec_8048_check_timings, }; -static int nec_8048_probe_of(struct spi_device *spi) -{ - struct device_node *node = spi->dev.of_node; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - int gpio; - - gpio = of_get_named_gpio(node, "reset-gpios", 0); - if (!gpio_is_valid(gpio)) { - dev_err(&spi->dev, "failed to parse enable gpio\n"); - return gpio; - } - ddata->res_gpio = gpio; - - /* XXX the panel spec doesn't mention any QVGA pin?? */ - ddata->qvga_gpio = -ENOENT; - - return 0; -} - static int nec_8048_probe(struct spi_device *spi) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; + struct gpio_desc *gpio; int r; dev_dbg(&spi->dev, "%s\n", __func__); @@ -249,23 +227,13 @@ static int nec_8048_probe(struct spi_device *spi) ddata->spi = spi; - r = nec_8048_probe_of(spi); - if (r) - return r; - - if (gpio_is_valid(ddata->qvga_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, - GPIOF_OUT_INIT_HIGH, "lcd QVGA"); - if (r) - return r; + gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) { + dev_err(&spi->dev, "failed to get reset gpio\n"); + return PTR_ERR(gpio); } - if (gpio_is_valid(ddata->res_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->res_gpio, - GPIOF_OUT_INIT_LOW, "lcd RES"); - if (r) - return r; - } + ddata->res_gpio = gpio; ddata->vm = nec_8048_panel_vm; -- GitLab From aec338cbf8c3c86e318eac4d896087ac78fc85b9 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 28 May 2018 17:18:26 +0300 Subject: [PATCH 0956/1692] drm/omap: panel-sony-acx565akm: Convert to the GPIO descriptors API The GPIO descriptor API is favoured over the plain GPIO API for consumer drivers. Using it simplifies the driver code. 
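What makes the optional variant tidy here is its return convention: devm_gpiod_get_optional() returns NULL when the property is simply absent and an ERR_PTR() only for genuine failures, so the runtime guards collapse to a plain pointer test. A minimal sketch follows; the gpiod calls are the real API, the surrounding names are invented.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static void ex_set_reset(struct gpio_desc *reset, int assert)
    {
            if (reset) /* NULL simply means "no reset line wired" */
                    gpiod_set_value(reset, assert);
    }

    static int ex_panel_probe(struct device *dev, struct gpio_desc **reset)
    {
            *reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(*reset)) /* only real errors land here */
                    return PTR_ERR(*reset);

            ex_set_reset(*reset, 1); /* no-op when the line is absent */
            return 0;
    }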
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/panel-sony-acx565akm.c | 56 +++++++------------ 1 file changed, 21 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 036fd8e57074..3eca39821d79 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -20,17 +20,15 @@ * this program. If not, see . */ +#include +#include +#include +#include #include #include #include -#include -#include -#include #include -#include -#include -#include -#include +#include #include "../dss/omapdss.h" @@ -65,7 +63,7 @@ struct panel_drv_data { struct omap_dss_device dssdev; - int reset_gpio; + struct gpio_desc *reset_gpio; struct videomode vm; @@ -536,8 +534,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) /*FIXME tweak me */ msleep(50); - if (gpio_is_valid(ddata->reset_gpio)) - gpio_set_value(ddata->reset_gpio, 1); + if (ddata->reset_gpio) + gpiod_set_value(ddata->reset_gpio, 1); if (ddata->enabled) { dev_dbg(&ddata->spi->dev, "panel already enabled\n"); @@ -586,8 +584,8 @@ static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) */ msleep(50); - if (gpio_is_valid(ddata->reset_gpio)) - gpio_set_value(ddata->reset_gpio, 0); + if (ddata->reset_gpio) + gpiod_set_value(ddata->reset_gpio, 0); /* FIXME need to tweak this delay */ msleep(100); @@ -674,16 +672,6 @@ static const struct omap_dss_driver acx565akm_ops = { .check_timings = acx565akm_check_timings, }; -static int acx565akm_probe_of(struct spi_device *spi) -{ - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct device_node *np = spi->dev.of_node; - - ddata->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0); - - return 0; -} - static int acx565akm_probe(struct spi_device *spi) { struct panel_drv_data *ddata; @@ -691,6 +679,7 @@ static int acx565akm_probe(struct spi_device *spi) struct backlight_device *bldev; int max_brightness, brightness; struct backlight_properties props; + struct gpio_desc *gpio; int r; dev_dbg(&spi->dev, "%s\n", __func__); @@ -707,19 +696,16 @@ static int acx565akm_probe(struct spi_device *spi) mutex_init(&ddata->mutex); - r = acx565akm_probe_of(spi); - if (r) - return r; - - if (gpio_is_valid(ddata->reset_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->reset_gpio, - GPIOF_OUT_INIT_LOW, "lcd reset"); - if (r) - return r; + gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) { + dev_err(&spi->dev, "failed to parse reset gpio\n"); + return PTR_ERR(gpio); } - if (gpio_is_valid(ddata->reset_gpio)) - gpio_set_value(ddata->reset_gpio, 1); + ddata->reset_gpio = gpio; + + if (ddata->reset_gpio) + gpiod_set_value(ddata->reset_gpio, 1); /* * After reset we have to wait 5 msec before the first @@ -731,8 +717,8 @@ static int acx565akm_probe(struct spi_device *spi) r = panel_detect(ddata); - if (!ddata->enabled && gpio_is_valid(ddata->reset_gpio)) - gpio_set_value(ddata->reset_gpio, 0); + if (!ddata->enabled && ddata->reset_gpio) + gpiod_set_value(ddata->reset_gpio, 0); if (r) { dev_err(&spi->dev, "%s panel detect error\n", __func__); -- GitLab From 2167f9e28a30a4b129b2464fbc5ee8c15e254ff1 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 28 May 2018 17:16:52 +0300 Subject: [PATCH 0957/1692] drm/omap: panel-tpo-td028ttec1: Drop unneeded linux/gpio.h header The driver doesn't use GPIOs and 
thus doesn't need to include the linux/gpio.h header. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index fc08f71b95a0..ecb903a93cf4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -27,7 +27,6 @@ #include #include #include -#include #include "../dss/omapdss.h" -- GitLab From e7df6571024ba791c6521efba5b3875724c47af6 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 28 May 2018 17:18:26 +0300 Subject: [PATCH 0958/1692] drm/omap: panel-tpo-td043mtea1: Convert to the GPIO descriptors API The GPIO descriptor API is favoured over the plain GPIO API for consumer drivers. Using it simplifies the driver code. As the descriptor API handles the active-low flag internally we need to invert the polarity of all GPIO operations in the driver. Rename the nreset_gpio field to reset_gpio to reflect that. The reset GPIO is mandatory, so drop conditional tests through the driver. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/panel-tpo-td043mtea1.c | 52 +++++-------------- 1 file changed, 14 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 34531169c166..1521812ab15b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -10,14 +10,13 @@ * (at your option) any later version. 
*/ -#include #include -#include -#include -#include #include +#include +#include +#include #include -#include +#include #include "../dss/omapdss.h" @@ -59,7 +58,7 @@ struct panel_drv_data { struct spi_device *spi; struct regulator *vcc_reg; - int nreset_gpio; + struct gpio_desc *reset_gpio; u16 gamma[12]; u32 mode; u32 vmirror:1; @@ -282,8 +281,7 @@ static int tpo_td043_power_on(struct panel_drv_data *ddata) /* wait for panel to stabilize */ msleep(160); - if (gpio_is_valid(ddata->nreset_gpio)) - gpio_set_value(ddata->nreset_gpio, 1); + gpiod_set_value(ddata->reset_gpio, 0); tpo_td043_write(ddata->spi, 2, TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING); @@ -305,8 +303,7 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata) tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM); - if (gpio_is_valid(ddata->nreset_gpio)) - gpio_set_value(ddata->nreset_gpio, 0); + gpiod_set_value(ddata->reset_gpio, 1); /* wait for at least 2 vsyncs before cutting off power */ msleep(50); @@ -419,26 +416,11 @@ static const struct omap_dss_driver tpo_td043_ops = { .check_timings = tpo_td043_check_timings, }; -static int tpo_td043_probe_of(struct spi_device *spi) -{ - struct device_node *node = spi->dev.of_node; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - int gpio; - - gpio = of_get_named_gpio(node, "reset-gpios", 0); - if (!gpio_is_valid(gpio)) { - dev_err(&spi->dev, "failed to parse enable gpio\n"); - return gpio; - } - ddata->nreset_gpio = gpio; - - return 0; -} - static int tpo_td043_probe(struct spi_device *spi) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; + struct gpio_desc *gpio; int r; dev_dbg(&spi->dev, "%s\n", __func__); @@ -460,10 +442,6 @@ static int tpo_td043_probe(struct spi_device *spi) ddata->spi = spi; - r = tpo_td043_probe_of(spi); - if (r) - return r; - ddata->mode = TPO_R02_MODE_800x480; memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma)); @@ -473,16 +451,14 @@ static int tpo_td043_probe(struct spi_device *spi) return PTR_ERR(ddata->vcc_reg); } - if (gpio_is_valid(ddata->nreset_gpio)) { - r = devm_gpio_request_one(&spi->dev, - ddata->nreset_gpio, GPIOF_OUT_INIT_LOW, - "lcd reset"); - if (r < 0) { - dev_err(&spi->dev, "couldn't request reset GPIO\n"); - return r; - } + gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) { + dev_err(&spi->dev, "failed to get reset gpio\n"); + return PTR_ERR(gpio); } + ddata->reset_gpio = gpio; + r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group); if (r) { dev_err(&spi->dev, "failed to create sysfs files\n"); -- GitLab From 83910ad3f51fbc0e6546b60aafa90697b5127a8a Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 1 Jun 2018 19:45:01 +0300 Subject: [PATCH 0959/1692] drm/omap: Move most omap_dss_driver operations to omap_dss_device_ops omap_dss_device instances have two ops structures, omap_dss_driver and omap_dss_device_ops. The former is used for devices at the end of the pipeline (a.k.a. display devices), and the latter for intermediate devices. Having two sets of operations isn't convenient as code that iterates over omap_dss_device instances need to take them both into account. There's currently a reasonably small amount of such code, but more will be introduced to move the driver away from recursive operations. To simplify current and future code, move all operations that are not specific to the display device to the omap_dss_device_ops. 
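The practical effect on generic code is easiest to see as a dispatch change. Below is a standalone model with invented ex_* names; the real equivalent is the base.c hunk in this patch, where the driver/ops branch in omapdss_device_connect() collapses to a single call.

    struct ex_device_ops     { int (*connect)(void *dev); };
    struct ex_display_driver { int (*connect)(void *dev); };

    struct ex_device {
            const struct ex_device_ops *ops;        /* now set for every device */
            const struct ex_display_driver *driver; /* displays only, shrinking */
    };

    /* Before: every generic caller had to know which table to use. */
    static int ex_connect_before(struct ex_device *d)
    {
            return d->driver ? d->driver->connect(d) : d->ops->connect(d);
    }

    /* After: one dispatch path for displays and intermediate encoders alike. */
    static int ex_connect_after(struct ex_device *d)
    {
            return d->ops->connect(d);
    }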
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 4 +- .../gpu/drm/omapdrm/displays/connector-dvi.c | 4 +- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 31 ++++++----- .../drm/omapdrm/displays/encoder-tpd12s015.c | 14 ++--- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 4 +- .../gpu/drm/omapdrm/displays/panel-dsi-cm.c | 12 +++-- .../displays/panel-lgphilips-lb035q02.c | 4 +- .../omapdrm/displays/panel-nec-nl8048hl11.c | 4 +- .../displays/panel-sharp-ls037v7dw01.c | 4 +- .../omapdrm/displays/panel-sony-acx565akm.c | 4 +- .../omapdrm/displays/panel-tpo-td028ttec1.c | 4 +- .../omapdrm/displays/panel-tpo-td043mtea1.c | 4 +- drivers/gpu/drm/omapdrm/dss/base.c | 12 +---- drivers/gpu/drm/omapdrm/dss/dss.c | 2 +- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 3 +- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 3 +- drivers/gpu/drm/omapdrm/dss/omapdss.h | 54 +++++-------------- drivers/gpu/drm/omapdrm/omap_connector.c | 37 ++++++------- drivers/gpu/drm/omapdrm/omap_crtc.c | 2 +- drivers/gpu/drm/omapdrm/omap_drv.c | 12 ++--- drivers/gpu/drm/omapdrm/omap_encoder.c | 25 ++++----- 21 files changed, 106 insertions(+), 137 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index d59b4f2e22dc..563fc7e618b3 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -119,7 +119,7 @@ static int tvc_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static const struct omap_dss_driver tvc_driver = { +static const struct omap_dss_device_ops tvc_ops = { .connect = tvc_connect, .disconnect = tvc_disconnect, @@ -146,7 +146,7 @@ static int tvc_probe(struct platform_device *pdev) ddata->vm = tvc_pal_vm; dssdev = &ddata->dssdev; - dssdev->driver = &tvc_driver; + dssdev->ops = &tvc_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 39e7d0be887f..a639a86cd47b 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -265,7 +265,7 @@ static void dvic_disable_hpd(struct omap_dss_device *dssdev) mutex_unlock(&ddata->hpd_lock); } -static const struct omap_dss_driver dvic_driver = { +static const struct omap_dss_device_ops dvic_ops = { .connect = dvic_connect, .disconnect = dvic_disconnect, @@ -367,7 +367,7 @@ static int dvic_probe(struct platform_device *pdev) ddata->vm = dvic_default_vm; dssdev = &ddata->dssdev; - dssdev->driver = &dvic_driver; + dssdev->ops = &dvic_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index d39480b8cf6b..54bfd7156360 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -132,7 +132,7 @@ static int hdmic_read_edid(struct omap_dss_device *dssdev, { struct omap_dss_device *src = dssdev->src; - return src->ops->hdmi.read_edid(src, edid, len); + return src->ops->read_edid(src, edid, len); } static bool hdmic_detect(struct omap_dss_device *dssdev) @@ -144,7 +144,7 @@ static bool hdmic_detect(struct omap_dss_device *dssdev) if (ddata->hpd_gpio) connected = 
gpiod_get_value_cansleep(ddata->hpd_gpio); else - connected = src->ops->hdmi.detect(src); + connected = src->ops->detect(src); if (!connected && src->ops->hdmi.lost_hotplug) src->ops->hdmi.lost_hotplug(src); return connected; @@ -164,8 +164,8 @@ static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, ddata->hpd_cb_data = cb_data; mutex_unlock(&ddata->hpd_lock); return 0; - } else if (src->ops->hdmi.register_hpd_cb) { - return src->ops->hdmi.register_hpd_cb(src, cb, cb_data); + } else if (src->ops->register_hpd_cb) { + return src->ops->register_hpd_cb(src, cb, cb_data); } return -ENOTSUPP; @@ -181,8 +181,8 @@ static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) ddata->hpd_cb = NULL; ddata->hpd_cb_data = NULL; mutex_unlock(&ddata->hpd_lock); - } else if (src->ops->hdmi.unregister_hpd_cb) { - src->ops->hdmi.unregister_hpd_cb(src); + } else if (src->ops->unregister_hpd_cb) { + src->ops->unregister_hpd_cb(src); } } @@ -195,8 +195,8 @@ static void hdmic_enable_hpd(struct omap_dss_device *dssdev) mutex_lock(&ddata->hpd_lock); ddata->hpd_enabled = true; mutex_unlock(&ddata->hpd_lock); - } else if (src->ops->hdmi.enable_hpd) { - src->ops->hdmi.enable_hpd(src); + } else if (src->ops->enable_hpd) { + src->ops->enable_hpd(src); } } @@ -209,8 +209,8 @@ static void hdmic_disable_hpd(struct omap_dss_device *dssdev) mutex_lock(&ddata->hpd_lock); ddata->hpd_enabled = false; mutex_unlock(&ddata->hpd_lock); - } else if (src->ops->hdmi.disable_hpd) { - src->ops->hdmi.disable_hpd(src); + } else if (src->ops->disable_hpd) { + src->ops->disable_hpd(src); } } @@ -229,7 +229,7 @@ static int hdmic_set_infoframe(struct omap_dss_device *dssdev, return src->ops->hdmi.set_infoframe(src, avi); } -static const struct omap_dss_driver hdmic_driver = { +static const struct omap_dss_device_ops hdmic_ops = { .connect = hdmic_connect, .disconnect = hdmic_disconnect, @@ -246,8 +246,11 @@ static const struct omap_dss_driver hdmic_driver = { .unregister_hpd_cb = hdmic_unregister_hpd_cb, .enable_hpd = hdmic_enable_hpd, .disable_hpd = hdmic_disable_hpd, - .set_hdmi_mode = hdmic_set_hdmi_mode, - .set_hdmi_infoframe = hdmic_set_infoframe, + + .hdmi = { + .set_hdmi_mode = hdmic_set_hdmi_mode, + .set_infoframe = hdmic_set_infoframe, + }, }; static irqreturn_t hdmic_hpd_isr(int irq, void *data) @@ -309,7 +312,7 @@ static int hdmic_probe(struct platform_device *pdev) ddata->vm = hdmic_default_vm; dssdev = &ddata->dssdev; - dssdev->driver = &hdmic_driver; + dssdev->ops = &hdmic_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index c99e55487d38..0cc7bd656473 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -125,7 +125,7 @@ static int tpd_read_edid(struct omap_dss_device *dssdev, if (!gpiod_get_value_cansleep(ddata->hpd_gpio)) return -ENODEV; - return src->ops->hdmi.read_edid(src, edid, len); + return src->ops->read_edid(src, edid, len); } static bool tpd_detect(struct omap_dss_device *dssdev) @@ -205,14 +205,14 @@ static const struct omap_dss_device_ops tpd_ops = { .disable = tpd_disable, .check_timings = tpd_check_timings, .set_timings = tpd_set_timings, + .read_edid = tpd_read_edid, + .detect = tpd_detect, + .register_hpd_cb = tpd_register_hpd_cb, + .unregister_hpd_cb = tpd_unregister_hpd_cb, + .enable_hpd = tpd_enable_hpd, + .disable_hpd = tpd_disable_hpd, .hdmi = { 
- .read_edid = tpd_read_edid, - .detect = tpd_detect, - .register_hpd_cb = tpd_register_hpd_cb, - .unregister_hpd_cb = tpd_unregister_hpd_cb, - .enable_hpd = tpd_enable_hpd, - .disable_hpd = tpd_disable_hpd, .set_infoframe = tpd_set_infoframe, .set_hdmi_mode = tpd_set_hdmi_mode, }, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 91f99c95c4c4..c03877af9cdb 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -122,7 +122,7 @@ static int panel_dpi_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static const struct omap_dss_driver panel_dpi_ops = { +static const struct omap_dss_device_ops panel_dpi_ops = { .connect = panel_dpi_connect, .disconnect = panel_dpi_disconnect, @@ -196,7 +196,7 @@ static int panel_dpi_probe(struct platform_device *pdev) dssdev = &ddata->dssdev; dssdev->dev = &pdev->dev; - dssdev->driver = &panel_dpi_ops; + dssdev->ops = &panel_dpi_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index e30f0ab315f5..29692a5217c5 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1179,18 +1179,21 @@ static void dsicm_get_size(struct omap_dss_device *dssdev, *height = ddata->height_mm; } -static const struct omap_dss_driver dsicm_ops = { +static const struct omap_dss_device_ops dsicm_ops = { .connect = dsicm_connect, .disconnect = dsicm_disconnect, .enable = dsicm_enable, .disable = dsicm_disable, + .get_timings = dsicm_get_timings, + .check_timings = dsicm_check_timings, +}; + +static const struct omap_dss_driver dsicm_dss_driver = { .update = dsicm_update, .sync = dsicm_sync, - .get_timings = dsicm_get_timings, - .check_timings = dsicm_check_timings, .get_size = dsicm_get_size, .enable_te = dsicm_enable_te, @@ -1299,7 +1302,8 @@ static int dsicm_probe(struct platform_device *pdev) dssdev = &ddata->dssdev; dssdev->dev = dev; - dssdev->driver = &dsicm_ops; + dssdev->ops = &dsicm_ops; + dssdev->driver = &dsicm_dss_driver; dssdev->type = OMAP_DISPLAY_TYPE_DSI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 66763a12fc3d..62576e4f89e3 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -199,7 +199,7 @@ static int lb035q02_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static const struct omap_dss_driver lb035q02_ops = { +static const struct omap_dss_device_ops lb035q02_ops = { .connect = lb035q02_connect, .disconnect = lb035q02_disconnect, @@ -249,7 +249,7 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &lb035q02_ops; + dssdev->ops = &lb035q02_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 767ffd2fa0f4..9f34cf02a114 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -187,7 +187,7 @@ 
static int nec_8048_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static const struct omap_dss_driver nec_8048_ops = { +static const struct omap_dss_device_ops nec_8048_ops = { .connect = nec_8048_connect, .disconnect = nec_8048_disconnect, @@ -239,7 +239,7 @@ static int nec_8048_probe(struct spi_device *spi) dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &nec_8048_ops; + dssdev->ops = &nec_8048_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 7fbdf3ec0113..9ee6b8376916 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -161,7 +161,7 @@ static int sharp_ls_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static const struct omap_dss_driver sharp_ls_ops = { +static const struct omap_dss_device_ops sharp_ls_ops = { .connect = sharp_ls_connect, .disconnect = sharp_ls_disconnect, @@ -247,7 +247,7 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev = &ddata->dssdev; dssdev->dev = &pdev->dev; - dssdev->driver = &sharp_ls_ops; + dssdev->ops = &sharp_ls_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 3eca39821d79..d0a8d2810c33 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -660,7 +660,7 @@ static int acx565akm_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static const struct omap_dss_driver acx565akm_ops = { +static const struct omap_dss_device_ops acx565akm_ops = { .connect = acx565akm_connect, .disconnect = acx565akm_disconnect, @@ -762,7 +762,7 @@ static int acx565akm_probe(struct spi_device *spi) dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &acx565akm_ops; + dssdev->ops = &acx565akm_ops; dssdev->type = OMAP_DISPLAY_TYPE_SDI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index ecb903a93cf4..1b0e42daa296 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -330,7 +330,7 @@ static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static const struct omap_dss_driver td028ttec1_ops = { +static const struct omap_dss_device_ops td028ttec1_ops = { .connect = td028ttec1_panel_connect, .disconnect = td028ttec1_panel_disconnect, @@ -371,7 +371,7 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &td028ttec1_ops; + dssdev->ops = &td028ttec1_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 1521812ab15b..b211a7809a26 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ 
-404,7 +404,7 @@ static int tpo_td043_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static const struct omap_dss_driver tpo_td043_ops = { +static const struct omap_dss_device_ops tpo_td043_ops = { .connect = tpo_td043_connect, .disconnect = tpo_td043_disconnect, @@ -469,7 +469,7 @@ static int tpo_td043_probe(struct spi_device *spi) dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &tpo_td043_ops; + dssdev->ops = &tpo_td043_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 614331b7d702..472f56e3de70 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -198,11 +198,7 @@ int omapdss_device_connect(struct dss_device *dss, dst->dss = dss; - if (dst->driver) - ret = dst->driver->connect(src, dst); - else - ret = dst->ops->connect(src, dst); - + ret = dst->ops->connect(src, dst); if (ret < 0) { dst->dss = NULL; return ret; @@ -238,11 +234,7 @@ void omapdss_device_disconnect(struct omap_dss_device *src, WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED); - if (dst->driver) - dst->driver->disconnect(src, dst); - else - dst->ops->disconnect(src, dst); - + dst->ops->disconnect(src, dst); dst->dss = NULL; } EXPORT_SYMBOL_GPL(omapdss_device_disconnect); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 14ffe23b5ecf..19fc4dfc429e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1553,7 +1553,7 @@ static void dss_shutdown(struct platform_device *pdev) for_each_dss_display(dssdev) { if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) - dssdev->driver->disable(dssdev); + dssdev->ops->disable(dssdev); } } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index c4fcdc9ed62d..bebce93fed3e 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -511,8 +511,9 @@ static const struct omap_dss_device_ops hdmi_ops = { .check_timings = hdmi_display_check_timing, .set_timings = hdmi_display_set_timing, + .read_edid = hdmi_read_edid, + .hdmi = { - .read_edid = hdmi_read_edid, .lost_hotplug = hdmi_lost_hotplug, .set_infoframe = hdmi_set_infoframe, .set_hdmi_mode = hdmi_set_hdmi_mode, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 889c31745492..7c07e0208107 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -505,8 +505,9 @@ static const struct omap_dss_device_ops hdmi_ops = { .check_timings = hdmi_display_check_timing, .set_timings = hdmi_display_set_timing, + .read_edid = hdmi_read_edid, + .hdmi = { - .read_edid = hdmi_read_edid, .set_infoframe = hdmi_set_infoframe, .set_hdmi_mode = hdmi_set_hdmi_mode, }, diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 6d22b38f2ce5..60e4269e6c88 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -297,18 +297,7 @@ struct omap_dss_writeback_info { }; struct omapdss_hdmi_ops { - int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); void (*lost_hotplug)(struct omap_dss_device *dssdev); - bool (*detect)(struct omap_dss_device *dssdev); - - int (*register_hpd_cb)(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, - enum drm_connector_status status), - void *cb_data); - void (*unregister_hpd_cb)(struct 
omap_dss_device *dssdev); - void (*enable_hpd)(struct omap_dss_device *dssdev); - void (*disable_hpd)(struct omap_dss_device *dssdev); - int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); int (*set_infoframe)(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi); @@ -376,9 +365,23 @@ struct omap_dss_device_ops { int (*check_timings)(struct omap_dss_device *dssdev, struct videomode *vm); + void (*get_timings)(struct omap_dss_device *dssdev, + struct videomode *vm); void (*set_timings)(struct omap_dss_device *dssdev, struct videomode *vm); + bool (*detect)(struct omap_dss_device *dssdev); + + int (*register_hpd_cb)(struct omap_dss_device *dssdev, + void (*cb)(void *cb_data, + enum drm_connector_status status), + void *cb_data); + void (*unregister_hpd_cb)(struct omap_dss_device *dssdev); + void (*enable_hpd)(struct omap_dss_device *dssdev); + void (*disable_hpd)(struct omap_dss_device *dssdev); + + int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + union { const struct omapdss_hdmi_ops hdmi; const struct omapdss_dsi_ops dsi; @@ -440,14 +443,6 @@ struct omap_dss_device { }; struct omap_dss_driver { - int (*connect)(struct omap_dss_device *src, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *src, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *display); - void (*disable)(struct omap_dss_device *display); - int (*update)(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h); int (*sync)(struct omap_dss_device *dssdev); @@ -459,29 +454,8 @@ struct omap_dss_driver { void *buf, size_t size, u16 x, u16 y, u16 w, u16 h); - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); void (*get_size)(struct omap_dss_device *dssdev, unsigned int *width, unsigned int *height); - - int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); - bool (*detect)(struct omap_dss_device *dssdev); - - int (*register_hpd_cb)(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, - enum drm_connector_status status), - void *cb_data); - void (*unregister_hpd_cb)(struct omap_dss_device *dssdev); - void (*enable_hpd)(struct omap_dss_device *dssdev); - void (*disable_hpd)(struct omap_dss_device *dssdev); - - int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); - int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev, - const struct hdmi_avi_infoframe *avi); }; struct dss_device *omapdss_get_dss(void); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 69ebb0fa1df5..f9cc04c7c0fa 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -62,11 +62,10 @@ static enum drm_connector_status omap_connector_detect( { struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev = omap_connector->dssdev; - const struct omap_dss_driver *dssdrv = dssdev->driver; enum drm_connector_status ret; - if (dssdrv->detect) { - if (dssdrv->detect(dssdev)) + if (dssdev->ops->detect) { + if (dssdev->ops->detect(dssdev)) ret = connector_status_connected; else ret = connector_status_disconnected; @@ -91,8 +90,8 @@ static void omap_connector_destroy(struct drm_connector *connector) DBG("%s", omap_connector->dssdev->name); if (connector->polled == DRM_CONNECTOR_POLL_HPD && - 
dssdev->driver->unregister_hpd_cb) { - dssdev->driver->unregister_hpd_cb(dssdev); + dssdev->ops->unregister_hpd_cb) { + dssdev->ops->unregister_hpd_cb(dssdev); } drm_connector_unregister(connector); drm_connector_cleanup(connector); @@ -107,7 +106,6 @@ static int omap_connector_get_modes(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev = omap_connector->dssdev; - const struct omap_dss_driver *dssdrv = dssdev->driver; struct drm_device *dev = connector->dev; int n = 0; @@ -118,13 +116,13 @@ static int omap_connector_get_modes(struct drm_connector *connector) * LCD panels) we just return a single mode corresponding to the * currently configured timings: */ - if (dssdrv->read_edid) { + if (dssdev->ops->read_edid) { void *edid = kzalloc(MAX_EDID, GFP_KERNEL); if (!edid) return 0; - if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) && + if ((dssdev->ops->read_edid(dssdev, edid, MAX_EDID) > 0) && drm_edid_is_valid(edid)) { drm_connector_update_edid_property( connector, edid); @@ -145,7 +143,7 @@ static int omap_connector_get_modes(struct drm_connector *connector) if (!mode) return 0; - dssdrv->get_timings(dssdev, &vm); + dssdev->ops->get_timings(dssdev, &vm); drm_display_mode_from_videomode(&vm, mode); @@ -153,8 +151,8 @@ static int omap_connector_get_modes(struct drm_connector *connector) drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); - if (dssdrv->get_size) { - dssdrv->get_size(dssdev, + if (dssdev->driver && dssdev->driver->get_size) { + dssdev->driver->get_size(dssdev, &connector->display_info.width_mm, &connector->display_info.height_mm); } @@ -170,7 +168,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector, { struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev = omap_connector->dssdev; - const struct omap_dss_driver *dssdrv = dssdev->driver; struct videomode vm = {0}; struct drm_device *dev = connector->dev; struct drm_display_mode *new_mode; @@ -184,12 +181,12 @@ static int omap_connector_mode_valid(struct drm_connector *connector, * a fixed resolution panel, check if the timings match with the * panel's timings */ - if (dssdrv->check_timings) { - r = dssdrv->check_timings(dssdev, &vm); + if (dssdev->ops->check_timings) { + r = dssdev->ops->check_timings(dssdev, &vm); } else { struct videomode t = {0}; - dssdrv->get_timings(dssdev, &t); + dssdev->ops->get_timings(dssdev, &t); /* * Ignore the flags, as we don't get them from @@ -268,10 +265,10 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, connector_type); drm_connector_helper_add(connector, &omap_connector_helper_funcs); - if (dssdev->driver->register_hpd_cb) { - int ret = dssdev->driver->register_hpd_cb(dssdev, - omap_connector_hpd_cb, - omap_connector); + if (dssdev->ops->register_hpd_cb) { + int ret = dssdev->ops->register_hpd_cb(dssdev, + omap_connector_hpd_cb, + omap_connector); if (!ret) hpd_supported = true; else if (ret != -ENOTSUPP) @@ -281,7 +278,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, if (hpd_supported) connector->polled = DRM_CONNECTOR_POLL_HPD; - else if (dssdev->driver->detect) + else if (dssdev->ops->detect) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; else diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 80498dcde6d7..197d05312306 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c 
@@ -458,7 +458,7 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) if (dssdev) { struct videomode vm = {0}; - dssdev->driver->get_timings(dssdev, &vm); + dssdev->ops->get_timings(dssdev, &vm); omap_crtc->vm.flags |= vm.flags & flags_mask; } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index d0f6929857bb..843222118fa7 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -378,8 +378,8 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_dss_device *display = priv->pipes[i].display; - if (display->driver->enable_hpd) - display->driver->enable_hpd(display); + if (display->ops->enable_hpd) + display->ops->enable_hpd(display); } } @@ -394,8 +394,8 @@ static void omap_modeset_disable_external_hpd(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_dss_device *display = priv->pipes[i].display; - if (display->driver->disable_hpd) - display->driver->disable_hpd(display); + if (display->ops->disable_hpd) + display->ops->disable_hpd(display); } } @@ -724,7 +724,7 @@ static int omap_drm_suspend_all_displays(struct drm_device *ddev) struct omap_dss_device *display = priv->pipes[i].display; if (display->state == OMAP_DSS_DISPLAY_ACTIVE) { - display->driver->disable(display); + display->ops->disable(display); display->activate_after_resume = true; } else { display->activate_after_resume = false; @@ -743,7 +743,7 @@ static int omap_drm_resume_all_displays(struct drm_device *ddev) struct omap_dss_device *display = priv->pipes[i].display; if (display->activate_after_resume) { - display->driver->enable(display); + display->ops->enable(display); display->activate_after_resume = false; } } diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index ec0f451e3b36..7bbf3700e393 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -77,16 +77,16 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, } } - if (dssdev->driver->set_hdmi_mode) - dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode); + if (dssdev->ops->hdmi.set_hdmi_mode) + dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode); - if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) { + if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) { struct hdmi_avi_infoframe avi; r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode, false); if (r == 0) - dssdev->driver->set_hdmi_infoframe(dssdev, &avi); + dssdev->ops->hdmi.set_infoframe(dssdev, &avi); } } @@ -94,9 +94,8 @@ static void omap_encoder_disable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->dssdev; - const struct omap_dss_driver *dssdrv = dssdev->driver; - dssdrv->disable(dssdev); + dssdev->ops->disable(dssdev); } static int omap_encoder_update(struct drm_encoder *encoder, @@ -106,15 +105,14 @@ static int omap_encoder_update(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->dssdev; - const struct omap_dss_driver *dssdrv = dssdev->driver; int ret; - if (dssdrv->check_timings) { - ret = dssdrv->check_timings(dssdev, vm); + if (dssdev->ops->check_timings) { + ret = dssdev->ops->check_timings(dssdev, vm); } else { struct videomode t = {0}; - dssdrv->get_timings(dssdev, &t); + dssdev->ops->get_timings(dssdev, 
&t); if (memcmp(vm, &t, sizeof(*vm))) ret = -EINVAL; @@ -127,8 +125,8 @@ static int omap_encoder_update(struct drm_encoder *encoder, return ret; } - if (dssdrv->set_timings) - dssdrv->set_timings(dssdev, vm); + if (dssdev->ops->set_timings) + dssdev->ops->set_timings(dssdev, vm); return 0; } @@ -137,13 +135,12 @@ static void omap_encoder_enable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->dssdev; - const struct omap_dss_driver *dssdrv = dssdev->driver; int r; omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc), omap_crtc_timings(encoder->crtc)); - r = dssdrv->enable(dssdev); + r = dssdev->ops->enable(dssdev); if (r) dev_err(encoder->dev->dev, "Failed to enable display '%s': %d\n", -- GitLab From 09e5bb6d5b94929d9b0c17eec56c2f5d19886514 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 28 May 2018 16:03:13 +0300 Subject: [PATCH 0960/1692] drm/omap: dss: Add device operations flags When an omap_dss_device operation can be implemented in multiple places in a chain of devices, it is important to find out which device to address to perform the operation. This is currently done by calling the operation on the display device at the end of the chain, and recursively delegating the operation to the previous device if it can't be performed locally. The drawback of this approach is an increased complexity in omap_dss_device drivers. In order to simplify the drivers, we will switch from a recursive model to an iterative model, centralizing the complexity in a single location. This requires knowing which operations an omap_dss_device supports at runtime. We can already test which operations are implemented by checking the operation pointer, but implemented operations can require resources whose availability varies between systems. For instance a hot-plug signal from a connector can be wired to a GPIO or to a bridge chip. Add operation flags that can be set in the omap_dss_device structure by drivers to signal support for operations.
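To illustrate how these flags are meant to be consumed (sketch only: the helper below is hypothetical and not part of this patch, although a later patch in this series adds an equivalent iteration to the omapdrm connector code), a consumer can walk the chain from the display device back towards the output and pick the first device that advertises the operation it needs:

	/*
	 * Illustrative sketch: return the first device in the chain, starting
	 * from the display, whose ops_flags advertise the requested operation.
	 */
	static struct omap_dss_device *
	example_find_device_with_op(struct omap_dss_device *display,
				    enum omap_dss_device_ops_flag op)
	{
		struct omap_dss_device *dssdev;

		for (dssdev = display; dssdev; dssdev = dssdev->src)
			if (dssdev->ops_flags & op)
				return dssdev;

		return NULL;
	}

Individual drivers opt in by setting the corresponding bits, for example dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT in their probe function, as the connector and encoder drivers start doing later in this series.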
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/omapdss.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 60e4269e6c88..30ad9985776f 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -388,6 +388,18 @@ struct omap_dss_device_ops { }; }; +/** + * enum omap_dss_device_ops_flag - Indicates which device ops are supported + * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection + * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations + * @OMAP_DSS_DEVICE_OP_EDID: The device supports readind EDID + */ +enum omap_dss_device_ops_flag { + OMAP_DSS_DEVICE_OP_DETECT = BIT(0), + OMAP_DSS_DEVICE_OP_HPD = BIT(1), + OMAP_DSS_DEVICE_OP_EDID = BIT(2), +}; + enum omap_dss_device_type { OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0), OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1), @@ -421,6 +433,7 @@ struct omap_dss_device { const struct omap_dss_driver *driver; const struct omap_dss_device_ops *ops; + unsigned long ops_flags; /* helper variable for driver suspend/resume */ bool activate_after_resume; -- GitLab From f2ea55775e0591aa292e2e8d9707d0196df73d61 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 28 May 2018 16:49:36 +0300 Subject: [PATCH 0961/1692] drm/omap: Don't call .detect() operation recursively Instead of calling the .detect() operation recursively from the display device back to the first device that provides hot plug detection support, iterate over the devices manually in the DRM connector .detect() implementation. This moves the complexity to a single central location and simplifies the logic in omap_dss_device drivers. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-dvi.c | 2 ++ .../gpu/drm/omapdrm/displays/connector-hdmi.c | 6 ++-- .../drm/omapdrm/displays/encoder-tpd12s015.c | 4 ++- drivers/gpu/drm/omapdrm/omap_connector.c | 36 ++++++++++++------- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index a639a86cd47b..f1674b3eee50 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -372,6 +372,8 @@ static int dvic_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = ddata->hpd_gpio || ddata->i2c_adapter + ? 
OMAP_DSS_DEVICE_OP_DETECT : 0; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 54bfd7156360..0d22d7004c98 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -141,10 +141,7 @@ static bool hdmic_detect(struct omap_dss_device *dssdev) struct omap_dss_device *src = dssdev->src; bool connected; - if (ddata->hpd_gpio) - connected = gpiod_get_value_cansleep(ddata->hpd_gpio); - else - connected = src->ops->detect(src); + connected = gpiod_get_value_cansleep(ddata->hpd_gpio); if (!connected && src->ops->hdmi.lost_hotplug) src->ops->hdmi.lost_hotplug(src); return connected; @@ -317,6 +314,7 @@ static int hdmic_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = ddata->hpd_gpio ? OMAP_DSS_DEVICE_OP_DETECT : 0; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 0cc7bd656473..e30ead0cacb7 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -132,8 +132,9 @@ static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio); + bool connected; + connected = gpiod_get_value_cansleep(ddata->hpd_gpio); if (!connected && src->ops->hdmi.lost_hotplug) src->ops->hdmi.lost_hotplug(src); return connected; @@ -288,6 +289,7 @@ static int tpd_probe(struct platform_device *pdev) dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT; dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); if (IS_ERR(dssdev->next)) { diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index f9cc04c7c0fa..4729af395156 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -61,26 +61,36 @@ static enum drm_connector_status omap_connector_detect( struct drm_connector *connector, bool force) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->dssdev; - enum drm_connector_status ret; + struct omap_dss_device *dssdev; + enum drm_connector_status status; + + for (dssdev = omap_connector->dssdev; dssdev; dssdev = dssdev->src) { + if (dssdev->ops_flags & OMAP_DSS_DEVICE_OP_DETECT) + break; + } - if (dssdev->ops->detect) { + if (dssdev) { if (dssdev->ops->detect(dssdev)) - ret = connector_status_connected; + status = connector_status_connected; else - ret = connector_status_disconnected; - } else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI || - dssdev->type == OMAP_DISPLAY_TYPE_DBI || - dssdev->type == OMAP_DISPLAY_TYPE_SDI || - dssdev->type == OMAP_DISPLAY_TYPE_DSI) { - ret = connector_status_connected; + status = connector_status_disconnected; } else { - ret = connector_status_unknown; + switch (omap_connector->dssdev->type) { + case OMAP_DISPLAY_TYPE_DPI: + case OMAP_DISPLAY_TYPE_DBI: + case OMAP_DISPLAY_TYPE_SDI: + case OMAP_DISPLAY_TYPE_DSI: + status = connector_status_connected; + break; + default: + 
status = connector_status_unknown; + break; + } } - VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force); + VERB("%s: %d (force=%d)", omap_connector->dssdev->name, status, force); - return ret; + return status; } static void omap_connector_destroy(struct drm_connector *connector) -- GitLab From 949ea2ef3fed4e1d0f9b80ec21ed81a9833ac248 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 30 May 2018 18:49:48 +0300 Subject: [PATCH 0962/1692] drm/omap: Don't call HPD registration operations recursively Instead of calling the hot-plug detection callback registration operations (.register_hpd_cb() and .unregister_hpd_cb()) recursively from the display device back to the first device that provides hot plug detection support, iterate over the devices manually in the DRM connector code. This moves the complexity to a single central location and simplifies the logic in omap_dss_device drivers. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-dvi.c | 8 +- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 67 +++++++--------- .../drm/omapdrm/displays/encoder-tpd12s015.c | 3 +- drivers/gpu/drm/omapdrm/omap_connector.c | 79 ++++++++++++------- 4 files changed, 88 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index f1674b3eee50..e9353e4cd297 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -372,8 +372,12 @@ static int dvic_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); - dssdev->ops_flags = ddata->hpd_gpio || ddata->i2c_adapter - ? 
OMAP_DSS_DEVICE_OP_DETECT : 0; + + if (ddata->hpd_gpio) + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT + | OMAP_DSS_DEVICE_OP_HPD; + else if (ddata->i2c_adapter) + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 0d22d7004c98..8eae973474dd 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -153,62 +153,53 @@ static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, void *cb_data) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - if (ddata->hpd_gpio) { - mutex_lock(&ddata->hpd_lock); - ddata->hpd_cb = cb; - ddata->hpd_cb_data = cb_data; - mutex_unlock(&ddata->hpd_lock); - return 0; - } else if (src->ops->register_hpd_cb) { - return src->ops->register_hpd_cb(src, cb, cb_data); - } + if (!ddata->hpd_gpio) + return -ENOTSUPP; - return -ENOTSUPP; + mutex_lock(&ddata->hpd_lock); + ddata->hpd_cb = cb; + ddata->hpd_cb_data = cb_data; + mutex_unlock(&ddata->hpd_lock); + + return 0; } static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - if (ddata->hpd_gpio) { - mutex_lock(&ddata->hpd_lock); - ddata->hpd_cb = NULL; - ddata->hpd_cb_data = NULL; - mutex_unlock(&ddata->hpd_lock); - } else if (src->ops->unregister_hpd_cb) { - src->ops->unregister_hpd_cb(src); - } + if (!ddata->hpd_gpio) + return; + + mutex_lock(&ddata->hpd_lock); + ddata->hpd_cb = NULL; + ddata->hpd_cb_data = NULL; + mutex_unlock(&ddata->hpd_lock); } static void hdmic_enable_hpd(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - if (ddata->hpd_gpio) { - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = true; - mutex_unlock(&ddata->hpd_lock); - } else if (src->ops->enable_hpd) { - src->ops->enable_hpd(src); - } + if (!ddata->hpd_gpio) + return; + + mutex_lock(&ddata->hpd_lock); + ddata->hpd_enabled = true; + mutex_unlock(&ddata->hpd_lock); } static void hdmic_disable_hpd(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - if (ddata->hpd_gpio) { - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = false; - mutex_unlock(&ddata->hpd_lock); - } else if (src->ops->disable_hpd) { - src->ops->disable_hpd(src); - } + if (!ddata->hpd_gpio) + return; + + mutex_lock(&ddata->hpd_lock); + ddata->hpd_enabled = false; + mutex_unlock(&ddata->hpd_lock); } static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) @@ -314,7 +305,9 @@ static int hdmic_probe(struct platform_device *pdev) dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); - dssdev->ops_flags = ddata->hpd_gpio ? OMAP_DSS_DEVICE_OP_DETECT : 0; + dssdev->ops_flags = ddata->hpd_gpio + ? 
OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD + : 0; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index e30ead0cacb7..f6d4f90f2c08 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -289,7 +289,8 @@ static int tpd_probe(struct platform_device *pdev) dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT; + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT + | OMAP_DSS_DEVICE_OP_HPD; dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); if (IS_ERR(dssdev->next)) { diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 4729af395156..05734c908aae 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -57,6 +57,21 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector) return omap_connector->hdmi_mode; } +static struct omap_dss_device * +omap_connector_find_device(struct drm_connector *connector, + enum omap_dss_device_ops_flag op) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *dssdev; + + for (dssdev = omap_connector->dssdev; dssdev; dssdev = dssdev->src) { + if (dssdev->ops_flags & op) + return dssdev; + } + + return NULL; +} + static enum drm_connector_status omap_connector_detect( struct drm_connector *connector, bool force) { @@ -64,10 +79,8 @@ static enum drm_connector_status omap_connector_detect( struct omap_dss_device *dssdev; enum drm_connector_status status; - for (dssdev = omap_connector->dssdev; dssdev; dssdev = dssdev->src) { - if (dssdev->ops_flags & OMAP_DSS_DEVICE_OP_DETECT) - break; - } + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_DETECT); if (dssdev) { if (dssdev->ops->detect(dssdev)) @@ -96,18 +109,21 @@ static enum drm_connector_status omap_connector_detect( static void omap_connector_destroy(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->dssdev; + struct omap_dss_device *dssdev; DBG("%s", omap_connector->dssdev->name); - if (connector->polled == DRM_CONNECTOR_POLL_HPD && - dssdev->ops->unregister_hpd_cb) { + + if (connector->polled == DRM_CONNECTOR_POLL_HPD) { + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_HPD); dssdev->ops->unregister_hpd_cb(dssdev); } + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(omap_connector); - omapdss_device_put(dssdev); + omapdss_device_put(omap_connector->dssdev); } #define MAX_EDID 512 @@ -257,45 +273,50 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, { struct drm_connector *connector = NULL; struct omap_connector *omap_connector; - bool hpd_supported = false; DBG("%s", dssdev->name); - omapdss_device_get(dssdev); - omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); if (!omap_connector) goto fail; - omap_connector->dssdev = dssdev; + omap_connector->dssdev = omapdss_device_get(dssdev); connector = &omap_connector->base; + connector->interlace_allowed = 1; + connector->doublescan_allowed = 0; drm_connector_init(dev, connector, &omap_connector_funcs, connector_type); drm_connector_helper_add(connector, &omap_connector_helper_funcs); - 
if (dssdev->ops->register_hpd_cb) { - int ret = dssdev->ops->register_hpd_cb(dssdev, - omap_connector_hpd_cb, - omap_connector); - if (!ret) - hpd_supported = true; - else if (ret != -ENOTSUPP) + /* + * Initialize connector status handling. First try to find a device that + * supports hot-plug reporting. If it fails, fall back to a device that + * support polling. If that fails too, we don't support hot-plug + * detection at all. + */ + dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD); + if (dssdev) { + int ret; + + ret = dssdev->ops->register_hpd_cb(dssdev, + omap_connector_hpd_cb, + omap_connector); + if (ret < 0) DBG("%s: Failed to register HPD callback (%d).", dssdev->name, ret); + else + connector->polled = DRM_CONNECTOR_POLL_HPD; } - if (hpd_supported) - connector->polled = DRM_CONNECTOR_POLL_HPD; - else if (dssdev->ops->detect) - connector->polled = DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT; - else - connector->polled = 0; - - connector->interlace_allowed = 1; - connector->doublescan_allowed = 0; + if (!connector->polled) { + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_DETECT); + if (dssdev) + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } return connector; -- GitLab From a21a8f3c93e1a95cc811a1297ec60aa455a6c523 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 30 May 2018 18:51:59 +0300 Subject: [PATCH 0963/1692] drm/omap: Remove unneeded safety checks in the HPD operations The HPD-related omap_dss_device operations are now only called when the device supports HPD. There's no need to duplicate that check in the omap_dss_device drivers. The .register_hpd_cb() operation can as a result be turned into a void operation. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-dvi.c | 9 +-------- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 14 +++----------- .../drm/omapdrm/displays/encoder-tpd12s015.c | 8 +++----- drivers/gpu/drm/omapdrm/dss/omapdss.h | 6 +++--- drivers/gpu/drm/omapdrm/omap_connector.c | 17 ++++------------- 5 files changed, 14 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index e9353e4cd297..a53d5967e5a9 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -211,30 +211,23 @@ static bool dvic_detect(struct omap_dss_device *dssdev) return r == 0; } -static int dvic_register_hpd_cb(struct omap_dss_device *dssdev, +static void dvic_register_hpd_cb(struct omap_dss_device *dssdev, void (*cb)(void *cb_data, enum drm_connector_status status), void *cb_data) { struct panel_drv_data *ddata = to_panel_data(dssdev); - if (!ddata->hpd_gpio) - return -ENOTSUPP; - mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = cb; ddata->hpd_cb_data = cb_data; mutex_unlock(&ddata->hpd_lock); - return 0; } static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - if (!ddata->hpd_gpio) - return; - mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = NULL; ddata->hpd_cb_data = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 8eae973474dd..c58bf64d1a9b 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -147,31 +147,23 @@ static bool 
hdmic_detect(struct omap_dss_device *dssdev) return connected; } -static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, +static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev, + void (*cb)(void *cb_data, enum drm_connector_status status), - void *cb_data) + void *cb_data) { struct panel_drv_data *ddata = to_panel_data(dssdev); - if (!ddata->hpd_gpio) - return -ENOTSUPP; - mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = cb; ddata->hpd_cb_data = cb_data; mutex_unlock(&ddata->hpd_lock); - - return 0; } static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - if (!ddata->hpd_gpio) - return; - mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = NULL; ddata->hpd_cb_data = NULL; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index f6d4f90f2c08..508df4174c5e 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -140,10 +140,10 @@ static bool tpd_detect(struct omap_dss_device *dssdev) return connected; } -static int tpd_register_hpd_cb(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, +static void tpd_register_hpd_cb(struct omap_dss_device *dssdev, + void (*cb)(void *cb_data, enum drm_connector_status status), - void *cb_data) + void *cb_data) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -151,8 +151,6 @@ static int tpd_register_hpd_cb(struct omap_dss_device *dssdev, ddata->hpd_cb = cb; ddata->hpd_cb_data = cb_data; mutex_unlock(&ddata->hpd_lock); - - return 0; } static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 30ad9985776f..b05d47b34937 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -372,10 +372,10 @@ struct omap_dss_device_ops { bool (*detect)(struct omap_dss_device *dssdev); - int (*register_hpd_cb)(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, + void (*register_hpd_cb)(struct omap_dss_device *dssdev, + void (*cb)(void *cb_data, enum drm_connector_status status), - void *cb_data); + void *cb_data); void (*unregister_hpd_cb)(struct omap_dss_device *dssdev); void (*enable_hpd)(struct omap_dss_device *dssdev); void (*disable_hpd)(struct omap_dss_device *dssdev); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 05734c908aae..cd33995b0b1a 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -298,19 +298,10 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, */ dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD); if (dssdev) { - int ret; - - ret = dssdev->ops->register_hpd_cb(dssdev, - omap_connector_hpd_cb, - omap_connector); - if (ret < 0) - DBG("%s: Failed to register HPD callback (%d).", - dssdev->name, ret); - else - connector->polled = DRM_CONNECTOR_POLL_HPD; - } - - if (!connector->polled) { + dssdev->ops->register_hpd_cb(dssdev, omap_connector_hpd_cb, + omap_connector); + connector->polled = DRM_CONNECTOR_POLL_HPD; + } else { dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_DETECT); if (dssdev) -- GitLab From 18412b667c96d1a5210f33191e128866a72cea07 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 30 May 2018 21:46:44 +0300 Subject: [PATCH 0964/1692] drm/omap: Merge HPD enable 
operation with HPD callback registration The omap_dss_device .enable_hpd() and .disable_hpd() are used to enable and disable hot-plug detection at omapdrm probe and remove time. This is required to avoid reporting hot-plug detection events before the DRM infrastructure is ready to accept them, as that could result in crashes or other malfunction. Hot-plug event reporting is conditioned by both HPD being enabled through the .enable_hpd() operation and by the HPD callback being registered though the .register_hpd_cb() operation. We thus don't need a separate enable operation if we can guarantee that callbacks won't be registered too early. HPD callbacks are registered at connector initialization time, which is too early to start reporting HPD events. There's however nothing blocking a move of callback registration to a later time when the omapdrm driver calls the HPD enable operations. Do so, and remove the HPD enable operation completely from omap_dss_device drivers. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-dvi.c | 26 -------------- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 29 +--------------- .../drm/omapdrm/displays/encoder-tpd12s015.c | 23 +------------ drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 -- drivers/gpu/drm/omapdrm/omap_connector.c | 34 +++++++++++++++---- drivers/gpu/drm/omapdrm/omap_connector.h | 2 ++ drivers/gpu/drm/omapdrm/omap_drv.c | 16 +++------ 7 files changed, 35 insertions(+), 97 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index a53d5967e5a9..6be260ff6458 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -234,30 +234,6 @@ static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev) mutex_unlock(&ddata->hpd_lock); } -static void dvic_enable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - if (!ddata->hpd_gpio) - return; - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = true; - mutex_unlock(&ddata->hpd_lock); -} - -static void dvic_disable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - if (!ddata->hpd_gpio) - return; - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = false; - mutex_unlock(&ddata->hpd_lock); -} - static const struct omap_dss_device_ops dvic_ops = { .connect = dvic_connect, .disconnect = dvic_disconnect, @@ -274,8 +250,6 @@ static const struct omap_dss_device_ops dvic_ops = { .register_hpd_cb = dvic_register_hpd_cb, .unregister_hpd_cb = dvic_unregister_hpd_cb, - .enable_hpd = dvic_enable_hpd, - .disable_hpd = dvic_disable_hpd, }; static irqreturn_t dvic_hpd_isr(int irq, void *data) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index c58bf64d1a9b..84cc68388940 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -37,7 +37,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; void (*hpd_cb)(void *cb_data, enum drm_connector_status status); void *hpd_cb_data; - bool hpd_enabled; struct mutex hpd_lock; struct device *dev; @@ -170,30 +169,6 @@ static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) mutex_unlock(&ddata->hpd_lock); } -static void hdmic_enable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = 
to_panel_data(dssdev); - - if (!ddata->hpd_gpio) - return; - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = true; - mutex_unlock(&ddata->hpd_lock); -} - -static void hdmic_disable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - if (!ddata->hpd_gpio) - return; - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = false; - mutex_unlock(&ddata->hpd_lock); -} - static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) { struct omap_dss_device *src = dssdev->src; @@ -224,8 +199,6 @@ static const struct omap_dss_device_ops hdmic_ops = { .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, .unregister_hpd_cb = hdmic_unregister_hpd_cb, - .enable_hpd = hdmic_enable_hpd, - .disable_hpd = hdmic_disable_hpd, .hdmi = { .set_hdmi_mode = hdmic_set_hdmi_mode, @@ -238,7 +211,7 @@ static irqreturn_t hdmic_hpd_isr(int irq, void *data) struct panel_drv_data *ddata = data; mutex_lock(&ddata->hpd_lock); - if (ddata->hpd_enabled && ddata->hpd_cb) { + if (ddata->hpd_cb) { enum drm_connector_status status; if (hdmic_detect(&ddata->dssdev)) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 508df4174c5e..d6d08148a3e5 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -23,7 +23,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; void (*hpd_cb)(void *cb_data, enum drm_connector_status status); void *hpd_cb_data; - bool hpd_enabled; struct mutex hpd_lock; struct gpio_desc *ct_cp_hpd_gpio; @@ -163,24 +162,6 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev) mutex_unlock(&ddata->hpd_lock); } -static void tpd_enable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = true; - mutex_unlock(&ddata->hpd_lock); -} - -static void tpd_disable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = false; - mutex_unlock(&ddata->hpd_lock); -} - static int tpd_set_infoframe(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi) { @@ -208,8 +189,6 @@ static const struct omap_dss_device_ops tpd_ops = { .detect = tpd_detect, .register_hpd_cb = tpd_register_hpd_cb, .unregister_hpd_cb = tpd_unregister_hpd_cb, - .enable_hpd = tpd_enable_hpd, - .disable_hpd = tpd_disable_hpd, .hdmi = { .set_infoframe = tpd_set_infoframe, @@ -222,7 +201,7 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data) struct panel_drv_data *ddata = data; mutex_lock(&ddata->hpd_lock); - if (ddata->hpd_enabled && ddata->hpd_cb) { + if (ddata->hpd_cb) { enum drm_connector_status status; if (tpd_detect(&ddata->dssdev)) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index b05d47b34937..ecefc1b193ca 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -377,8 +377,6 @@ struct omap_dss_device_ops { enum drm_connector_status status), void *cb_data); void (*unregister_hpd_cb)(struct omap_dss_device *dssdev); - void (*enable_hpd)(struct omap_dss_device *dssdev); - void (*disable_hpd)(struct omap_dss_device *dssdev); int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index cd33995b0b1a..e77427d81eb9 
100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -30,6 +30,7 @@ struct omap_connector { struct drm_connector base; struct omap_dss_device *dssdev; + struct omap_dss_device *hpd; bool hdmi_mode; }; @@ -50,6 +51,25 @@ static void omap_connector_hpd_cb(void *cb_data, drm_kms_helper_hotplug_event(dev); } +void omap_connector_enable_hpd(struct drm_connector *connector) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *hpd = omap_connector->hpd; + + if (hpd) + hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb, + omap_connector); +} + +void omap_connector_disable_hpd(struct drm_connector *connector) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *hpd = omap_connector->hpd; + + if (hpd) + hpd->ops->unregister_hpd_cb(hpd); +} + bool omap_connector_get_hdmi_mode(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); @@ -109,14 +129,15 @@ static enum drm_connector_status omap_connector_detect( static void omap_connector_destroy(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev; DBG("%s", omap_connector->dssdev->name); - if (connector->polled == DRM_CONNECTOR_POLL_HPD) { - dssdev = omap_connector_find_device(connector, - OMAP_DSS_DEVICE_OP_HPD); - dssdev->ops->unregister_hpd_cb(dssdev); + if (omap_connector->hpd) { + struct omap_dss_device *hpd = omap_connector->hpd; + + hpd->ops->unregister_hpd_cb(hpd); + omapdss_device_put(hpd); + omap_connector->hpd = NULL; } drm_connector_unregister(connector); @@ -298,8 +319,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, */ dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD); if (dssdev) { - dssdev->ops->register_hpd_cb(dssdev, omap_connector_hpd_cb, - omap_connector); + omap_connector->hpd = omapdss_device_get(dssdev); connector->polled = DRM_CONNECTOR_POLL_HPD; } else { dssdev = omap_connector_find_device(connector, diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h index 98bbc779b302..465b3c9499d5 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.h +++ b/drivers/gpu/drm/omapdrm/omap_connector.h @@ -33,5 +33,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, struct drm_encoder *omap_connector_attached_encoder( struct drm_connector *connector); bool omap_connector_get_hdmi_mode(struct drm_connector *connector); +void omap_connector_enable_hpd(struct drm_connector *connector); +void omap_connector_disable_hpd(struct drm_connector *connector); #endif /* __OMAPDRM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 843222118fa7..0cca16c323d9 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -375,12 +375,8 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev) struct omap_drm_private *priv = ddev->dev_private; int i; - for (i = 0; i < priv->num_pipes; i++) { - struct omap_dss_device *display = priv->pipes[i].display; - - if (display->ops->enable_hpd) - display->ops->enable_hpd(display); - } + for (i = 0; i < priv->num_pipes; i++) + omap_connector_enable_hpd(priv->pipes[i].connector); } /* @@ -391,12 +387,8 @@ static void omap_modeset_disable_external_hpd(struct drm_device *ddev) struct omap_drm_private *priv = ddev->dev_private; int i; - for (i = 0; 
i < priv->num_pipes; i++) { - struct omap_dss_device *display = priv->pipes[i].display; - - if (display->ops->disable_hpd) - display->ops->disable_hpd(display); - } + for (i = 0; i < priv->num_pipes; i++) + omap_connector_disable_hpd(priv->pipes[i].connector); } /* -- GitLab From f006325cdc8008b015b47d830bce072adf40f313 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 30 May 2018 16:53:43 +0300 Subject: [PATCH 0965/1692] drm/omap: Move HPD disconnection handling to omap_connector On HDMI outputs, CEC support requires notification of HPD signal deassertion. The HPD signal can be handled by various omap_dss_device instances in the pipeline, and all of them forward HPD events to the OMAP4 internal HDMI encoder. Knowledge of the DSS internals need to be removed from the omap_dss_device instances in order to migrate to drm_bridge. To do so, move HPD handling for CEC to the omap_connector. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 7 +--- .../drm/omapdrm/displays/encoder-tpd12s015.c | 7 +--- drivers/gpu/drm/omapdrm/omap_connector.c | 33 +++++++++++++++---- 3 files changed, 29 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 84cc68388940..6f2364afb14a 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -137,13 +137,8 @@ static int hdmic_read_edid(struct omap_dss_device *dssdev, static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - bool connected; - connected = gpiod_get_value_cansleep(ddata->hpd_gpio); - if (!connected && src->ops->hdmi.lost_hotplug) - src->ops->hdmi.lost_hotplug(src); - return connected; + return gpiod_get_value_cansleep(ddata->hpd_gpio); } static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index d6d08148a3e5..da97d357bde7 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -130,13 +130,8 @@ static int tpd_read_edid(struct omap_dss_device *dssdev, static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - bool connected; - connected = gpiod_get_value_cansleep(ddata->hpd_gpio); - if (!connected && src->ops->hdmi.lost_hotplug) - src->ops->hdmi.lost_hotplug(src); - return connected; + return gpiod_get_value_cansleep(ddata->hpd_gpio); } static void tpd_register_hpd_cb(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index e77427d81eb9..344414ef3962 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -34,6 +34,22 @@ struct omap_connector { bool hdmi_mode; }; +static void omap_connector_hpd_notify(struct drm_connector *connector, + struct omap_dss_device *src, + enum drm_connector_status status) +{ + if (status == connector_status_disconnected) { + /* + * If the source is an HDMI encoder, notify it of disconnection. + * This is required to let the HDMI encoder reset any internal + * state related to connection status, such as the CEC address. 
+ */ + if (src && src->type == OMAP_DISPLAY_TYPE_HDMI && + src->ops->hdmi.lost_hotplug) + src->ops->hdmi.lost_hotplug(src); + } +} + static void omap_connector_hpd_cb(void *cb_data, enum drm_connector_status status) { @@ -47,8 +63,12 @@ static void omap_connector_hpd_cb(void *cb_data, connector->status = status; mutex_unlock(&dev->mode_config.mutex); - if (old_status != status) - drm_kms_helper_hotplug_event(dev); + if (old_status == status) + return; + + omap_connector_hpd_notify(connector, omap_connector->hpd, status); + + drm_kms_helper_hotplug_event(dev); } void omap_connector_enable_hpd(struct drm_connector *connector) @@ -103,10 +123,11 @@ static enum drm_connector_status omap_connector_detect( OMAP_DSS_DEVICE_OP_DETECT); if (dssdev) { - if (dssdev->ops->detect(dssdev)) - status = connector_status_connected; - else - status = connector_status_disconnected; + status = dssdev->ops->detect(dssdev) + ? connector_status_connected + : connector_status_disconnected; + + omap_connector_hpd_notify(connector, dssdev->src, status); } else { switch (omap_connector->dssdev->type) { case OMAP_DISPLAY_TYPE_DPI: -- GitLab From 90279e9518da8488982e9d5704b890fe0e34ad30 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 31 May 2018 22:09:14 +0300 Subject: [PATCH 0966/1692] drm/omap: Don't call EDID read operation recursively Instead of calling the EDID read operation (.read_edid()) recursively from the display device back to the first device that provides EDID read support, iterate over the devices manually in the DRM connector code. This moves the complexity to a single central location and simplifies the logic in omap_dss_device drivers. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-dvi.c | 15 +-- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 11 -- .../drm/omapdrm/displays/encoder-tpd12s015.c | 13 --- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 1 + drivers/gpu/drm/omapdrm/dss/hdmi5.c | 1 + drivers/gpu/drm/omapdrm/omap_connector.c | 101 ++++++++++-------- 6 files changed, 65 insertions(+), 77 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 6be260ff6458..eae4108330f1 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -166,12 +166,6 @@ static int dvic_read_edid(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); int r, l, bytes_read; - if (ddata->hpd_gpio && !gpiod_get_value_cansleep(ddata->hpd_gpio)) - return -ENODEV; - - if (!ddata->i2c_adapter) - return -ENODEV; - l = min(EDID_LENGTH, len); r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0); if (r) @@ -341,10 +335,11 @@ static int dvic_probe(struct platform_device *pdev) dssdev->of_ports = BIT(0); if (ddata->hpd_gpio) - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT - | OMAP_DSS_DEVICE_OP_HPD; - else if (ddata->i2c_adapter) - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT; + dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT + | OMAP_DSS_DEVICE_OP_HPD; + if (ddata->i2c_adapter) + dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT + | OMAP_DSS_DEVICE_OP_EDID; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 6f2364afb14a..16dc22edcb8e 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ 
-15,8 +15,6 @@ #include #include -#include - #include "../dss/omapdss.h" static const struct videomode hdmic_default_vm = { @@ -126,14 +124,6 @@ static int hdmic_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static int hdmic_read_edid(struct omap_dss_device *dssdev, - u8 *edid, int len) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->read_edid(src, edid, len); -} - static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -190,7 +180,6 @@ static const struct omap_dss_device_ops hdmic_ops = { .get_timings = hdmic_get_timings, .check_timings = hdmic_check_timings, - .read_edid = hdmic_read_edid, .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, .unregister_hpd_cb = hdmic_unregister_hpd_cb, diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index da97d357bde7..3ce1c935a48c 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -115,18 +115,6 @@ static int tpd_check_timings(struct omap_dss_device *dssdev, return src->ops->check_timings(src, vm); } -static int tpd_read_edid(struct omap_dss_device *dssdev, - u8 *edid, int len) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!gpiod_get_value_cansleep(ddata->hpd_gpio)) - return -ENODEV; - - return src->ops->read_edid(src, edid, len); -} - static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -180,7 +168,6 @@ static const struct omap_dss_device_ops tpd_ops = { .disable = tpd_disable, .check_timings = tpd_check_timings, .set_timings = tpd_set_timings, - .read_edid = tpd_read_edid, .detect = tpd_detect, .register_hpd_cb = tpd_register_hpd_cb, .unregister_hpd_cb = tpd_unregister_hpd_cb, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index bebce93fed3e..c92564300446 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -711,6 +711,7 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi) out->ops = &hdmi_ops; out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->ops_flags = OMAP_DSS_DEVICE_OP_EDID; out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); if (IS_ERR(out->next)) { diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 7c07e0208107..2aaa8ee61662 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -703,6 +703,7 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi) out->ops = &hdmi_ops; out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->ops_flags = OMAP_DSS_DEVICE_OP_EDID; out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); if (IS_ERR(out->next)) { diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 344414ef3962..5091991363d6 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -170,65 +170,80 @@ static void omap_connector_destroy(struct drm_connector *connector) #define MAX_EDID 512 +static int omap_connector_get_modes_edid(struct drm_connector *connector, + struct omap_dss_device *dssdev) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + enum drm_connector_status status; + void *edid; + int n; + + status = 
omap_connector_detect(connector, false); + if (status != connector_status_connected) + goto no_edid; + + edid = kzalloc(MAX_EDID, GFP_KERNEL); + if (!edid) + goto no_edid; + + if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 || + !drm_edid_is_valid(edid)) { + kfree(edid); + goto no_edid; + } + + drm_connector_update_edid_property(connector, edid); + n = drm_add_edid_modes(connector, edid); + + omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid); + + kfree(edid); + return n; + +no_edid: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + static int omap_connector_get_modes(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->dssdev; - struct drm_device *dev = connector->dev; - int n = 0; + struct omap_dss_device *dssdev; + struct drm_display_mode *mode; + struct videomode vm = {0}; DBG("%s", omap_connector->dssdev->name); - /* if display exposes EDID, then we parse that in the normal way to - * build table of supported modes.. otherwise (ie. fixed resolution + /* + * If display exposes EDID, then we parse that in the normal way to + * build table of supported modes. Otherwise (ie. fixed resolution * LCD panels) we just return a single mode corresponding to the - * currently configured timings: + * currently configured timings. */ - if (dssdev->ops->read_edid) { - void *edid = kzalloc(MAX_EDID, GFP_KERNEL); - - if (!edid) - return 0; - - if ((dssdev->ops->read_edid(dssdev, edid, MAX_EDID) > 0) && - drm_edid_is_valid(edid)) { - drm_connector_update_edid_property( - connector, edid); - n = drm_add_edid_modes(connector, edid); - - omap_connector->hdmi_mode = - drm_detect_hdmi_monitor(edid); - } else { - drm_connector_update_edid_property( - connector, NULL); - } - - kfree(edid); - } else { - struct drm_display_mode *mode = drm_mode_create(dev); - struct videomode vm = {0}; + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_EDID); + if (dssdev) + return omap_connector_get_modes_edid(connector, dssdev); - if (!mode) - return 0; + mode = drm_mode_create(connector->dev); + if (!mode) + return 0; - dssdev->ops->get_timings(dssdev, &vm); + dssdev = omap_connector->dssdev; + dssdev->ops->get_timings(dssdev, &vm); - drm_display_mode_from_videomode(&vm, mode); + drm_display_mode_from_videomode(&vm, mode); - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); - if (dssdev->driver && dssdev->driver->get_size) { - dssdev->driver->get_size(dssdev, + if (dssdev->driver && dssdev->driver->get_size) + dssdev->driver->get_size(dssdev, &connector->display_info.width_mm, &connector->display_info.height_mm); - } - n = 1; - } - - return n; + return 1; } static int omap_connector_mode_valid(struct drm_connector *connector, -- GitLab From 70f9cbfc56a3239ff65796e024b4bbf653d1e0ba Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 31 May 2018 23:04:30 +0300 Subject: [PATCH 0967/1692] drm/omap: Get from CRTC to display device directly The CRTC mode set implementation needs to access the omap_dss_device for the pipeline display. To do so, it iterates over all pipelines to find the one that contains an encoder corresponding to the CRTC, and request the display device from the encoder. 
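Condensed from the omap_crtc.c hunk removed below (an illustrative sketch, not literal kernel source), the old lookup amounted to:

    /* Walk every pipeline until we find the encoder driving this CRTC. */
    for (i = 0; i < priv->num_pipes; ++i) {
        struct drm_encoder *encoder = priv->pipes[i].encoder;

        if (encoder->crtc == crtc) {
            dssdev = omap_encoder_get_dssdev(encoder);
            break;
        }
    }
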
That's a very complicated dance when the CRTC has a direct pipeline pointer already, and the pipeline contains a pointer to the display device. Replace the convoluted code with direct access. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_crtc.c | 25 ++++--------------------- drivers/gpu/drm/omapdrm/omap_encoder.c | 7 ------- drivers/gpu/drm/omapdrm/omap_encoder.h | 3 --- 3 files changed, 4 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 197d05312306..6e7a777907f5 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -419,12 +419,12 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + struct omap_dss_device *display = omap_crtc->pipe->display; struct drm_display_mode *mode = &crtc->state->adjusted_mode; - struct omap_drm_private *priv = crtc->dev->dev_private; const u32 flags_mask = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_PIXDATA_NEGEDGE | DISPLAY_FLAGS_SYNC_POSEDGE | DISPLAY_FLAGS_SYNC_NEGEDGE; - unsigned int i; + struct videomode vm = {0}; DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", omap_crtc->name, mode->base.id, mode->name, @@ -447,25 +447,8 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) * has been changed to the DRM model. */ - for (i = 0; i < priv->num_pipes; ++i) { - struct drm_encoder *encoder = priv->pipes[i].encoder; - - if (encoder->crtc == crtc) { - struct omap_dss_device *dssdev; - - dssdev = omap_encoder_get_dssdev(encoder); - - if (dssdev) { - struct videomode vm = {0}; - - dssdev->ops->get_timings(dssdev, &vm); - - omap_crtc->vm.flags |= vm.flags & flags_mask; - } - - break; - } - } + display->ops->get_timings(display, &vm); + omap_crtc->vm.flags |= vm.flags & flags_mask; } static int omap_crtc_atomic_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 7bbf3700e393..87e2b3799a45 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -39,13 +39,6 @@ struct omap_encoder { struct omap_dss_device *dssdev; }; -struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder) -{ - struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - - return omap_encoder->dssdev; -} - static void omap_encoder_destroy(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h index d2f308bec494..e8f1a35dce2f 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.h +++ b/drivers/gpu/drm/omapdrm/omap_encoder.h @@ -27,7 +27,4 @@ struct omap_dss_device; struct drm_encoder *omap_encoder_init(struct drm_device *dev, struct omap_dss_device *dssdev); -/* map crtc to vblank mask */ -struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); - #endif /* __OMAPDRM_ENCODER_H__ */ -- GitLab From d96aaada55553b9e4264a2f2c8cc9599f027cd28 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 31 May 2018 23:14:43 +0300 Subject: [PATCH 0968/1692] drm/omap: Pass both output and display omap_dss_device to encoder init The drm_encoder implementation requires access to the omap_dss_device corresponding to the display, 
which is passed to its initialization function and stored internally. Clean up of the HDMI mode and infoframe handling will require access to the output omap_dss_device. To prepare for that, pass it to the encoder initialization function and store it internally as well. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_drv.c | 2 +- drivers/gpu/drm/omapdrm/omap_encoder.c | 17 ++++++++++------- drivers/gpu/drm/omapdrm/omap_encoder.h | 3 ++- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 0cca16c323d9..174bf498f4e5 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -318,7 +318,7 @@ static int omap_modeset_init(struct drm_device *dev) struct drm_encoder *encoder; struct drm_crtc *crtc; - encoder = omap_encoder_init(dev, display); + encoder = omap_encoder_init(dev, pipe->output, display); if (!encoder) return -ENOMEM; diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 87e2b3799a45..2689ae74ea60 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -36,7 +36,8 @@ */ struct omap_encoder { struct drm_encoder base; - struct omap_dss_device *dssdev; + struct omap_dss_device *output; + struct omap_dss_device *display; }; static void omap_encoder_destroy(struct drm_encoder *encoder) @@ -57,7 +58,7 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->dssdev; + struct omap_dss_device *dssdev = omap_encoder->display; struct drm_connector *connector; bool hdmi_mode; int r; @@ -86,7 +87,7 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, static void omap_encoder_disable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->dssdev; + struct omap_dss_device *dssdev = omap_encoder->display; dssdev->ops->disable(dssdev); } @@ -97,7 +98,7 @@ static int omap_encoder_update(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->dssdev; + struct omap_dss_device *dssdev = omap_encoder->display; int ret; if (dssdev->ops->check_timings) { @@ -127,7 +128,7 @@ static int omap_encoder_update(struct drm_encoder *encoder, static void omap_encoder_enable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->dssdev; + struct omap_dss_device *dssdev = omap_encoder->display; int r; omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc), @@ -156,7 +157,8 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { /* initialize encoder */ struct drm_encoder *omap_encoder_init(struct drm_device *dev, - struct omap_dss_device *dssdev) + struct omap_dss_device *output, + struct omap_dss_device *display) { struct drm_encoder *encoder = NULL; struct omap_encoder *omap_encoder; @@ -165,7 +167,8 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev, if (!omap_encoder) goto fail; - omap_encoder->dssdev = dssdev; + omap_encoder->output = output; + omap_encoder->display = display; encoder = &omap_encoder->base; diff --git 
a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h index e8f1a35dce2f..a7b5dde63ecb 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.h +++ b/drivers/gpu/drm/omapdrm/omap_encoder.h @@ -25,6 +25,7 @@ struct drm_encoder; struct omap_dss_device; struct drm_encoder *omap_encoder_init(struct drm_device *dev, - struct omap_dss_device *dssdev); + struct omap_dss_device *output, + struct omap_dss_device *display); #endif /* __OMAPDRM_ENCODER_H__ */ -- GitLab From 7805d1e5098c1a90e5feb153809b16ba890b03e4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 31 May 2018 22:56:59 +0300 Subject: [PATCH 0969/1692] drm/omap: Don't call HDMI mode and infoframe operations recursively The HDMI mode (.set_hdmi_mode()) and infoframe (.set_infoframe()) operations are called recursively from the display device back to the HDMI encoder. This isn't required, as all components other than the HDMI encoder just forward the operation to the previous component in the chain. Call the operations directly on the HDMI encoder. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 20 ------------------ .../drm/omapdrm/displays/encoder-tpd12s015.c | 21 ------------------- drivers/gpu/drm/omapdrm/omap_encoder.c | 2 +- 3 files changed, 1 insertion(+), 42 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 16dc22edcb8e..fe6d2923ed81 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -154,21 +154,6 @@ static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) mutex_unlock(&ddata->hpd_lock); } -static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->hdmi.set_hdmi_mode(src, hdmi_mode); -} - -static int hdmic_set_infoframe(struct omap_dss_device *dssdev, - const struct hdmi_avi_infoframe *avi) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->hdmi.set_infoframe(src, avi); -} - static const struct omap_dss_device_ops hdmic_ops = { .connect = hdmic_connect, .disconnect = hdmic_disconnect, @@ -183,11 +168,6 @@ static const struct omap_dss_device_ops hdmic_ops = { .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, .unregister_hpd_cb = hdmic_unregister_hpd_cb, - - .hdmi = { - .set_hdmi_mode = hdmic_set_hdmi_mode, - .set_infoframe = hdmic_set_infoframe, - }, }; static irqreturn_t hdmic_hpd_isr(int irq, void *data) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 3ce1c935a48c..babaac856067 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -145,22 +145,6 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev) mutex_unlock(&ddata->hpd_lock); } -static int tpd_set_infoframe(struct omap_dss_device *dssdev, - const struct hdmi_avi_infoframe *avi) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->hdmi.set_infoframe(src, avi); -} - -static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev, - bool hdmi_mode) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->hdmi.set_hdmi_mode(src, hdmi_mode); -} - static const struct omap_dss_device_ops tpd_ops = { .connect = tpd_connect, .disconnect = tpd_disconnect, @@ -171,11 +155,6 @@ static 
const struct omap_dss_device_ops tpd_ops = { .detect = tpd_detect, .register_hpd_cb = tpd_register_hpd_cb, .unregister_hpd_cb = tpd_unregister_hpd_cb, - - .hdmi = { - .set_infoframe = tpd_set_infoframe, - .set_hdmi_mode = tpd_set_hdmi_mode, - }, }; static irqreturn_t tpd_hpd_isr(int irq, void *data) diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 2689ae74ea60..94b75d018e71 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -58,7 +58,7 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->display; + struct omap_dss_device *dssdev = omap_encoder->output; struct drm_connector *connector; bool hdmi_mode; int r; -- GitLab From 47a3ee27934a7a774be4aa1733c662f33e1ae656 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 31 May 2018 23:14:43 +0300 Subject: [PATCH 0970/1692] drm/omap: Pass both output and display omap_dss_device to connector init The drm_connector implementation requires access to the omap_dss_device corresponding to the display, which is passed to its initialization function and stored internally. Refactoring of the timings operations will require access to the output omap_dss_device. To prepare for that, pass it to the connector initialization function and store it internally as well. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_connector.c | 30 ++++++++++++++---------- drivers/gpu/drm/omapdrm/omap_connector.h | 4 ++-- drivers/gpu/drm/omapdrm/omap_drv.c | 3 ++- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 5091991363d6..92fea0085a9c 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -29,7 +29,8 @@ struct omap_connector { struct drm_connector base; - struct omap_dss_device *dssdev; + struct omap_dss_device *output; + struct omap_dss_device *display; struct omap_dss_device *hpd; bool hdmi_mode; }; @@ -104,7 +105,7 @@ omap_connector_find_device(struct drm_connector *connector, struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev; - for (dssdev = omap_connector->dssdev; dssdev; dssdev = dssdev->src) { + for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) { if (dssdev->ops_flags & op) return dssdev; } @@ -129,7 +130,7 @@ static enum drm_connector_status omap_connector_detect( omap_connector_hpd_notify(connector, dssdev->src, status); } else { - switch (omap_connector->dssdev->type) { + switch (omap_connector->display->type) { case OMAP_DISPLAY_TYPE_DPI: case OMAP_DISPLAY_TYPE_DBI: case OMAP_DISPLAY_TYPE_SDI: @@ -142,7 +143,7 @@ static enum drm_connector_status omap_connector_detect( } } - VERB("%s: %d (force=%d)", omap_connector->dssdev->name, status, force); + VERB("%s: %d (force=%d)", omap_connector->display->name, status, force); return status; } @@ -151,7 +152,7 @@ static void omap_connector_destroy(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); - DBG("%s", omap_connector->dssdev->name); + DBG("%s", omap_connector->display->name); if (omap_connector->hpd) { struct omap_dss_device *hpd = omap_connector->hpd; @@ -165,7 +166,8 @@ static void 
omap_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); kfree(omap_connector); - omapdss_device_put(omap_connector->dssdev); + omapdss_device_put(omap_connector->output); + omapdss_device_put(omap_connector->display); } #define MAX_EDID 512 @@ -212,7 +214,7 @@ static int omap_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *mode; struct videomode vm = {0}; - DBG("%s", omap_connector->dssdev->name); + DBG("%s", omap_connector->display->name); /* * If display exposes EDID, then we parse that in the normal way to @@ -229,7 +231,7 @@ static int omap_connector_get_modes(struct drm_connector *connector) if (!mode) return 0; - dssdev = omap_connector->dssdev; + dssdev = omap_connector->display; dssdev->ops->get_timings(dssdev, &vm); drm_display_mode_from_videomode(&vm, mode); @@ -250,7 +252,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->dssdev; + struct omap_dss_device *dssdev = omap_connector->display; struct videomode vm = {0}; struct drm_device *dev = connector->dev; struct drm_display_mode *new_mode; @@ -325,19 +327,21 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { /* initialize connector */ struct drm_connector *omap_connector_init(struct drm_device *dev, - int connector_type, struct omap_dss_device *dssdev, - struct drm_encoder *encoder) + int connector_type, struct omap_dss_device *output, + struct omap_dss_device *display, struct drm_encoder *encoder) { struct drm_connector *connector = NULL; struct omap_connector *omap_connector; + struct omap_dss_device *dssdev; - DBG("%s", dssdev->name); + DBG("%s", display->name); omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); if (!omap_connector) goto fail; - omap_connector->dssdev = omapdss_device_get(dssdev); + omap_connector->output = omapdss_device_get(output); + omap_connector->display = omapdss_device_get(display); connector = &omap_connector->base; connector->interlace_allowed = 1; diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h index 465b3c9499d5..42ff0a106179 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.h +++ b/drivers/gpu/drm/omapdrm/omap_connector.h @@ -28,8 +28,8 @@ struct drm_encoder; struct omap_dss_device; struct drm_connector *omap_connector_init(struct drm_device *dev, - int connector_type, struct omap_dss_device *dssdev, - struct drm_encoder *encoder); + int connector_type, struct omap_dss_device *output, + struct omap_dss_device *display, struct drm_encoder *encoder); struct drm_encoder *omap_connector_attached_encoder( struct drm_connector *connector); bool omap_connector_get_hdmi_mode(struct drm_connector *connector); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 174bf498f4e5..03771f818eaa 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -323,7 +323,8 @@ static int omap_modeset_init(struct drm_device *dev) return -ENOMEM; connector = omap_connector_init(dev, - get_connector_type(display), display, encoder); + get_connector_type(display), pipe->output, + display, encoder); if (!connector) return -ENOMEM; -- GitLab From 52c5dd2a7bed1610a37f89aeb4f73ad8fbba05fc Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Jun 2018 00:31:57 +0300 Subject: [PATCH 0971/1692] drm/omap: Determine connector 
type directly in omap_connector.c Instead of determining the connector type from the type of the display's omap_dss_device and passing it to the omap_connector_init() function, move the type determination code to omap_connector.c and remove the type argument to the connector init function. This moves code to a more natural location, making the driver easier to read. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_connector.c | 29 +++++++++++++++++++++--- drivers/gpu/drm/omapdrm/omap_connector.h | 5 ++-- drivers/gpu/drm/omapdrm/omap_drv.c | 27 ++-------------------- 3 files changed, 31 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 92fea0085a9c..06c48a64b745 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -325,10 +325,33 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { .mode_valid = omap_connector_mode_valid, }; +static int omap_connector_get_type(struct omap_dss_device *display) +{ + switch (display->type) { + case OMAP_DISPLAY_TYPE_HDMI: + return DRM_MODE_CONNECTOR_HDMIA; + case OMAP_DISPLAY_TYPE_DVI: + return DRM_MODE_CONNECTOR_DVID; + case OMAP_DISPLAY_TYPE_DSI: + return DRM_MODE_CONNECTOR_DSI; + case OMAP_DISPLAY_TYPE_DPI: + case OMAP_DISPLAY_TYPE_DBI: + return DRM_MODE_CONNECTOR_DPI; + case OMAP_DISPLAY_TYPE_VENC: + /* TODO: This could also be composite */ + return DRM_MODE_CONNECTOR_SVIDEO; + case OMAP_DISPLAY_TYPE_SDI: + return DRM_MODE_CONNECTOR_LVDS; + default: + return DRM_MODE_CONNECTOR_Unknown; + } +} + /* initialize connector */ struct drm_connector *omap_connector_init(struct drm_device *dev, - int connector_type, struct omap_dss_device *output, - struct omap_dss_device *display, struct drm_encoder *encoder) + struct omap_dss_device *output, + struct omap_dss_device *display, + struct drm_encoder *encoder) { struct drm_connector *connector = NULL; struct omap_connector *omap_connector; @@ -348,7 +371,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, connector->doublescan_allowed = 0; drm_connector_init(dev, connector, &omap_connector_funcs, - connector_type); + omap_connector_get_type(display)); drm_connector_helper_add(connector, &omap_connector_helper_funcs); /* diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h index 42ff0a106179..854099801649 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.h +++ b/drivers/gpu/drm/omapdrm/omap_connector.h @@ -28,8 +28,9 @@ struct drm_encoder; struct omap_dss_device; struct drm_connector *omap_connector_init(struct drm_device *dev, - int connector_type, struct omap_dss_device *output, - struct omap_dss_device *display, struct drm_encoder *encoder); + struct omap_dss_device *output, + struct omap_dss_device *display, + struct drm_encoder *encoder); struct drm_encoder *omap_connector_attached_encoder( struct drm_connector *connector); bool omap_connector_get_hdmi_mode(struct drm_connector *connector); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 03771f818eaa..5f98506ac2c5 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -129,28 +129,6 @@ static const struct drm_mode_config_funcs omap_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static int get_connector_type(struct omap_dss_device *display) -{ - switch 
(display->type) { - case OMAP_DISPLAY_TYPE_HDMI: - return DRM_MODE_CONNECTOR_HDMIA; - case OMAP_DISPLAY_TYPE_DVI: - return DRM_MODE_CONNECTOR_DVID; - case OMAP_DISPLAY_TYPE_DSI: - return DRM_MODE_CONNECTOR_DSI; - case OMAP_DISPLAY_TYPE_DPI: - case OMAP_DISPLAY_TYPE_DBI: - return DRM_MODE_CONNECTOR_DPI; - case OMAP_DISPLAY_TYPE_VENC: - /* TODO: This could also be composite */ - return DRM_MODE_CONNECTOR_SVIDEO; - case OMAP_DISPLAY_TYPE_SDI: - return DRM_MODE_CONNECTOR_LVDS; - default: - return DRM_MODE_CONNECTOR_Unknown; - } -} - static void omap_disconnect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; @@ -322,9 +300,8 @@ static int omap_modeset_init(struct drm_device *dev) if (!encoder) return -ENOMEM; - connector = omap_connector_init(dev, - get_connector_type(display), pipe->output, - display, encoder); + connector = omap_connector_init(dev, pipe->output, display, + encoder); if (!connector) return -ENOMEM; -- GitLab From ec68cd5a18e16318969e0e59a7d99513dcfa2d02 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 4 Jun 2018 18:26:00 +0300 Subject: [PATCH 0972/1692] drm/omap: dss: hdmi: Rename hdmi_display_(set|check)_timing() functions The two functions implement the .set_timings() and .check_timings() operations. Rename them to hdmi_disply_set_timings() and hdmi_display_check_timings() respectively to match the operations names and make searching the source code easier. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 12 ++++++------ drivers/gpu/drm/omapdrm/dss/hdmi5.c | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index c92564300446..73ca79471ce4 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -251,8 +251,8 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) hdmi_power_off_core(hdmi); } -static int hdmi_display_check_timing(struct omap_dss_device *dssdev, - struct videomode *vm) +static int hdmi_display_check_timings(struct omap_dss_device *dssdev, + struct videomode *vm) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); @@ -262,8 +262,8 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev, return 0; } -static void hdmi_display_set_timing(struct omap_dss_device *dssdev, - struct videomode *vm) +static void hdmi_display_set_timings(struct omap_dss_device *dssdev, + struct videomode *vm) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); @@ -508,8 +508,8 @@ static const struct omap_dss_device_ops hdmi_ops = { .enable = hdmi_display_enable, .disable = hdmi_display_disable, - .check_timings = hdmi_display_check_timing, - .set_timings = hdmi_display_set_timing, + .check_timings = hdmi_display_check_timings, + .set_timings = hdmi_display_set_timings, .read_edid = hdmi_read_edid, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 2aaa8ee61662..259cd39d0c1d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -250,8 +250,8 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) hdmi_power_off_core(hdmi); } -static int hdmi_display_check_timing(struct omap_dss_device *dssdev, - struct videomode *vm) +static int hdmi_display_check_timings(struct omap_dss_device *dssdev, + struct videomode *vm) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); @@ -261,8 +261,8 @@ static int 
hdmi_display_check_timing(struct omap_dss_device *dssdev, return 0; } -static void hdmi_display_set_timing(struct omap_dss_device *dssdev, - struct videomode *vm) +static void hdmi_display_set_timings(struct omap_dss_device *dssdev, + struct videomode *vm) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); @@ -502,8 +502,8 @@ static const struct omap_dss_device_ops hdmi_ops = { .enable = hdmi_display_enable, .disable = hdmi_display_disable, - .check_timings = hdmi_display_check_timing, - .set_timings = hdmi_display_set_timing, + .check_timings = hdmi_display_check_timings, + .set_timings = hdmi_display_set_timings, .read_edid = hdmi_read_edid, -- GitLab From 9c626dee5cdb13aad5aa1448e08186bf6452647d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 19 Aug 2018 14:12:27 +0300 Subject: [PATCH 0973/1692] drm/omap: encoder-tfp410: Don't fix timings in .set_timings() handler Both the .check_timings() and .set_timings() handlers call tfp410_fix_timings() to fix the timing's flags. As .check_timings() is always called before .set_timings(), there's no need to fix the flags twice. Remove the tfp410_fix_timings() call from .set_timings(). Signed-off-by: Laurent Pinchart Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 29bda16afbdc..54f133d7da07 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -92,8 +92,6 @@ static void tfp410_set_timings(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - tfp410_fix_timings(vm); - ddata->vm = *vm; src->ops->set_timings(src, vm); -- GitLab From 8fe1d36100c84ab1b501771252755d75ddcb6bff Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 4 Jun 2018 18:29:01 +0300 Subject: [PATCH 0974/1692] drm/omap: Make the video_mode pointer to .set_timings() const The .set_timings() operations of the omap_dss_device instances don't need to modify the passed timings. Make the pointer const. 
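A minimal sketch of the resulting operation signatures, taken from the omapdss.h hunk further below (all other fields of the ops structure elided):

    struct omap_dss_device_ops {
        /* ... other operations elided ... */
        void (*get_timings)(struct omap_dss_device *dssdev,
                            struct videomode *vm);
        void (*set_timings)(struct omap_dss_device *dssdev,
                            const struct videomode *vm);
    };
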
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 2 +- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 2 +- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 2 +- drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 2 +- drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2 +- drivers/gpu/drm/omapdrm/dss/dpi.c | 2 +- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 2 +- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 2 +- drivers/gpu/drm/omapdrm/dss/omapdss.h | 2 +- drivers/gpu/drm/omapdrm/dss/sdi.c | 2 +- drivers/gpu/drm/omapdrm/dss/venc.c | 2 +- 19 files changed, 19 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 563fc7e618b3..22bc2e734b0b 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -93,7 +93,7 @@ static void tvc_disable(struct omap_dss_device *dssdev) } static void tvc_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index eae4108330f1..8f953303ece6 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -103,7 +103,7 @@ static void dvic_disable(struct omap_dss_device *dssdev) } static void dvic_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index fe6d2923ed81..1cbc593c79ff 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -98,7 +98,7 @@ static void hdmic_disable(struct omap_dss_device *dssdev) } static void hdmic_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 3243e5f9bd06..19d1804e3fe5 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -90,7 +90,7 @@ static void opa362_disable(struct omap_dss_device *dssdev) } static void opa362_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c 
b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 54f133d7da07..fa6ed1e8649d 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -87,7 +87,7 @@ static void tfp410_fix_timings(struct videomode *vm) } static void tfp410_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index babaac856067..21c2667f9f06 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -97,7 +97,7 @@ static void tpd_disable(struct omap_dss_device *dssdev) } static void tpd_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index c03877af9cdb..b2f17b2a93b2 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -96,7 +96,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) } static void panel_dpi_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 62576e4f89e3..1121d1eba76b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -173,7 +173,7 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) } static void lb035q02_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 9f34cf02a114..248a8f1ea7df 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -161,7 +161,7 @@ static void nec_8048_disable(struct omap_dss_device *dssdev) } static void nec_8048_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 9ee6b8376916..ef696e1e7d45 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -135,7 +135,7 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) } static void sharp_ls_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 
d0a8d2810c33..7be58da1075f 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -634,7 +634,7 @@ static void acx565akm_disable(struct omap_dss_device *dssdev) } static void acx565akm_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 1b0e42daa296..087f62f4311b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -304,7 +304,7 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) } static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index b211a7809a26..7bc602dfb84a 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -378,7 +378,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev) } static void tpo_td043_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index f61e3e3186b4..58237decb5a8 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -479,7 +479,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev) } static void dpi_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 73ca79471ce4..a66bdbe3b969 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -263,7 +263,7 @@ static int hdmi_display_check_timings(struct omap_dss_device *dssdev, } static void hdmi_display_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 259cd39d0c1d..d63831c9eacf 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -262,7 +262,7 @@ static int hdmi_display_check_timings(struct omap_dss_device *dssdev, } static void hdmi_display_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index ecefc1b193ca..87306014a53a 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -368,7 +368,7 @@ struct omap_dss_device_ops { void (*get_timings)(struct omap_dss_device *dssdev, struct videomode *vm); void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); + const struct videomode *vm); bool (*detect)(struct 
omap_dss_device *dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 8d46f44bcb7d..3b5f97932475 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -230,7 +230,7 @@ static void sdi_display_disable(struct omap_dss_device *dssdev) } static void sdi_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 100a02a9447f..255bf2cd8afc 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -569,7 +569,7 @@ static void venc_display_disable(struct omap_dss_device *dssdev) } static void venc_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct venc_device *venc = dssdev_to_venc(dssdev); struct videomode actual_vm; -- GitLab From bb23800c887da3a96297ec3d4a09b6bd887ce503 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 5 Jun 2018 02:06:54 +0300 Subject: [PATCH 0975/1692] drm/omap: Remove duplicate calls to .set_timings() operation The omap_dss_device .set_timings() operations are called directly from omap_encoder_update(), and indirectly from the omap_dss_device .enable() operation. The latter is called from omap_encoder_enable(), right after calling omap_encoder_update(). The .set_timings() operation it thus called twice in a row. Fix it by removing the indirect call. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 2 -- drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 3 --- drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 2 -- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 2 -- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 2 -- drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 3 --- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 2 -- drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2 -- 13 files changed, 28 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 22bc2e734b0b..fb6d4fce1853 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -66,8 +66,6 @@ static int tvc_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 8f953303ece6..b89555ed53a0 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -69,7 +69,6 @@ static void dvic_disconnect(struct omap_dss_device *src, static int dvic_enable(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; int r; @@ -79,8 +78,6 @@ static int dvic_enable(struct omap_dss_device *dssdev) if 
(omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 1cbc593c79ff..898eb583688f 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -71,8 +71,6 @@ static int hdmic_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 19d1804e3fe5..824f302a515b 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -57,8 +57,6 @@ static int opa362_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index fa6ed1e8649d..de4233980898 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -50,8 +50,6 @@ static int tfp410_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 21c2667f9f06..d9a590244eaa 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -66,15 +66,12 @@ static void tpd_disconnect(struct omap_dss_device *src, static int tpd_enable(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; int r; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index b2f17b2a93b2..32efed45a63c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -57,8 +57,6 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 1121d1eba76b..ffa69fd44d87 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -142,8 +142,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 248a8f1ea7df..26af95a71eab 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -132,8 +132,6 @@ static int 
nec_8048_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index ef696e1e7d45..a83cd9f4bdb0 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -80,8 +80,6 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - if (ddata->vcc) { r = regulator_enable(ddata->vcc); if (r != 0) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 7be58da1075f..8e45c5e64efa 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -523,8 +523,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) dev_dbg(&ddata->spi->dev, "%s\n", __func__); - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) { pr_err("%s sdi enable failed\n", __func__); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 087f62f4311b..b2a16c470f73 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -187,8 +187,6 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 7bc602dfb84a..42f80b3ec350 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -338,8 +338,6 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - src->ops->set_timings(src, &ddata->vm); - r = src->ops->enable(src); if (r) return r; -- GitLab From 138fe53ef8d341dd27a0f01d55f8774d33b880f8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Jun 2018 00:04:39 +0300 Subject: [PATCH 0976/1692] drm/omap: Remove unneeded fallback for missing .check_timings() The .check_timings() operation is present in all panels and connectors. The fallback that uses .get_timings() in the absence of .check_timings() is thus unneeded. While it could be argued that the fallback implements a useful check that should be extended to cover all fixed-resolution panels, the code is currently unused and gets in the way of the ongoing refactoring. Remove it, a similar feature can always be added later. 
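For reference, the fallback being removed looked roughly like this (condensed from the omap_connector.c hunk below; a sketch, not the full function):

    /* Before: fall back to comparing against the panel's fixed timings. */
    if (dssdev->ops->check_timings) {
        r = dssdev->ops->check_timings(dssdev, &vm);
    } else {
        struct videomode t = { 0 };

        dssdev->ops->get_timings(dssdev, &t);
        /* drm_display_mode_to_videomode() does not fill in flags. */
        t.flags = 0;
        r = memcmp(&vm, &t, sizeof(vm)) ? -EINVAL : 0;
    }

    /* After: every panel/connector provides .check_timings(). */
    r = dssdev->ops->check_timings(dssdev, &vm);
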
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_connector.c | 25 +----------------------- drivers/gpu/drm/omapdrm/omap_encoder.c | 16 ++------------- 2 files changed, 3 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 06c48a64b745..b58d9a0bb53d 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -261,30 +261,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector, drm_display_mode_to_videomode(mode, &vm); mode->vrefresh = drm_mode_vrefresh(mode); - /* - * if the panel driver doesn't have a check_timings, it's most likely - * a fixed resolution panel, check if the timings match with the - * panel's timings - */ - if (dssdev->ops->check_timings) { - r = dssdev->ops->check_timings(dssdev, &vm); - } else { - struct videomode t = {0}; - - dssdev->ops->get_timings(dssdev, &t); - - /* - * Ignore the flags, as we don't get them from - * drm_display_mode_to_videomode. - */ - t.flags = 0; - - if (memcmp(&vm, &t, sizeof(vm))) - r = -EINVAL; - else - r = 0; - } - + r = dssdev->ops->check_timings(dssdev, &vm); if (!r) { /* check if vrefresh is still valid */ new_mode = drm_mode_duplicate(dev, mode); diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 94b75d018e71..a6dce480b2cf 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -101,21 +101,9 @@ static int omap_encoder_update(struct drm_encoder *encoder, struct omap_dss_device *dssdev = omap_encoder->display; int ret; - if (dssdev->ops->check_timings) { - ret = dssdev->ops->check_timings(dssdev, vm); - } else { - struct videomode t = {0}; - - dssdev->ops->get_timings(dssdev, &t); - - if (memcmp(vm, &t, sizeof(*vm))) - ret = -EINVAL; - else - ret = 0; - } - + ret = dssdev->ops->check_timings(dssdev, vm); if (ret) { - dev_err(dev->dev, "could not set timings: %d\n", ret); + dev_err(dev->dev, "invalid timings: %d\n", ret); return ret; } -- GitLab From 26c91a3898f1fd52af4e90d03ad740586112a6f7 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Jun 2018 16:18:43 +0300 Subject: [PATCH 0977/1692] drm/omap: Don't store video mode internally for external encoders The omap_dss_device .set_timings() operation for external encoders stores the video mode in the device data structure. That mode is then never used again. Drop it. 
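Condensed from the opa362 hunk below (debug print omitted), a typical external-encoder .set_timings() handler reduces to a plain forward after this change:

    static void opa362_set_timings(struct omap_dss_device *dssdev,
                                   const struct videomode *vm)
    {
        struct omap_dss_device *src = dssdev->src;

        /* The local ddata->vm copy is gone; it was never read back. */
        src->ops->set_timings(src, vm);
    }
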
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 5 ----- drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 5 ----- drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 5 ----- 3 files changed, 15 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 824f302a515b..05d128600712 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -25,8 +25,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; struct gpio_desc *enable_gpio; - - struct videomode vm; }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) @@ -90,13 +88,10 @@ static void opa362_disable(struct omap_dss_device *dssdev) static void opa362_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "set_timings\n"); - ddata->vm = *vm; - src->ops->set_timings(src, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index de4233980898..86c90c15681e 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -20,8 +20,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; struct gpio_desc *pd_gpio; - - struct videomode vm; }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) @@ -87,11 +85,8 @@ static void tfp410_fix_timings(struct videomode *vm) static void tfp410_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index d9a590244eaa..1e24559e0aa1 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -28,8 +28,6 @@ struct panel_drv_data { struct gpio_desc *ct_cp_hpd_gpio; struct gpio_desc *ls_oe_gpio; struct gpio_desc *hpd_gpio; - - struct videomode vm; }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) @@ -96,11 +94,8 @@ static void tpd_disable(struct omap_dss_device *dssdev) static void tpd_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } -- GitLab From b4935e3a3cfa456b356e9714e75513be672c227e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Jun 2018 15:20:01 +0300 Subject: [PATCH 0978/1692] drm/omap: Store bus flags in the omap_dss_device structure Source components in the display pipeline need to configure their output signals polarities and clock driving edge based on the requirements of the sink component. Those requirements are currently shared across the whole pipeline in the flags of a videomode structure, instead of being local to each bus. This both prevents multiple buses from having different configurations (when the hardware supports it), and makes it difficult to move from videomode to drm_display_mode as the latter doesn't contain bus polarities and clock edge flags. 
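As a concrete illustration of where this is heading (taken from the tfp410 probe hunk further below), a source device ends up declaring its signal polarity and clock-edge requirements locally instead of patching them into the shared videomode flags:

    dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
                      | DRM_BUS_FLAG_SYNC_POSEDGE
                      | DRM_BUS_FLAG_PIXDATA_POSEDGE;
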
Add a bus_flags field to the omap_dss_device structure and move the DISPLAY_FLAGS_DE_(LOW|HIGH), DISPLAY_FLAGS_PIXDATA_(POS|NEG)EDGE and DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE videomode flags to bus_flags in all external encoders, connectors and panels. The videomode flags are still used internally for internal encoders, this will be addressed in a second step. The related videomode flags in the default mode of the DVI connector can simply be dropped, as they are always overridden by the TFP410 driver. Note that this results in both the DISPLAY_FLAGS_SYNC_POSEDGE and DISPLAY_FLAGS_SYNC_NEGEDGE flags being set, which is invalid, but only the former is tested for when programming the DISPC, so the DVI connector flags are effectively overridden by the TFP410 flags. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../gpu/drm/omapdrm/displays/connector-dvi.c | 4 +- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 10 +---- .../displays/panel-lgphilips-lb035q02.c | 17 ++++---- .../omapdrm/displays/panel-nec-nl8048hl11.c | 6 +-- .../displays/panel-sharp-ls037v7dw01.c | 15 +++---- .../omapdrm/displays/panel-sony-acx565akm.c | 6 +-- .../omapdrm/displays/panel-tpo-td028ttec1.c | 15 +++---- .../omapdrm/displays/panel-tpo-td043mtea1.c | 15 +++---- drivers/gpu/drm/omapdrm/dss/dsi.c | 9 ++-- drivers/gpu/drm/omapdrm/dss/omapdss.h | 1 + drivers/gpu/drm/omapdrm/dss/sdi.c | 5 +-- drivers/gpu/drm/omapdrm/omap_crtc.c | 42 ++++++++++++++----- 12 files changed, 79 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index b89555ed53a0..5871872ae19b 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -33,9 +33,7 @@ static const struct videomode dvic_default_vm = { .vsync_len = 4, .vback_porch = 7, - .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH | - DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH | - DISPLAY_FLAGS_PIXDATA_POSEDGE, + .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH, }; struct panel_drv_data { diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 86c90c15681e..56b78cd38701 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -76,12 +76,6 @@ static void tfp410_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tfp410_fix_timings(struct videomode *vm) -{ - vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_SYNC_POSEDGE; -} - static void tfp410_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { @@ -95,8 +89,6 @@ static int tfp410_check_timings(struct omap_dss_device *dssdev, { struct omap_dss_device *src = dssdev->src; - tfp410_fix_timings(vm); - return src->ops->check_timings(src, vm); } @@ -137,6 +129,8 @@ static int tfp410_probe(struct platform_device *pdev) dssdev->output_type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); if (IS_ERR(dssdev->next)) { diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index ffa69fd44d87..a211506506c0 100644 
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -33,14 +33,7 @@ static const struct videomode lb035q02_vm = { .vfront_porch = 4, .vback_porch = 18, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE | - DISPLAY_FLAGS_PIXDATA_POSEDGE, - /* - * Note: According to the panel documentation: - * DE is active LOW - * DATA needs to be driven on the FALLING edge - */ + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; struct panel_drv_data { @@ -252,6 +245,14 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + /* + * Note: According to the panel documentation: + * DE is active LOW + * DATA needs to be driven on the FALLING edge + */ + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; + omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 26af95a71eab..1b2f33d43bd9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -71,9 +71,7 @@ static const struct videomode nec_8048_panel_vm = { .vsync_len = 1, .vback_porch = 4, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_PIXDATA_POSEDGE, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -241,6 +239,8 @@ static int nec_8048_probe(struct spi_device *spi) dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index a83cd9f4bdb0..fbf88aaaaf56 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -46,13 +46,7 @@ static const struct videomode sharp_ls_vm = { .vfront_porch = 1, .vback_porch = 1, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE | - DISPLAY_FLAGS_PIXDATA_POSEDGE, - /* - * Note: According to the panel documentation: - * DATA needs to be driven on the FALLING edge - */ + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -250,6 +244,13 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + /* + * Note: According to the panel documentation: + * DATA needs to be driven on the FALLING edge + */ + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; + omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 8e45c5e64efa..7e99d546132f 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ 
-97,9 +97,7 @@ static const struct videomode acx565akm_panel_vm = { .vsync_len = 3, .vback_porch = 4, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE | - DISPLAY_FLAGS_PIXDATA_POSEDGE, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -764,6 +762,8 @@ static int acx565akm_probe(struct spi_device *spi) dssdev->type = OMAP_DISPLAY_TYPE_SDI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index b2a16c470f73..e8c73ad48e0c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -49,13 +49,7 @@ static const struct videomode td028ttec1_panel_vm = { .vsync_len = 2, .vback_porch = 2, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_PIXDATA_NEGEDGE, - /* - * Note: According to the panel documentation: - * SYNC needs to be driven on the FALLING edge - */ + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define JBT_COMMAND 0x000 @@ -374,6 +368,13 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + /* + * Note: According to the panel documentation: + * SYNC needs to be driven on the FALLING edge + */ + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE + | DRM_BUS_FLAG_PIXDATA_NEGEDGE; + omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 42f80b3ec350..ae7a40a1759a 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -81,13 +81,7 @@ static const struct videomode tpo_td043_vm = { .vfront_porch = 39, .vback_porch = 34, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_PIXDATA_NEGEDGE, - /* - * Note: According to the panel documentation: - * SYNC needs to be driven on the FALLING edge - */ + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -472,6 +466,13 @@ static int tpo_td043_probe(struct spi_device *spi) dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + /* + * Note: According to the panel documentation: + * SYNC needs to be driven on the FALLING edge + */ + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE + | DRM_BUS_FLAG_PIXDATA_NEGEDGE; + omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 0afefac4bf65..280f63081224 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4053,12 +4053,6 @@ static int dsi_display_init_dispc(struct dsi_data *dsi) dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; - dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE; - dsi->vm.flags |= 
DISPLAY_FLAGS_PIXDATA_POSEDGE; - dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW; - dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH; - dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE; - dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; dss_mgr_set_timings(&dsi->output, &dsi->vm); @@ -5142,6 +5136,9 @@ static int dsi_init_output(struct dsi_data *dsi) out->ops = &dsi_ops; out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE + | DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_NEGEDGE; out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); if (IS_ERR(out->next)) { diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 87306014a53a..251e092dfb05 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -432,6 +432,7 @@ struct omap_dss_device { const struct omap_dss_driver *driver; const struct omap_dss_device_ops *ops; unsigned long ops_flags; + unsigned long bus_flags; /* helper variable for driver suspend/resume */ bool activate_after_resume; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 3b5f97932475..8e111afe5958 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -151,9 +151,6 @@ static int sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - /* 15.5.9.1.2 */ - vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE; - r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo); if (r) goto err_calc_clock_div; @@ -298,6 +295,8 @@ static int sdi_init_output(struct sdi_device *sdi) out->of_ports = BIT(1); out->ops = &sdi_ops; out->owner = THIS_MODULE; + out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */ + | DRM_BUS_FLAG_SYNC_POSEDGE; out->next = omapdss_of_find_connected_device(out->dev->of_node, 1); if (IS_ERR(out->next)) { diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 6e7a777907f5..39693dfe54af 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -419,12 +419,9 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - struct omap_dss_device *display = omap_crtc->pipe->display; struct drm_display_mode *mode = &crtc->state->adjusted_mode; - const u32 flags_mask = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_DE_LOW | - DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_PIXDATA_NEGEDGE | - DISPLAY_FLAGS_SYNC_POSEDGE | DISPLAY_FLAGS_SYNC_NEGEDGE; - struct videomode vm = {0}; + struct videomode *vm = &omap_crtc->vm; + struct omap_dss_device *dssdev; DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", omap_crtc->name, mode->base.id, mode->name, @@ -433,7 +430,7 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, mode->type, mode->flags); - drm_display_mode_to_videomode(mode, &omap_crtc->vm); + drm_display_mode_to_videomode(mode, vm); /* * HACK: This fixes the vm flags. @@ -442,13 +439,36 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) * struct drm_display_mode and struct videomode. The hack below * goes and fetches the missing flags from the panel drivers. * - * Correct solution would be to use DRM's bus-flags, but that's not - * easily possible before the omapdrm's panel/encoder driver model - * has been changed to the DRM model. 
+ * A better solution is to use DRM's bus-flags through the whole driver. */ - display->ops->get_timings(display, &vm); - omap_crtc->vm.flags |= vm.flags & flags_mask; + for (dssdev = omap_crtc->pipe->output; dssdev; dssdev = dssdev->next) { + unsigned long bus_flags = dssdev->bus_flags; + + if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW | + DISPLAY_FLAGS_DE_HIGH))) { + if (bus_flags & DRM_BUS_FLAG_DE_LOW) + vm->flags |= DISPLAY_FLAGS_DE_LOW; + else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) + vm->flags |= DISPLAY_FLAGS_DE_HIGH; + } + + if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; + } + + if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE | + DISPLAY_FLAGS_SYNC_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE) + vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE) + vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; + } + } } static int omap_crtc_atomic_check(struct drm_crtc *crtc, -- GitLab From 28120302c2fdf29b515c8cbd4e3a3867cb0cde7d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Jun 2018 00:55:48 +0300 Subject: [PATCH 0979/1692] drm/omap: Don't call .check_timings() operation recursively The .check_timings() operation is called recursively from the display device back to the output device. Most components just forward the operation to the previous component in the chain, resulting in lots of duplicated pass-through functions. To avoid that, iterate over the components manually. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 9 ------ .../gpu/drm/omapdrm/displays/connector-dvi.c | 9 ------ .../gpu/drm/omapdrm/displays/connector-hdmi.c | 9 ------ .../gpu/drm/omapdrm/displays/encoder-opa362.c | 11 ------- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 9 ------ .../drm/omapdrm/displays/encoder-tpd12s015.c | 9 ------ drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 9 ------ .../displays/panel-lgphilips-lb035q02.c | 9 ------ .../omapdrm/displays/panel-nec-nl8048hl11.c | 9 ------ .../displays/panel-sharp-ls037v7dw01.c | 9 ------ .../omapdrm/displays/panel-sony-acx565akm.c | 9 ------ .../omapdrm/displays/panel-tpo-td028ttec1.c | 9 ------ .../omapdrm/displays/panel-tpo-td043mtea1.c | 9 ------ drivers/gpu/drm/omapdrm/omap_connector.c | 32 +++++++++++-------- drivers/gpu/drm/omapdrm/omap_encoder.c | 20 ++++++++---- 15 files changed, 32 insertions(+), 139 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index fb6d4fce1853..a9e2a366a851 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -109,14 +109,6 @@ static void tvc_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int tvc_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops tvc_ops = { .connect = tvc_connect, .disconnect = tvc_disconnect, @@ -126,7 +118,6 @@ static const struct omap_dss_device_ops tvc_ops = { .set_timings = tvc_set_timings, .get_timings = tvc_get_timings, - .check_timings = tvc_check_timings, }; static int tvc_probe(struct 
platform_device *pdev) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 5871872ae19b..a9e2f1181987 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -116,14 +116,6 @@ static void dvic_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int dvic_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static int dvic_ddc_read(struct i2c_adapter *adapter, unsigned char *buf, u16 count, u8 offset) { @@ -232,7 +224,6 @@ static const struct omap_dss_device_ops dvic_ops = { .set_timings = dvic_set_timings, .get_timings = dvic_get_timings, - .check_timings = dvic_check_timings, .read_edid = dvic_read_edid, .detect = dvic_detect, diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 898eb583688f..7e449f8a9b5d 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -114,14 +114,6 @@ static void hdmic_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int hdmic_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -161,7 +153,6 @@ static const struct omap_dss_device_ops hdmic_ops = { .set_timings = hdmic_set_timings, .get_timings = hdmic_get_timings, - .check_timings = hdmic_check_timings, .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 05d128600712..bdf796123133 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -95,22 +95,11 @@ static void opa362_set_timings(struct omap_dss_device *dssdev, src->ops->set_timings(src, vm); } -static int opa362_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - dev_dbg(dssdev->dev, "check_timings\n"); - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops opa362_ops = { .connect = opa362_connect, .disconnect = opa362_disconnect, .enable = opa362_enable, .disable = opa362_disable, - .check_timings = opa362_check_timings, .set_timings = opa362_set_timings, }; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 56b78cd38701..c3ceee2d19d0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -84,20 +84,11 @@ static void tfp410_set_timings(struct omap_dss_device *dssdev, src->ops->set_timings(src, vm); } -static int tfp410_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops tfp410_ops = { .connect = tfp410_connect, .disconnect = tfp410_disconnect, .enable = tfp410_enable, .disable = tfp410_disable, - .check_timings = tfp410_check_timings, .set_timings = tfp410_set_timings, }; diff --git 
a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 1e24559e0aa1..4598e5ca4238 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -99,14 +99,6 @@ static void tpd_set_timings(struct omap_dss_device *dssdev, src->ops->set_timings(src, vm); } -static int tpd_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -142,7 +134,6 @@ static const struct omap_dss_device_ops tpd_ops = { .disconnect = tpd_disconnect, .enable = tpd_enable, .disable = tpd_disable, - .check_timings = tpd_check_timings, .set_timings = tpd_set_timings, .detect = tpd_detect, .register_hpd_cb = tpd_register_hpd_cb, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 32efed45a63c..e75600a33c37 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -112,14 +112,6 @@ static void panel_dpi_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int panel_dpi_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops panel_dpi_ops = { .connect = panel_dpi_connect, .disconnect = panel_dpi_disconnect, @@ -129,7 +121,6 @@ static const struct omap_dss_device_ops panel_dpi_ops = { .set_timings = panel_dpi_set_timings, .get_timings = panel_dpi_get_timings, - .check_timings = panel_dpi_check_timings, }; static int panel_dpi_probe_of(struct platform_device *pdev) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index a211506506c0..3c221f7f0598 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -182,14 +182,6 @@ static void lb035q02_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int lb035q02_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops lb035q02_ops = { .connect = lb035q02_connect, .disconnect = lb035q02_disconnect, @@ -199,7 +191,6 @@ static const struct omap_dss_device_ops lb035q02_ops = { .set_timings = lb035q02_set_timings, .get_timings = lb035q02_get_timings, - .check_timings = lb035q02_check_timings, }; static int lb035q02_probe_of(struct spi_device *spi) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 1b2f33d43bd9..78ff18c4eb46 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -175,14 +175,6 @@ static void nec_8048_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int nec_8048_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops nec_8048_ops = { .connect = nec_8048_connect, .disconnect = 
nec_8048_disconnect, @@ -192,7 +184,6 @@ static const struct omap_dss_device_ops nec_8048_ops = { .set_timings = nec_8048_set_timings, .get_timings = nec_8048_get_timings, - .check_timings = nec_8048_check_timings, }; static int nec_8048_probe(struct spi_device *spi) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index fbf88aaaaf56..47e97dbffc07 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -145,14 +145,6 @@ static void sharp_ls_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int sharp_ls_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops sharp_ls_ops = { .connect = sharp_ls_connect, .disconnect = sharp_ls_disconnect, @@ -162,7 +154,6 @@ static const struct omap_dss_device_ops sharp_ls_ops = { .set_timings = sharp_ls_set_timings, .get_timings = sharp_ls_get_timings, - .check_timings = sharp_ls_check_timings, }; static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 7e99d546132f..1ec3b1e2107c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -648,14 +648,6 @@ static void acx565akm_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int acx565akm_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops acx565akm_ops = { .connect = acx565akm_connect, .disconnect = acx565akm_disconnect, @@ -665,7 +657,6 @@ static const struct omap_dss_device_ops acx565akm_ops = { .set_timings = acx565akm_set_timings, .get_timings = acx565akm_get_timings, - .check_timings = acx565akm_check_timings, }; static int acx565akm_probe(struct spi_device *spi) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index e8c73ad48e0c..cff1a1a68827 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -314,14 +314,6 @@ static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops td028ttec1_ops = { .connect = td028ttec1_panel_connect, .disconnect = td028ttec1_panel_disconnect, @@ -331,7 +323,6 @@ static const struct omap_dss_device_ops td028ttec1_ops = { .set_timings = td028ttec1_panel_set_timings, .get_timings = td028ttec1_panel_get_timings, - .check_timings = td028ttec1_panel_check_timings, }; static int td028ttec1_panel_probe(struct spi_device *spi) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index ae7a40a1759a..513e846d52c4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -388,14 +388,6 @@ 
static void tpo_td043_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int tpo_td043_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - return src->ops->check_timings(src, vm); -} - static const struct omap_dss_device_ops tpo_td043_ops = { .connect = tpo_td043_connect, .disconnect = tpo_td043_disconnect, @@ -405,7 +397,6 @@ static const struct omap_dss_device_ops tpo_td043_ops = { .set_timings = tpo_td043_set_timings, .get_timings = tpo_td043_get_timings, - .check_timings = tpo_td043_check_timings, }; static int tpo_td043_probe(struct spi_device *spi) diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index b58d9a0bb53d..302f2ed245d0 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -252,7 +252,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->display; + struct omap_dss_device *dssdev; struct videomode vm = {0}; struct drm_device *dev = connector->dev; struct drm_display_mode *new_mode; @@ -261,21 +261,27 @@ static int omap_connector_mode_valid(struct drm_connector *connector, drm_display_mode_to_videomode(mode, &vm); mode->vrefresh = drm_mode_vrefresh(mode); - r = dssdev->ops->check_timings(dssdev, &vm); - if (!r) { - /* check if vrefresh is still valid */ - new_mode = drm_mode_duplicate(dev, mode); - - if (!new_mode) - return MODE_BAD; + for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) { + if (!dssdev->ops->check_timings) + continue; - new_mode->clock = vm.pixelclock / 1000; - new_mode->vrefresh = 0; - if (mode->vrefresh == drm_mode_vrefresh(new_mode)) - ret = MODE_OK; - drm_mode_destroy(dev, new_mode); + r = dssdev->ops->check_timings(dssdev, &vm); + if (r) + goto done; } + /* check if vrefresh is still valid */ + new_mode = drm_mode_duplicate(dev, mode); + if (!new_mode) + return MODE_BAD; + + new_mode->clock = vm.pixelclock / 1000; + new_mode->vrefresh = 0; + if (mode->vrefresh == drm_mode_vrefresh(new_mode)) + ret = MODE_OK; + drm_mode_destroy(dev, new_mode); + +done: DBG("connector: mode %s: " "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", (ret == MODE_OK) ? 
"valid" : "invalid", diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index a6dce480b2cf..bb010c20d8b8 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -98,17 +98,23 @@ static int omap_encoder_update(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->display; + struct omap_dss_device *display = omap_encoder->display; + struct omap_dss_device *dssdev; int ret; - ret = dssdev->ops->check_timings(dssdev, vm); - if (ret) { - dev_err(dev->dev, "invalid timings: %d\n", ret); - return ret; + for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + if (!dssdev->ops->check_timings) + continue; + + ret = dssdev->ops->check_timings(dssdev, vm); + if (ret) { + dev_err(dev->dev, "invalid timings: %d\n", ret); + return ret; + } } - if (dssdev->ops->set_timings) - dssdev->ops->set_timings(dssdev, vm); + if (display->ops->set_timings) + display->ops->set_timings(display, vm); return 0; } -- GitLab From 35d944cbee2199312c6832a4acd0201a921545f4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Jun 2018 17:49:37 +0300 Subject: [PATCH 0980/1692] drm/omap: Query timing information from analog TV encoder Timings for the TV output are currently reported by the analog TV connector. This has the disadvantage of having to handle timing-related operations in a connector omap_dss_device that has, at the hardware level, no knowledge of any timing information. Implement the .get_timings() operation in the venc driver, and get timings from the first component in the pipeline that implements the operatation. This switches the duty of reporting analog TV timings from the connector to the encoder. 
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/venc.c | 12 +++++++++ drivers/gpu/drm/omapdrm/omap_connector.c | 34 +++++++++++++++++------- 2 files changed, 37 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 255bf2cd8afc..09ec8b0eafee 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -568,6 +568,16 @@ static void venc_display_disable(struct omap_dss_device *dssdev) mutex_unlock(&venc->venc_lock); } +static void venc_get_timings(struct omap_dss_device *dssdev, + struct videomode *vm) +{ + struct venc_device *venc = dssdev_to_venc(dssdev); + + mutex_lock(&venc->venc_lock); + *vm = venc->vm; + mutex_unlock(&venc->venc_lock); +} + static void venc_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { @@ -720,6 +730,7 @@ static const struct omap_dss_device_ops venc_ops = { .disable = venc_display_disable, .check_timings = venc_check_timings, + .get_timings = venc_get_timings, .set_timings = venc_set_timings, }; @@ -877,6 +888,7 @@ static int venc_probe(struct platform_device *pdev) mutex_init(&venc->venc_lock); venc->wss_data = 0; + venc->vm = omap_dss_pal_vm; venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0); venc->base = devm_ioremap_resource(&pdev->dev, venc_mem); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 302f2ed245d0..b8317b697083 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -218,20 +218,41 @@ static int omap_connector_get_modes(struct drm_connector *connector) /* * If display exposes EDID, then we parse that in the normal way to - * build table of supported modes. Otherwise (ie. fixed resolution - * LCD panels) we just return a single mode corresponding to the - * currently configured timings. + * build table of supported modes. */ dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_EDID); if (dssdev) return omap_connector_get_modes_edid(connector, dssdev); + /* + * Otherwise we have either a fixed resolution panel or an output that + * doesn't support modes discovery (e.g. DVI or VGA with the DDC bus + * unconnected, or analog TV). Start by querying the size. + */ + dssdev = omap_connector->display; + if (dssdev->driver && dssdev->driver->get_size) + dssdev->driver->get_size(dssdev, + &connector->display_info.width_mm, + &connector->display_info.height_mm); + + /* + * Iterate over the pipeline to find the first device that can provide + * timing information. If we can't find any, we just let the KMS core + * add the default modes. + */ + for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) { + if (dssdev->ops->get_timings) + break; + } + if (!dssdev) + return 0; + + /* Add a single mode corresponding to the fixed panel timings. 
*/ mode = drm_mode_create(connector->dev); if (!mode) return 0; - dssdev = omap_connector->display; dssdev->ops->get_timings(dssdev, &vm); drm_display_mode_from_videomode(&vm, mode); @@ -240,11 +261,6 @@ static int omap_connector_get_modes(struct drm_connector *connector) drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); - if (dssdev->driver && dssdev->driver->get_size) - dssdev->driver->get_size(dssdev, - &connector->display_info.width_mm, - &connector->display_info.height_mm); - return 1; } -- GitLab From ca6e968b9326a17d072b14b658fff538466c6bd2 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Jun 2018 18:04:26 +0300 Subject: [PATCH 0981/1692] drm/omap: Remove .get_timings() operation from display connectors The analog TV, DVI and HDMI connectors all report timing information through the .get_timings() information. For analog TV outputs the information is queried from the encoder, so the operation is unused. Remove it. For HDMI outputs the display pipeline provides EDID capability, so the operation is unused as well. Remove it. For DVI outputs the operation is also unused if the pipeline provides EDID capability. Otherwise (when the DDC bus is not connected) we shouldn't hardcode a single mode, but instead report no mode and let the KMS core add default modes. This is achieved by removing the operation. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 31 ----------------- .../gpu/drm/omapdrm/displays/connector-dvi.c | 33 ------------------- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 30 ----------------- 3 files changed, 94 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index a9e2a366a851..4866bf8ed524 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -20,23 +20,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; struct device *dev; - - struct videomode vm; -}; - -static const struct videomode tvc_pal_vm = { - .hactive = 720, - .vactive = 574, - .pixelclock = 13500000, - .hsync_len = 64, - .hfront_porch = 12, - .hback_porch = 68, - .vsync_len = 5, - .vfront_porch = 5, - .vback_porch = 41, - - .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW | - DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) @@ -93,22 +76,11 @@ static void tvc_disable(struct omap_dss_device *dssdev) static void tvc_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } -static void tvc_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - static const struct omap_dss_device_ops tvc_ops = { .connect = tvc_connect, .disconnect = tvc_disconnect, @@ -117,7 +89,6 @@ static const struct omap_dss_device_ops tvc_ops = { .disable = tvc_disable, .set_timings = tvc_set_timings, - .get_timings = tvc_get_timings, }; static int tvc_probe(struct platform_device *pdev) @@ -132,8 +103,6 @@ static int tvc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->dev = &pdev->dev; - ddata->vm = tvc_pal_vm; - dssdev = &ddata->dssdev; dssdev->ops = &tvc_ops; dssdev->dev = &pdev->dev; diff --git 
a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index a9e2f1181987..818a4dc452e0 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -19,28 +19,9 @@ #include "../dss/omapdss.h" -static const struct videomode dvic_default_vm = { - .hactive = 640, - .vactive = 480, - - .pixelclock = 23500000, - - .hfront_porch = 48, - .hsync_len = 32, - .hback_porch = 80, - - .vfront_porch = 3, - .vsync_len = 4, - .vback_porch = 7, - - .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH, -}; - struct panel_drv_data { struct omap_dss_device dssdev; - struct videomode vm; - struct i2c_adapter *i2c_adapter; struct gpio_desc *hpd_gpio; @@ -100,22 +81,11 @@ static void dvic_disable(struct omap_dss_device *dssdev) static void dvic_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } -static void dvic_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - static int dvic_ddc_read(struct i2c_adapter *adapter, unsigned char *buf, u16 count, u8 offset) { @@ -223,7 +193,6 @@ static const struct omap_dss_device_ops dvic_ops = { .disable = dvic_disable, .set_timings = dvic_set_timings, - .get_timings = dvic_get_timings, .read_edid = dvic_read_edid, .detect = dvic_detect, @@ -311,8 +280,6 @@ static int dvic_probe(struct platform_device *pdev) if (r) return r; - ddata->vm = dvic_default_vm; - dssdev = &ddata->dssdev; dssdev->ops = &dvic_ops; dssdev->dev = &pdev->dev; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 7e449f8a9b5d..a32e4159242d 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -17,20 +17,6 @@ #include "../dss/omapdss.h" -static const struct videomode hdmic_default_vm = { - .hactive = 640, - .vactive = 480, - .pixelclock = 25175000, - .hsync_len = 96, - .hfront_porch = 16, - .hback_porch = 48, - .vsync_len = 2, - .vfront_porch = 11, - .vback_porch = 31, - - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, -}; - struct panel_drv_data { struct omap_dss_device dssdev; void (*hpd_cb)(void *cb_data, enum drm_connector_status status); @@ -39,8 +25,6 @@ struct panel_drv_data { struct device *dev; - struct videomode vm; - struct gpio_desc *hpd_gpio; }; @@ -98,22 +82,11 @@ static void hdmic_disable(struct omap_dss_device *dssdev) static void hdmic_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } -static void hdmic_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -152,7 +125,6 @@ static const struct omap_dss_device_ops hdmic_ops = { .disable = hdmic_disable, .set_timings = hdmic_set_timings, - .get_timings = hdmic_get_timings, .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, @@ -215,8 +187,6 @@ static int hdmic_probe(struct platform_device *pdev) return r; } - 
ddata->vm = hdmic_default_vm; - dssdev = &ddata->dssdev; dssdev->ops = &hdmic_ops; dssdev->dev = &pdev->dev; -- GitLab From 31cd7afa3086f1206ef4c7434e033669702adf08 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 7 Jun 2018 00:17:32 +0300 Subject: [PATCH 0982/1692] drm/omap: panels: Don't modify fixed timings Panels drivers store their timings in a device data structure field that is initialized at probe time, either from hardcoded values or from firmware-supplied values. Those timings are then reported through the .get_timings() operation to construct the panel display mode. The panel timings are further modified by the .set_timings() operation, which is called with the timings retrieved by .get_timings(), and mangled by .check_timings(). The latter potentially adjusts the pixel clock only. Conceptually, modifying the panel timings is wrong, as the timings are an intrinsic property of the panel and should thus be fixed. Furthermore, modifying them this way at runtime can result in display modes reported to userspace varying between calls, which is also wrong. There's no actual need to store the mangled pixel clock value in the timings. Don't modify the panel timings in the .set_timings() operation, just forward it to the previous device in the display pipeline. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 3 --- drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 3 --- drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 3 --- drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 3 --- drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 3 --- drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 3 --- drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 3 --- 7 files changed, 21 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index e75600a33c37..95cdfde174aa 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -96,11 +96,8 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) static void panel_dpi_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 3c221f7f0598..4e21de0e010d 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -166,11 +166,8 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) static void lb035q02_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 78ff18c4eb46..f6fc7b8e639b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -159,11 +159,8 @@ static void nec_8048_disable(struct omap_dss_device *dssdev) static void nec_8048_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - 
struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 47e97dbffc07..51ca92c82e2a 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -129,11 +129,8 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) static void sharp_ls_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 1ec3b1e2107c..974982c46445 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -632,11 +632,8 @@ static void acx565akm_disable(struct omap_dss_device *dssdev) static void acx565akm_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index cff1a1a68827..ee17eb3e0a48 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -298,11 +298,8 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 513e846d52c4..7b0439274458 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -372,11 +372,8 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev) static void tpo_td043_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *src = dssdev->src; - ddata->vm = *vm; - src->ops->set_timings(src, vm); } -- GitLab From 8e9c1c6676ea3d0dc60d84ee9a69984a4bcf859f Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 7 Jun 2018 18:32:16 +0300 Subject: [PATCH 0983/1692] drm/omap: Move bus flag hack to encoder implementation The bus flags stored in omap_dss_device instances are used to fixup the video mode before setting it, to honour constraints that can't be expressed through drm_display_mode. The fixup occurs in the CRTC mode set operation and the resulting video mode is stored internally in the CRTC. It is then used next by omap_encoder_enable() to apply mode fixups for the omap_dss_device instances in omap_encoder_update(). Move the hack to the omap_encoder_update() function right before applying the omap_dss_device fixups, in order to group all fixups together. 
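The fixup itself only fills in polarity and edge flags that the converted mode does not already carry, using the omap_dss_device bus flags. A condensed sketch for the data-enable polarity, wrapped in a hypothetical helper for readability (the hunks below repeat the same pattern inline for the pixel data and sync edges):

static void sketch_fixup_de_polarity(const struct omap_dss_device *dssdev,
				     struct videomode *vm)
{
	/* Respect a DE polarity already present in the mode. */
	if (vm->flags & (DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_DE_HIGH))
		return;

	if (dssdev->bus_flags & DRM_BUS_FLAG_DE_LOW)
		vm->flags |= DISPLAY_FLAGS_DE_LOW;
	else if (dssdev->bus_flags & DRM_BUS_FLAG_DE_HIGH)
		vm->flags |= DISPLAY_FLAGS_DE_HIGH;
}

Moving this loop into omap_encoder_update() keeps all omap_dss_device fixups in one place until bus flags are used throughout the driver.
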
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_crtc.c | 42 +--------------------- drivers/gpu/drm/omapdrm/omap_encoder.c | 48 ++++++++++++++++++++++---- 2 files changed, 43 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 39693dfe54af..62928ec0e7db 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -420,8 +420,6 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); struct drm_display_mode *mode = &crtc->state->adjusted_mode; - struct videomode *vm = &omap_crtc->vm; - struct omap_dss_device *dssdev; DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", omap_crtc->name, mode->base.id, mode->name, @@ -430,45 +428,7 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, mode->type, mode->flags); - drm_display_mode_to_videomode(mode, vm); - - /* - * HACK: This fixes the vm flags. - * struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags - * and they get lost when converting back and forth between - * struct drm_display_mode and struct videomode. The hack below - * goes and fetches the missing flags from the panel drivers. - * - * A better solution is to use DRM's bus-flags through the whole driver. - */ - - for (dssdev = omap_crtc->pipe->output; dssdev; dssdev = dssdev->next) { - unsigned long bus_flags = dssdev->bus_flags; - - if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW | - DISPLAY_FLAGS_DE_HIGH))) { - if (bus_flags & DRM_BUS_FLAG_DE_LOW) - vm->flags |= DISPLAY_FLAGS_DE_LOW; - else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) - vm->flags |= DISPLAY_FLAGS_DE_HIGH; - } - - if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { - if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) - vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; - else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) - vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; - } - - if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_SYNC_NEGEDGE))) { - if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE) - vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE; - else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE) - vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; - } - } + drm_display_mode_to_videomode(mode, &omap_crtc->vm); } static int omap_crtc_atomic_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index bb010c20d8b8..82cdcba961a8 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -103,13 +103,49 @@ static int omap_encoder_update(struct drm_encoder *encoder, int ret; for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { - if (!dssdev->ops->check_timings) - continue; + unsigned long bus_flags = dssdev->bus_flags; + + if (dssdev->ops->check_timings) { + ret = dssdev->ops->check_timings(dssdev, vm); + if (ret) { + dev_err(dev->dev, "invalid timings: %d\n", ret); + return ret; + } + } + + /* + * HACK: This fixes the vm flags. + * struct drm_display_mode does not contain the VSYNC/HSYNC/DE + * flags and they get lost when converting back and forth + * between struct drm_display_mode and struct videomode. The + * hack below goes and fetches the missing flags. + * + * A better solution is to use DRM's bus-flags through the whole + * driver. 
+ */ + + if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW | + DISPLAY_FLAGS_DE_HIGH))) { + if (bus_flags & DRM_BUS_FLAG_DE_LOW) + vm->flags |= DISPLAY_FLAGS_DE_LOW; + else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) + vm->flags |= DISPLAY_FLAGS_DE_HIGH; + } + + if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; + } - ret = dssdev->ops->check_timings(dssdev, vm); - if (ret) { - dev_err(dev->dev, "invalid timings: %d\n", ret); - return ret; + if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE | + DISPLAY_FLAGS_SYNC_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE) + vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE) + vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; } } -- GitLab From 3fbda31e814868d8477ddf52d74b7b8f596578e8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 7 Jun 2018 17:58:57 +0300 Subject: [PATCH 0984/1692] drm/omap: Split mode fixup and mode set from encoder enable The encoder enable operation currently performs mode fixup and mode setting for all omap_dss_device instances in the display pipeline. There are dedicated encoder operations for those operations (respectively .atomic_check() and .mode_set()), but they are not used for this purpose. Move the mode fixup code to .atomic_check() and the mode set code .mode_set() to better fit the KMS model. The bus flags fixup has to happen at .mode_set() time as there is no place to store the bus flags in the atomic state structures. This could be solved by extending one of the state structures, but as the goal is to replace the fixup by direct usage of bus flags through the driver, that would be pointless. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/omap_encoder.c | 148 +++++++++++++------------ 1 file changed, 79 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 82cdcba961a8..0177a2c4b77a 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -53,16 +53,69 @@ static const struct drm_encoder_funcs omap_encoder_funcs = { }; static void omap_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->output; + struct omap_dss_device *display = omap_encoder->display; struct drm_connector *connector; + struct omap_dss_device *dssdev; + struct videomode vm = { 0 }; bool hdmi_mode; int r; + drm_display_mode_to_videomode(adjusted_mode, &vm); + + /* + * HACK: This fixes the vm flags. + * struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags and + * they get lost when converting back and forth between struct + * drm_display_mode and struct videomode. The hack below goes and + * fetches the missing flags. + * + * A better solution is to use DRM's bus-flags through the whole driver. 
+ */ + for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + unsigned long bus_flags = dssdev->bus_flags; + + if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW | + DISPLAY_FLAGS_DE_HIGH))) { + if (bus_flags & DRM_BUS_FLAG_DE_LOW) + vm.flags |= DISPLAY_FLAGS_DE_LOW; + else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) + vm.flags |= DISPLAY_FLAGS_DE_HIGH; + } + + if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) + vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; + } + + if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE | + DISPLAY_FLAGS_SYNC_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE) + vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE) + vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; + } + } + + /* + * HACK: Call the .set_timings() operation if available, this will + * eventually store timings in the CRTC. Otherwise (for DSI outputs) + * store the timings directly. + * + * All outputs should be brought in sync to operate similarly. + */ + if (display->ops->set_timings) + display->ops->set_timings(display, &vm); + else + *omap_crtc_timings(encoder->crtc) = vm; + hdmi_mode = false; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { @@ -71,6 +124,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, } } + dssdev = omap_encoder->output; + if (dssdev->ops->hdmi.set_hdmi_mode) dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode); @@ -92,78 +147,12 @@ static void omap_encoder_disable(struct drm_encoder *encoder) dssdev->ops->disable(dssdev); } -static int omap_encoder_update(struct drm_encoder *encoder, - enum omap_channel channel, - struct videomode *vm) -{ - struct drm_device *dev = encoder->dev; - struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *display = omap_encoder->display; - struct omap_dss_device *dssdev; - int ret; - - for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { - unsigned long bus_flags = dssdev->bus_flags; - - if (dssdev->ops->check_timings) { - ret = dssdev->ops->check_timings(dssdev, vm); - if (ret) { - dev_err(dev->dev, "invalid timings: %d\n", ret); - return ret; - } - } - - /* - * HACK: This fixes the vm flags. - * struct drm_display_mode does not contain the VSYNC/HSYNC/DE - * flags and they get lost when converting back and forth - * between struct drm_display_mode and struct videomode. The - * hack below goes and fetches the missing flags. - * - * A better solution is to use DRM's bus-flags through the whole - * driver. 
- */ - - if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW | - DISPLAY_FLAGS_DE_HIGH))) { - if (bus_flags & DRM_BUS_FLAG_DE_LOW) - vm->flags |= DISPLAY_FLAGS_DE_LOW; - else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) - vm->flags |= DISPLAY_FLAGS_DE_HIGH; - } - - if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { - if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) - vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; - else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) - vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; - } - - if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_SYNC_NEGEDGE))) { - if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE) - vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE; - else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE) - vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; - } - } - - if (display->ops->set_timings) - display->ops->set_timings(display, vm); - - return 0; -} - static void omap_encoder_enable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->display; int r; - omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc), - omap_crtc_timings(encoder->crtc)); - r = dssdev->ops->enable(dssdev); if (r) dev_err(encoder->dev->dev, @@ -175,6 +164,27 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { + struct omap_encoder *omap_encoder = to_omap_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct omap_dss_device *dssdev; + struct videomode vm = { 0 }; + int ret; + + drm_display_mode_to_videomode(&crtc_state->mode, &vm); + + for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + if (!dssdev->ops->check_timings) + continue; + + ret = dssdev->ops->check_timings(dssdev, &vm); + if (ret) { + dev_err(dev->dev, "invalid timings: %d\n", ret); + return ret; + } + } + + drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode); + return 0; } -- GitLab From 7c27fa57ef31debf62c5529725d4fa096b336a99 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 5 Jun 2018 01:57:09 +0300 Subject: [PATCH 0985/1692] drm/omap: Call dispc timings check operation directly Instead of call the dispc timings check function dispc_mgr_timings_ok() from the internal encoders .check_timings() operation, expose it through the dispc ops (after renaming it to check_timings) and call it directly from omapdrm. This allows removal of now empty omap_dss_device .check_timings() operations. 
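For illustration only, a minimal self-contained sketch of the resulting call pattern: the DRM layer performs one dispc-level timings check through an ops table and then walks the device chain for any device-specific checks, instead of every internal encoder wrapping the same dispc check. All names here (sample_dispc_ops, sample_device, sample_check_mode) are hypothetical stand-ins, not the omapdrm structures.

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the driver structures, for illustration only. */
	struct sample_videomode { unsigned int hactive, vactive; unsigned long pixelclock; };

	struct sample_dispc_ops {
		/* Central timings check, analogous to the new mgr_check_timings op. */
		int (*mgr_check_timings)(int channel, const struct sample_videomode *vm);
	};

	struct sample_device {
		struct sample_device *next;                        /* next device in the pipeline */
		int (*check_timings)(struct sample_videomode *vm); /* optional per-device check */
	};

	static int sample_mgr_check_timings(int channel, const struct sample_videomode *vm)
	{
		(void)channel;
		/* 0 plays the role of MODE_OK, non-zero of MODE_BAD. */
		return (vm->hactive && vm->vactive && vm->pixelclock) ? 0 : -1;
	}

	/* Walk the chain once: dispc check first, then any device-specific checks. */
	static int sample_check_mode(const struct sample_dispc_ops *ops, int channel,
				     struct sample_device *chain, struct sample_videomode *vm)
	{
		struct sample_device *dev;
		int ret = ops->mgr_check_timings(channel, vm);

		if (ret)
			return ret;

		for (dev = chain; dev; dev = dev->next) {
			if (!dev->check_timings)
				continue;
			ret = dev->check_timings(vm);
			if (ret)
				return ret;
		}
		return 0;
	}

	int main(void)
	{
		struct sample_dispc_ops ops = { .mgr_check_timings = sample_mgr_check_timings };
		struct sample_device panel = { .next = NULL, .check_timings = NULL };
		struct sample_videomode vm = { .hactive = 1280, .vactive = 720, .pixelclock = 74250000 };

		printf("mode %s\n", sample_check_mode(&ops, 0, &panel, &vm) ? "rejected" : "accepted");
		return 0;
	}

The point of the pattern is that the validity check lives in exactly one place behind the ops table, so per-device .check_timings callbacks that only forwarded to it can be deleted.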
Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dispc.c | 18 ++++++++++-------- drivers/gpu/drm/omapdrm/dss/dpi.c | 4 ---- drivers/gpu/drm/omapdrm/dss/dss.h | 3 --- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 12 ------------ drivers/gpu/drm/omapdrm/dss/hdmi5.c | 12 ------------ drivers/gpu/drm/omapdrm/dss/omapdss.h | 3 +++ drivers/gpu/drm/omapdrm/dss/sdi.c | 6 ------ drivers/gpu/drm/omapdrm/omap_connector.c | 6 ++++++ drivers/gpu/drm/omapdrm/omap_encoder.c | 18 +++++++++++++----- 9 files changed, 32 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index da95dbfdf790..e61a9592a650 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -3113,28 +3113,29 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc, return pclk <= dispc->feat->max_tv_pclk; } -bool dispc_mgr_timings_ok(struct dispc_device *dispc, enum omap_channel channel, - const struct videomode *vm) +static int dispc_mgr_check_timings(struct dispc_device *dispc, + enum omap_channel channel, + const struct videomode *vm) { if (!_dispc_mgr_size_ok(dispc, vm->hactive, vm->vactive)) - return false; + return MODE_BAD; if (!_dispc_mgr_pclk_ok(dispc, channel, vm->pixelclock)) - return false; + return MODE_BAD; if (dss_mgr_is_lcd(channel)) { /* TODO: OMAP4+ supports interlace for LCD outputs */ if (vm->flags & DISPLAY_FLAGS_INTERLACED) - return false; + return MODE_BAD; if (!_dispc_lcd_timings_ok(dispc, vm->hsync_len, vm->hfront_porch, vm->hback_porch, vm->vsync_len, vm->vfront_porch, vm->vback_porch)) - return false; + return MODE_BAD; } - return true; + return MODE_OK; } static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc, @@ -3236,7 +3237,7 @@ static void dispc_mgr_set_timings(struct dispc_device *dispc, DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive); - if (!dispc_mgr_timings_ok(dispc, channel, &t)) { + if (dispc_mgr_check_timings(dispc, channel, &t)) { BUG(); return; } @@ -4733,6 +4734,7 @@ static const struct dispc_ops dispc_ops = { .mgr_go_busy = dispc_mgr_go_busy, .mgr_go = dispc_mgr_go, .mgr_set_lcd_config = dispc_mgr_set_lcd_config, + .mgr_check_timings = dispc_mgr_check_timings, .mgr_set_timings = dispc_mgr_set_timings, .mgr_setup = dispc_mgr_setup, .mgr_gamma_size = dispc_mgr_gamma_size, diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 58237decb5a8..d814d71fffbc 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -496,7 +496,6 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); - enum omap_channel channel = dpi->output.dispc_channel; int lck_div, pck_div; unsigned long fck; unsigned long pck; @@ -506,9 +505,6 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, if (vm->hactive % 8 != 0) return -EINVAL; - if (!dispc_mgr_timings_ok(dpi->dss->dispc, channel, vm)) - return -EINVAL; - if (vm->pixelclock == 0) return -EINVAL; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index ee06051933c5..37790c363128 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -417,9 +417,6 @@ bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq, unsigned long pck_min, unsigned long pck_max, dispc_div_calc_func func, void *data); -bool dispc_mgr_timings_ok(struct 
dispc_device *dispc, - enum omap_channel channel, - const struct videomode *vm); int dispc_calc_clock_rates(struct dispc_device *dispc, unsigned long dispc_fclk_rate, struct dispc_clock_info *cinfo); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index a66bdbe3b969..3e2bc85ef538 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -251,17 +251,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) hdmi_power_off_core(hdmi); } -static int hdmi_display_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm)) - return -EINVAL; - - return 0; -} - static void hdmi_display_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { @@ -508,7 +497,6 @@ static const struct omap_dss_device_ops hdmi_ops = { .enable = hdmi_display_enable, .disable = hdmi_display_disable, - .check_timings = hdmi_display_check_timings, .set_timings = hdmi_display_set_timings, .read_edid = hdmi_read_edid, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index d63831c9eacf..c02e08299155 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -250,17 +250,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) hdmi_power_off_core(hdmi); } -static int hdmi_display_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm)) - return -EINVAL; - - return 0; -} - static void hdmi_display_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { @@ -502,7 +491,6 @@ static const struct omap_dss_device_ops hdmi_ops = { .enable = hdmi_display_enable, .disable = hdmi_display_disable, - .check_timings = hdmi_display_check_timings, .set_timings = hdmi_display_set_timings, .read_edid = hdmi_read_edid, diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 251e092dfb05..1f698a95a94a 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -608,6 +608,9 @@ struct dispc_ops { void (*mgr_set_lcd_config)(struct dispc_device *dispc, enum omap_channel channel, const struct dss_lcd_mgr_config *config); + int (*mgr_check_timings)(struct dispc_device *dispc, + enum omap_channel channel, + const struct videomode *vm); void (*mgr_set_timings)(struct dispc_device *dispc, enum omap_channel channel, const struct videomode *vm); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 8e111afe5958..736a74db3ad5 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -237,12 +237,6 @@ static void sdi_set_timings(struct omap_dss_device *dssdev, static int sdi_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { - struct sdi_device *sdi = dssdev_to_sdi(dssdev); - enum omap_channel channel = dssdev->dispc_channel; - - if (!dispc_mgr_timings_ok(sdi->dss->dispc, channel, vm)) - return -EINVAL; - if (vm->pixelclock == 0) return -EINVAL; diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index b8317b697083..98f5ca29444a 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -268,6 +268,8 @@ static int 
omap_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct omap_connector *omap_connector = to_omap_connector(connector); + enum omap_channel channel = omap_connector->output->dispc_channel; + struct omap_drm_private *priv = connector->dev->dev_private; struct omap_dss_device *dssdev; struct videomode vm = {0}; struct drm_device *dev = connector->dev; @@ -277,6 +279,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector, drm_display_mode_to_videomode(mode, &vm); mode->vrefresh = drm_mode_vrefresh(mode); + r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm); + if (r) + goto done; + for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) { if (!dssdev->ops->check_timings) continue; diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 0177a2c4b77a..749d21a92edd 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -165,27 +165,35 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); + enum omap_channel channel = omap_encoder->output->dispc_channel; struct drm_device *dev = encoder->dev; + struct omap_drm_private *priv = dev->dev_private; struct omap_dss_device *dssdev; struct videomode vm = { 0 }; int ret; drm_display_mode_to_videomode(&crtc_state->mode, &vm); + ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm); + if (ret) + goto done; + for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { if (!dssdev->ops->check_timings) continue; ret = dssdev->ops->check_timings(dssdev, &vm); - if (ret) { - dev_err(dev->dev, "invalid timings: %d\n", ret); - return ret; - } + if (ret) + goto done; } drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode); - return 0; +done: + if (ret) + dev_err(dev->dev, "invalid timings: %d\n", ret); + + return ret; } static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { -- GitLab From f79fa7da6a2953c9964f285dfddc61f92ced0a5a Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 8 Jun 2018 16:03:14 +0300 Subject: [PATCH 0986/1692] drm/omap: dpi: Don't fixup video mode in dpi_set_mode() The video mode is aleady fixed up by the .check_timings() operation, there's no need to repeat that when enabling the DPI output. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dpi.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index d814d71fffbc..2a4ad732679f 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -347,10 +347,9 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req, static int dpi_set_mode(struct dpi_data *dpi) { - struct videomode *vm = &dpi->vm; + const struct videomode *vm = &dpi->vm; int lck_div = 0, pck_div = 0; unsigned long fck = 0; - unsigned long pck; int r = 0; if (dpi->pll) @@ -362,15 +361,6 @@ static int dpi_set_mode(struct dpi_data *dpi) if (r) return r; - pck = fck / lck_div / pck_div; - - if (pck != vm->pixelclock) { - DSSWARN("Could not find exact pixel clock. 
Requested %lu Hz, got %lu Hz\n", - vm->pixelclock, pck); - - vm->pixelclock = pck; - } - dss_mgr_set_timings(&dpi->output, vm); return 0; -- GitLab From 7d39e59be51bddbd5cd487274d48969a39a3bcd1 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 8 Jun 2018 15:59:31 +0300 Subject: [PATCH 0987/1692] drm/omap: dsi: Fixup video mode in .set_config() operation The DSI encoder modifies the passed videomode to take the requirements of the internal DISPC-DSI bus into account in the .enable_video_output() operation. This should be performed in the .check_timings() operation instead. There is however no .check_timings() operation as the DSI encoder uses a custom API, so move it to the closest match which is the .set_config() operation. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dsi.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 280f63081224..d1734ea2534a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -3265,7 +3265,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi) if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { int bpp = dsi_get_pixel_size(dsi->pix_fmt); - struct videomode *vm = &dsi->vm; + const struct videomode *vm = &dsi->vm; /* * Don't use line buffers if width is greater than the video * port's line buffer size @@ -3394,7 +3394,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi) int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat; int tclk_trail, ths_exit, exiths_clk; bool ddr_alwon; - struct videomode *vm = &dsi->vm; + const struct videomode *vm = &dsi->vm; int bpp = dsi_get_pixel_size(dsi->pix_fmt); int ndl = dsi->num_lanes_used - 1; int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1; @@ -3644,7 +3644,7 @@ static void dsi_proto_timings(struct dsi_data *dsi) int vbp = dsi->vm_timings.vbp; int window_sync = dsi->vm_timings.window_sync; bool hsync_end; - struct videomode *vm = &dsi->vm; + const struct videomode *vm = &dsi->vm; int bpp = dsi_get_pixel_size(dsi->pix_fmt); int tl, t_he, width_bytes; @@ -4044,16 +4044,6 @@ static int dsi_display_init_dispc(struct dsi_data *dsi) dsi->mgr_config.fifohandcheck = false; } - /* - * override interlace, logic level and edge related parameters in - * videomode with default values - */ - dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED; - dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW; - dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; - dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; - dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; - dss_mgr_set_timings(&dsi->output, &dsi->vm); r = dsi_configure_dispc_clocks(dsi); @@ -4755,6 +4745,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev, dsi->user_dispc_cinfo = ctx.dispc_cinfo; dsi->vm = ctx.vm; + + /* + * override interlace, logic level and edge related parameters in + * videomode with default values + */ + dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED; + dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW; + dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; + dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; + dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; + dsi->vm_timings = ctx.dsi_vm; mutex_unlock(&dsi->lock); -- GitLab From 95e472da1094dfe41b7d1d3fb2d04486cf863a42 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 8 Jun 2018 16:39:27 +0300 Subject: [PATCH 0988/1692] drm/omap: hdmi: Constify video mode and related pointers Constify 
many pointers to struct videomode, as well as pointers to container structures, to ensure the video mode isn't modified after the .check_timings() operation. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/hdmi.h | 8 ++++---- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 2 +- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 2 +- drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 6 +++--- drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | 8 ++++---- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h index 3aeb4cabd59f..7f0dc490a31d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h @@ -313,13 +313,13 @@ void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask); int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val); int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val); void hdmi_wp_video_config_format(struct hdmi_wp_data *wp, - struct hdmi_video_format *video_fmt); + const struct hdmi_video_format *video_fmt); void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, - struct videomode *vm); + const struct videomode *vm); void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, - struct videomode *vm); + const struct videomode *vm); void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, - struct videomode *vm, struct hdmi_config *param); + struct videomode *vm, const struct hdmi_config *param); int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp, unsigned int version); phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 3e2bc85ef538..7ad173098c22 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -154,7 +154,7 @@ static void hdmi_power_off_core(struct omap_hdmi *hdmi) static int hdmi_power_on_full(struct omap_hdmi *hdmi) { int r; - struct videomode *vm; + const struct videomode *vm; struct hdmi_wp_data *wp = &hdmi->wp; struct dss_pll_clock_info hdmi_cinfo = { 0 }; unsigned int pc; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index c02e08299155..147c3550df51 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -153,7 +153,7 @@ static void hdmi_power_off_core(struct omap_hdmi *hdmi) static int hdmi_power_on_full(struct omap_hdmi *hdmi) { int r; - struct videomode *vm; + const struct videomode *vm; struct dss_pll_clock_info hdmi_cinfo = { 0 }; unsigned int pc; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 2282e48574c6..02efabc7ed76 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -287,7 +287,7 @@ void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s) } static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg, - struct hdmi_config *cfg) + const struct hdmi_config *cfg) { DSSDBG("hdmi_core_init\n"); @@ -325,10 +325,10 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg, /* DSS_HDMI_CORE_VIDEO_CONFIG */ static void hdmi_core_video_config(struct hdmi_core_data *core, - struct hdmi_core_vid_config *cfg) + const struct hdmi_core_vid_config *cfg) { void __iomem *base = core->base; - struct videomode *vm = &cfg->v_fc_config.vm; + const struct videomode *vm = &cfg->v_fc_config.vm; 
unsigned char r = 0; bool vsync_pol, hsync_pol; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 53bc5f78050c..100efb9f08c6 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -131,7 +131,7 @@ void hdmi_wp_video_stop(struct hdmi_wp_data *wp) } void hdmi_wp_video_config_format(struct hdmi_wp_data *wp, - struct hdmi_video_format *video_fmt) + const struct hdmi_video_format *video_fmt) { u32 l = 0; @@ -144,7 +144,7 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp, } void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, - struct videomode *vm) + const struct videomode *vm) { u32 r; bool vsync_inv, hsync_inv; @@ -164,7 +164,7 @@ void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, } void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, - struct videomode *vm) + const struct videomode *vm) { u32 timing_h = 0; u32 timing_v = 0; @@ -193,7 +193,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, } void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, - struct videomode *vm, struct hdmi_config *param) + struct videomode *vm, const struct hdmi_config *param) { DSSDBG("Enter hdmi_wp_video_init_format\n"); -- GitLab From 96fc64c775370b18bbf53732ad5130a6662cf1a2 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 8 Jun 2018 15:59:31 +0300 Subject: [PATCH 0989/1692] drm/omap: sdi: Fixup video mode in .check_timings() operation The SDI encoder modifies the pixel clock of the requested video mode to take the limitations of the PLL into account in the .enable() operation. This should be performed in the .check_timings() operation instead. Move the fixup. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/sdi.c | 37 ++++++++++++++++++------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 736a74db3ad5..e98c1b6e3d2d 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -132,10 +132,8 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi) static int sdi_display_enable(struct omap_dss_device *dssdev) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); - struct videomode *vm = &sdi->vm; - unsigned long fck; struct dispc_clock_info dispc_cinfo; - unsigned long pck; + unsigned long fck; int r; if (!sdi->output.dispc_channel_connected) { @@ -151,23 +149,13 @@ static int sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo); + r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo); if (r) goto err_calc_clock_div; sdi->mgr_config.clock_info = dispc_cinfo; - pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div; - - if (pck != vm->pixelclock) { - DSSWARN("Could not find exact pixel clock. 
Requested %lu Hz, got %lu Hz\n", - vm->pixelclock, pck); - - vm->pixelclock = pck; - } - - - dss_mgr_set_timings(&sdi->output, vm); + dss_mgr_set_timings(&sdi->output, &sdi->vm); r = dss_set_fck_rate(sdi->dss, fck); if (r) @@ -237,9 +225,28 @@ static void sdi_set_timings(struct omap_dss_device *dssdev, static int sdi_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { + struct sdi_device *sdi = dssdev_to_sdi(dssdev); + struct dispc_clock_info dispc_cinfo; + unsigned long fck; + unsigned long pck; + int r; + if (vm->pixelclock == 0) return -EINVAL; + r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo); + if (r) + return r; + + pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div; + + if (pck != vm->pixelclock) { + DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n", + vm->pixelclock, pck); + + vm->pixelclock = pck; + } + return 0; } -- GitLab From a730ce996ce4da09979bac884ddb62daf413b79e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 8 Jun 2018 15:59:31 +0300 Subject: [PATCH 0990/1692] drm/omap: venc: Fixup video mode in .check_timings() operation The VENC encoder modifies the requested video mode to match the NTSC or PAL timings (or reject the video mode completely) in the .set_timings() operation. This should be performed in the .check_timings() operation instead. Move the fixup. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/venc.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 09ec8b0eafee..126efbf89898 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -452,7 +452,7 @@ static void venc_runtime_put(struct venc_device *venc) WARN_ON(r < 0 && r != -ENOSYS); } -static const struct venc_config *venc_timings_to_config(struct videomode *vm) +static const struct venc_config *venc_timings_to_config(const struct videomode *vm) { switch (venc_get_videomode(vm)) { default: @@ -582,28 +582,16 @@ static void venc_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { struct venc_device *venc = dssdev_to_venc(dssdev); - struct videomode actual_vm; DSSDBG("venc_set_timings\n"); mutex_lock(&venc->venc_lock); - switch (venc_get_videomode(vm)) { - default: - WARN_ON_ONCE(1); - case VENC_MODE_PAL: - actual_vm = omap_dss_pal_vm; - break; - case VENC_MODE_NTSC: - actual_vm = omap_dss_ntsc_vm; - break; - } - /* Reset WSS data when the TV standard changes. */ - if (memcmp(&venc->vm, &actual_vm, sizeof(actual_vm))) + if (memcmp(&venc->vm, vm, sizeof(*vm))) venc->wss_data = 0; - venc->vm = actual_vm; + venc->vm = *vm; dispc_set_tv_pclk(venc->dss->dispc, 13500000); @@ -617,8 +605,13 @@ static int venc_check_timings(struct omap_dss_device *dssdev, switch (venc_get_videomode(vm)) { case VENC_MODE_PAL: + *vm = omap_dss_pal_vm; + return 0; + case VENC_MODE_NTSC: + *vm = omap_dss_ntsc_vm; return 0; + default: return -EINVAL; } -- GitLab From d8dbe79143764b86bd04c63c044431565582a22d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 8 Jun 2018 16:53:37 +0300 Subject: [PATCH 0991/1692] drm/omap: Store CRTC timings in .set_timings() operation The video timings are stored in the CRTC structure by the omap_crtc_dss_set_timings() function, called by dss_mgr_set_timings() from the .enable() operation of the internal encoders. This instead belongs to the .set_timings() code paths. 
Move the omap_crtc_dss_set_timings() calls accordingly. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- drivers/gpu/drm/omapdrm/dss/dpi.c | 4 ++-- drivers/gpu/drm/omapdrm/dss/dsi.c | 6 ++---- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 5 ++--- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 5 ++--- drivers/gpu/drm/omapdrm/dss/sdi.c | 4 ++-- drivers/gpu/drm/omapdrm/dss/venc.c | 4 ++-- 6 files changed, 12 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 2a4ad732679f..223586788648 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -361,8 +361,6 @@ static int dpi_set_mode(struct dpi_data *dpi) if (r) return r; - dss_mgr_set_timings(&dpi->output, vm); - return 0; } @@ -479,6 +477,8 @@ static void dpi_set_timings(struct omap_dss_device *dssdev, dpi->vm = *vm; + dss_mgr_set_timings(&dpi->output, vm); + mutex_unlock(&dpi->lock); } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index d1734ea2534a..394c129cfb3b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -3901,8 +3901,6 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi) msecs_to_jiffies(250)); BUG_ON(r == 0); - dss_mgr_set_timings(&dsi->output, &dsi->vm); - dss_mgr_start_update(&dsi->output); if (dsi->te_enabled) { @@ -4044,8 +4042,6 @@ static int dsi_display_init_dispc(struct dsi_data *dsi) dsi->mgr_config.fifohandcheck = false; } - dss_mgr_set_timings(&dsi->output, &dsi->vm); - r = dsi_configure_dispc_clocks(dsi); if (r) goto err1; @@ -4756,6 +4752,8 @@ static int dsi_set_config(struct omap_dss_device *dssdev, dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; + dss_mgr_set_timings(&dsi->output, &dsi->vm); + dsi->vm_timings = ctx.dsi_vm; mutex_unlock(&dsi->lock); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 7ad173098c22..df7cfb3e2b12 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -207,9 +207,6 @@ static int hdmi_power_on_full(struct omap_hdmi *hdmi) hdmi4_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg); - /* tv size */ - dss_mgr_set_timings(&hdmi->output, vm); - r = dss_mgr_enable(&hdmi->output); if (r) goto err_mgr_enable; @@ -262,6 +259,8 @@ static void hdmi_display_set_timings(struct omap_dss_device *dssdev, dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock); + dss_mgr_set_timings(&hdmi->output, vm); + mutex_unlock(&hdmi->lock); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 147c3550df51..cb212e5e790f 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -206,9 +206,6 @@ static int hdmi_power_on_full(struct omap_hdmi *hdmi) hdmi5_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg); - /* tv size */ - dss_mgr_set_timings(&hdmi->output, vm); - r = dss_mgr_enable(&hdmi->output); if (r) goto err_mgr_enable; @@ -261,6 +258,8 @@ static void hdmi_display_set_timings(struct omap_dss_device *dssdev, dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock); + dss_mgr_set_timings(&hdmi->output, vm); + mutex_unlock(&hdmi->lock); } diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index e98c1b6e3d2d..36edcdbf0609 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -155,8 +155,6 @@ static int sdi_display_enable(struct omap_dss_device *dssdev) 
sdi->mgr_config.clock_info = dispc_cinfo; - dss_mgr_set_timings(&sdi->output, &sdi->vm); - r = dss_set_fck_rate(sdi->dss, fck); if (r) goto err_set_dss_clock_div; @@ -220,6 +218,8 @@ static void sdi_set_timings(struct omap_dss_device *dssdev, struct sdi_device *sdi = dssdev_to_sdi(dssdev); sdi->vm = *vm; + + dss_mgr_set_timings(&sdi->output, vm); } static int sdi_check_timings(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 126efbf89898..39e3c43c54c1 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -491,8 +491,6 @@ static int venc_power_on(struct venc_device *venc) venc_write_reg(venc, VENC_OUTPUT_CONTROL, l); - dss_mgr_set_timings(&venc->output, &venc->vm); - r = regulator_enable(venc->vdda_dac_reg); if (r) goto err1; @@ -595,6 +593,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev, dispc_set_tv_pclk(venc->dss->dispc, 13500000); + dss_mgr_set_timings(&venc->output, vm); + mutex_unlock(&venc->venc_lock); } -- GitLab From 6ea48430952323091194100d48c5610b9cd286b4 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 7 Jun 2018 19:55:04 +0300 Subject: [PATCH 0992/1692] drm/omap: Don't call .set_timings() operation recursively Instead of calling the .set_timings() operation recursively from the display device backwards, iterate over the devices manually in the DRM encoder code. This moves the complexity to a single central location and simplifies the logic in omap_dss_device drivers. Signed-off-by: Laurent Pinchart Reviewed-by: Sebastian Reichel Signed-off-by: Tomi Valkeinen --- .../omapdrm/displays/connector-analog-tv.c | 10 ---------- .../gpu/drm/omapdrm/displays/connector-dvi.c | 10 ---------- .../gpu/drm/omapdrm/displays/connector-hdmi.c | 10 ---------- .../gpu/drm/omapdrm/displays/encoder-opa362.c | 11 ---------- .../gpu/drm/omapdrm/displays/encoder-tfp410.c | 9 --------- .../drm/omapdrm/displays/encoder-tpd12s015.c | 9 --------- drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 9 --------- .../displays/panel-lgphilips-lb035q02.c | 9 --------- .../omapdrm/displays/panel-nec-nl8048hl11.c | 9 --------- .../displays/panel-sharp-ls037v7dw01.c | 9 --------- .../omapdrm/displays/panel-sony-acx565akm.c | 9 --------- .../omapdrm/displays/panel-tpo-td028ttec1.c | 9 --------- .../omapdrm/displays/panel-tpo-td043mtea1.c | 9 --------- drivers/gpu/drm/omapdrm/dss/dpi.c | 2 -- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 2 -- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 2 -- drivers/gpu/drm/omapdrm/dss/sdi.c | 2 -- drivers/gpu/drm/omapdrm/dss/venc.c | 2 -- drivers/gpu/drm/omapdrm/omap_encoder.c | 20 ++++++++----------- 19 files changed, 8 insertions(+), 144 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 4866bf8ed524..28a3ce8f88d2 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -73,22 +73,12 @@ static void tvc_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tvc_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static const struct omap_dss_device_ops tvc_ops = { .connect = tvc_connect, .disconnect = tvc_disconnect, .enable = tvc_enable, .disable = tvc_disable, - - .set_timings = tvc_set_timings, }; static int tvc_probe(struct platform_device *pdev) 
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 818a4dc452e0..24b14f44248e 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -78,14 +78,6 @@ static void dvic_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void dvic_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static int dvic_ddc_read(struct i2c_adapter *adapter, unsigned char *buf, u16 count, u8 offset) { @@ -192,8 +184,6 @@ static const struct omap_dss_device_ops dvic_ops = { .enable = dvic_enable, .disable = dvic_disable, - .set_timings = dvic_set_timings, - .read_edid = dvic_read_edid, .detect = dvic_detect, diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index a32e4159242d..e602fa4a50a4 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -79,14 +79,6 @@ static void hdmic_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void hdmic_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -124,8 +116,6 @@ static const struct omap_dss_device_ops hdmic_ops = { .enable = hdmic_enable, .disable = hdmic_disable, - .set_timings = hdmic_set_timings, - .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, .unregister_hpd_cb = hdmic_unregister_hpd_cb, diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index bdf796123133..4fefd80f53bb 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -85,22 +85,11 @@ static void opa362_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void opa362_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - dev_dbg(dssdev->dev, "set_timings\n"); - - src->ops->set_timings(src, vm); -} - static const struct omap_dss_device_ops opa362_ops = { .connect = opa362_connect, .disconnect = opa362_disconnect, .enable = opa362_enable, .disable = opa362_disable, - .set_timings = opa362_set_timings, }; static int opa362_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index c3ceee2d19d0..f1a748353279 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -76,20 +76,11 @@ static void tfp410_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tfp410_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static const struct omap_dss_device_ops tfp410_ops = { .connect = tfp410_connect, .disconnect = tfp410_disconnect, .enable = tfp410_enable, .disable = tfp410_disable, - .set_timings = tfp410_set_timings, }; static int tfp410_probe(struct platform_device *pdev) diff --git 
a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 4598e5ca4238..94de55fd8884 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -91,14 +91,6 @@ static void tpd_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tpd_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -134,7 +126,6 @@ static const struct omap_dss_device_ops tpd_ops = { .disconnect = tpd_disconnect, .enable = tpd_enable, .disable = tpd_disable, - .set_timings = tpd_set_timings, .detect = tpd_detect, .register_hpd_cb = tpd_register_hpd_cb, .unregister_hpd_cb = tpd_unregister_hpd_cb, diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 95cdfde174aa..1f8161b041be 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -93,14 +93,6 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void panel_dpi_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static void panel_dpi_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -116,7 +108,6 @@ static const struct omap_dss_device_ops panel_dpi_ops = { .enable = panel_dpi_enable, .disable = panel_dpi_disable, - .set_timings = panel_dpi_set_timings, .get_timings = panel_dpi_get_timings, }; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 4e21de0e010d..f6ef8ff964dd 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -163,14 +163,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void lb035q02_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static void lb035q02_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -186,7 +178,6 @@ static const struct omap_dss_device_ops lb035q02_ops = { .enable = lb035q02_enable, .disable = lb035q02_disable, - .set_timings = lb035q02_set_timings, .get_timings = lb035q02_get_timings, }; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index f6fc7b8e639b..f445de6369f7 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -156,14 +156,6 @@ static void nec_8048_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void nec_8048_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static void nec_8048_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -179,7 +171,6 @@ static const struct omap_dss_device_ops nec_8048_ops = { .enable = nec_8048_enable, 
.disable = nec_8048_disable, - .set_timings = nec_8048_set_timings, .get_timings = nec_8048_get_timings, }; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 51ca92c82e2a..64b1369cb274 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -126,14 +126,6 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void sharp_ls_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static void sharp_ls_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -149,7 +141,6 @@ static const struct omap_dss_device_ops sharp_ls_ops = { .enable = sharp_ls_enable, .disable = sharp_ls_disable, - .set_timings = sharp_ls_set_timings, .get_timings = sharp_ls_get_timings, }; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 974982c46445..e04663856b31 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -629,14 +629,6 @@ static void acx565akm_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void acx565akm_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static void acx565akm_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -652,7 +644,6 @@ static const struct omap_dss_device_ops acx565akm_ops = { .enable = acx565akm_enable, .disable = acx565akm_disable, - .set_timings = acx565akm_set_timings, .get_timings = acx565akm_get_timings, }; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index ee17eb3e0a48..7ddc8c574a61 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -295,14 +295,6 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -318,7 +310,6 @@ static const struct omap_dss_device_ops td028ttec1_ops = { .enable = td028ttec1_panel_enable, .disable = td028ttec1_panel_disable, - .set_timings = td028ttec1_panel_set_timings, .get_timings = td028ttec1_panel_get_timings, }; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 7b0439274458..8440fcb744d9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -369,14 +369,6 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tpo_td043_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) -{ - struct omap_dss_device *src = dssdev->src; - - src->ops->set_timings(src, vm); -} - static void tpo_td043_get_timings(struct 
omap_dss_device *dssdev, struct videomode *vm) { @@ -392,7 +384,6 @@ static const struct omap_dss_device_ops tpo_td043_ops = { .enable = tpo_td043_enable, .disable = tpo_td043_disable, - .set_timings = tpo_td043_set_timings, .get_timings = tpo_td043_get_timings, }; diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 223586788648..ca4f3c4c6318 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -477,8 +477,6 @@ static void dpi_set_timings(struct omap_dss_device *dssdev, dpi->vm = *vm; - dss_mgr_set_timings(&dpi->output, vm); - mutex_unlock(&dpi->lock); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index df7cfb3e2b12..cf6230eac31a 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -259,8 +259,6 @@ static void hdmi_display_set_timings(struct omap_dss_device *dssdev, dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock); - dss_mgr_set_timings(&hdmi->output, vm); - mutex_unlock(&hdmi->lock); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index cb212e5e790f..b0e4a7463f8c 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -258,8 +258,6 @@ static void hdmi_display_set_timings(struct omap_dss_device *dssdev, dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock); - dss_mgr_set_timings(&hdmi->output, vm); - mutex_unlock(&hdmi->lock); } diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 36edcdbf0609..b2fe2387037a 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -218,8 +218,6 @@ static void sdi_set_timings(struct omap_dss_device *dssdev, struct sdi_device *sdi = dssdev_to_sdi(dssdev); sdi->vm = *vm; - - dss_mgr_set_timings(&sdi->output, vm); } static int sdi_check_timings(struct omap_dss_device *dssdev, diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 39e3c43c54c1..ff0b18c8e4ac 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -593,8 +593,6 @@ static void venc_set_timings(struct omap_dss_device *dssdev, dispc_set_tv_pclk(venc->dss->dispc, 13500000); - dss_mgr_set_timings(&venc->output, vm); - mutex_unlock(&venc->venc_lock); } diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 749d21a92edd..452e625f6ce3 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -58,7 +58,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *display = omap_encoder->display; struct drm_connector *connector; struct omap_dss_device *dssdev; struct videomode vm = { 0 }; @@ -104,18 +103,15 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, } } - /* - * HACK: Call the .set_timings() operation if available, this will - * eventually store timings in the CRTC. Otherwise (for DSI outputs) - * store the timings directly. - * - * All outputs should be brought in sync to operate similarly. - */ - if (display->ops->set_timings) - display->ops->set_timings(display, &vm); - else - *omap_crtc_timings(encoder->crtc) = vm; + /* Set timings for all devices in the display pipeline. 
*/ + dss_mgr_set_timings(omap_encoder->output, &vm); + + for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + if (dssdev->ops->set_timings) + dssdev->ops->set_timings(dssdev, &vm); + } + /* Set the HDMI mode and HDMI infoframe if applicable. */ hdmi_mode = false; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { -- GitLab From 5a7b44a8df822e0667fc76ed7130252523993bda Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 3 Sep 2018 15:16:43 +0200 Subject: [PATCH 0993/1692] ALSA: rawmidi: Initialize allocated buffers syzbot reported the uninitialized value exposure in certain situations using virmidi loop. It's likely a very small race at writing and reading, and the influence is almost negligible. But it's safer to paper over this just by replacing the existing kvmalloc() with kvzalloc(). Reported-by: syzbot+194dffdb8b22fc5d207a@syzkaller.appspotmail.com Signed-off-by: Takashi Iwai --- sound/core/rawmidi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 69517e18ef07..08d5662039e3 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -129,7 +129,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream) runtime->avail = 0; else runtime->avail = runtime->buffer_size; - runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL); + runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL); if (!runtime->buffer) { kfree(runtime); return -ENOMEM; @@ -655,7 +655,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime, if (params->avail_min < 1 || params->avail_min > params->buffer_size) return -EINVAL; if (params->buffer_size != runtime->buffer_size) { - newbuf = kvmalloc(params->buffer_size, GFP_KERNEL); + newbuf = kvzalloc(params->buffer_size, GFP_KERNEL); if (!newbuf) return -ENOMEM; spin_lock_irq(&runtime->lock); -- GitLab From 4d230d12710646788af581ba0155d83ab48b955c Mon Sep 17 00:00:00 2001 From: Jiada Wang Date: Mon, 3 Sep 2018 07:08:58 +0000 Subject: [PATCH 0994/1692] ASoC: rsnd: fixup not to call clk_get/set under non-atomic Clocking operations clk_get/set_rate, are non-atomic, they shouldn't be called in soc_pcm_trigger() which is atomic. Following issue was found due to execution of clk_get_rate() causes sleep in soc_pcm_trigger(), which shouldn't be blocked. We can reproduce this issue by following > enable CONFIG_DEBUG_ATOMIC_SLEEP=y > compile, and boot > mount -t debugfs none /sys/kernel/debug > while true; do cat /sys/kernel/debug/clk/clk_summary > /dev/null; done & > while true; do aplay xxx; done This patch adds support to .prepare callback, and moves non-atomic clocking operations to it. As .prepare is non-atomic, it is always called before trigger_start/trigger_stop. BUG: sleeping function called from invalid context at kernel/locking/mutex.c:620 in_atomic(): 1, irqs_disabled(): 128, pid: 2242, name: aplay INFO: lockdep is turned off. 
irq event stamp: 5964 hardirqs last enabled at (5963): [] mutex_lock_nested+0x6e8/0x6f0 hardirqs last disabled at (5964): [] _raw_spin_lock_irqsave+0x24/0x68 softirqs last enabled at (5502): [] __do_softirq+0x560/0x10c0 softirqs last disabled at (5495): [] irq_exit+0x160/0x25c Preemption disabled at:[ 62.904063] [] snd_pcm_stream_lock+0xb4/0xc0 CPU: 2 PID: 2242 Comm: aplay Tainted: G B C 4.9.54+ #186 Hardware name: Renesas Salvator-X board based on r8a7795 (DT) Call trace: [] dump_backtrace+0x0/0x37c [] show_stack+0x14/0x1c [] dump_stack+0xfc/0x154 [] ___might_sleep+0x57c/0x58c [] __might_sleep+0x208/0x21c [] mutex_lock_nested+0xb4/0x6f0 [] clk_prepare_lock+0xb0/0x184 [] clk_core_get_rate+0x14/0x54 [] clk_get_rate+0x20/0x34 [] rsnd_adg_ssi_clk_try_start+0x158/0x4f8 [snd_soc_rcar] [] rsnd_ssi_init+0x668/0x7a0 [snd_soc_rcar] [] rsnd_soc_dai_trigger+0x4bc/0xcf8 [snd_soc_rcar] [] soc_pcm_trigger+0x2a4/0x2d4 Fixes: e7d850dd10f4 ("ASoC: rsnd: use mod base common method on SSI-parent") Signed-off-by: Jiada Wang Signed-off-by: Timo Wischer [Kuninori: tidyup for upstream] Signed-off-by: Kuninori Morimoto Tested-by: Hiroyuki Yokoyama Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- sound/soc/sh/rcar/core.c | 11 +++++++++++ sound/soc/sh/rcar/rsnd.h | 7 +++++++ sound/soc/sh/rcar/ssi.c | 16 ++++++++++------ 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index f8425d8b44d2..b35f5509cfe2 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -958,12 +958,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream, rsnd_dai_stream_quit(io); } +static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct rsnd_priv *priv = rsnd_dai_to_priv(dai); + struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai); + struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream); + + return rsnd_dai_call(prepare, io, priv); +} + static const struct snd_soc_dai_ops rsnd_soc_dai_ops = { .startup = rsnd_soc_dai_startup, .shutdown = rsnd_soc_dai_shutdown, .trigger = rsnd_soc_dai_trigger, .set_fmt = rsnd_soc_dai_set_fmt, .set_tdm_slot = rsnd_soc_set_dai_tdm_slot, + .prepare = rsnd_soc_dai_prepare, }; void rsnd_parse_connect_common(struct rsnd_dai *rdai, diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index 96d93330b1e1..8f7a0abfa751 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -280,6 +280,9 @@ struct rsnd_mod_ops { int (*nolock_stop)(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv); + int (*prepare)(struct rsnd_mod *mod, + struct rsnd_dai_stream *io, + struct rsnd_priv *priv); }; struct rsnd_dai_stream; @@ -309,6 +312,7 @@ struct rsnd_mod { * H 0: fallback * H 0: hw_params * H 0: pointer + * H 0: prepare */ #define __rsnd_mod_shift_nolock_start 0 #define __rsnd_mod_shift_nolock_stop 0 @@ -323,6 +327,7 @@ struct rsnd_mod { #define __rsnd_mod_shift_fallback 28 /* always called */ #define __rsnd_mod_shift_hw_params 28 /* always called */ #define __rsnd_mod_shift_pointer 28 /* always called */ +#define __rsnd_mod_shift_prepare 28 /* always called */ #define __rsnd_mod_add_probe 0 #define __rsnd_mod_add_remove 0 @@ -337,6 +342,7 @@ struct rsnd_mod { #define __rsnd_mod_add_fallback 0 #define __rsnd_mod_add_hw_params 0 #define __rsnd_mod_add_pointer 0 +#define __rsnd_mod_add_prepare 0 #define __rsnd_mod_call_probe 0 #define __rsnd_mod_call_remove 0 @@ -351,6 +357,7 @@ struct rsnd_mod { #define __rsnd_mod_call_pointer 0 
#define __rsnd_mod_call_nolock_start 0 #define __rsnd_mod_call_nolock_stop 1 +#define __rsnd_mod_call_prepare 0 #define rsnd_mod_to_priv(mod) ((mod)->priv) #define rsnd_mod_name(mod) ((mod)->ops->name) diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 8304e4ec9242..3f880ec66459 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, if (rsnd_ssi_is_multi_slave(mod, io)) return 0; - if (ssi->usrcnt > 1) { + if (ssi->rate) { if (ssi->rate != rate) { dev_err(dev, "SSI parent/child should use same rate\n"); return -EINVAL; @@ -434,7 +434,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, struct rsnd_priv *priv) { struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); - int ret; if (!rsnd_ssi_is_run_mods(mod, io)) return 0; @@ -443,10 +442,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, rsnd_mod_power_on(mod); - ret = rsnd_ssi_master_clk_start(mod, io); - if (ret < 0) - return ret; - rsnd_ssi_config_init(mod, io); rsnd_ssi_register_setup(mod); @@ -852,6 +847,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod, return 0; } +static int rsnd_ssi_prepare(struct rsnd_mod *mod, + struct rsnd_dai_stream *io, + struct rsnd_priv *priv) +{ + return rsnd_ssi_master_clk_start(mod, io); +} + static struct rsnd_mod_ops rsnd_ssi_pio_ops = { .name = SSI_NAME, .probe = rsnd_ssi_common_probe, @@ -864,6 +866,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = { .pointer = rsnd_ssi_pio_pointer, .pcm_new = rsnd_ssi_pcm_new, .hw_params = rsnd_ssi_hw_params, + .prepare = rsnd_ssi_prepare, }; static int rsnd_ssi_dma_probe(struct rsnd_mod *mod, @@ -940,6 +943,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = { .pcm_new = rsnd_ssi_pcm_new, .fallback = rsnd_ssi_fallback, .hw_params = rsnd_ssi_hw_params, + .prepare = rsnd_ssi_prepare, }; int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod) -- GitLab From 46223993c2e4fbbb5bd35b5abef0c6b663ebba58 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 3 Sep 2018 16:02:16 +0100 Subject: [PATCH 0995/1692] drm/i915: Fix up FORCE_GPU_RELOC (debug) to flush CPU write domains We currently assert that if the target is in a CPU write domain, we use a CPU reloc path rather than the GPU reloc path. However, we have a debug override to force the GPU path and that unfortunately hits the assert. Include the async clflush under the debug option to ensure correct behaviour even when debugging, and strict when not. 
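A rough standalone sketch of the intent, not the i915 code: when a debug switch forces the GPU relocation path, the flush that the normal path selection would otherwise have guaranteed is performed explicitly, so the existing assertion still holds. Every name below is invented for the sketch.

	#include <assert.h>
	#include <stdio.h>

	#define CPU_WRITE_DOMAIN 0x1u

	/* Hypothetical object state, standing in for the buffer object. */
	struct sample_obj {
		unsigned int write_domain;
		int cache_dirty;
	};

	static int force_gpu_reloc = 1; /* stand-in for the debug override */

	static void sample_flush_writes(struct sample_obj *obj)
	{
		/* Model of a clflush: pending CPU writes are pushed out before GPU use. */
		obj->cache_dirty = 0;
	}

	static void sample_gpu_reloc(struct sample_obj *obj)
	{
		if (force_gpu_reloc) {
			if (obj->cache_dirty)
				sample_flush_writes(obj);
			obj->write_domain &= ~CPU_WRITE_DOMAIN;
		}

		/* Pre-existing invariant of the GPU path, now true on the debug path too. */
		assert(!(obj->write_domain & CPU_WRITE_DOMAIN));
		printf("GPU relocation path entered with clean CPU write domain\n");
	}

	int main(void)
	{
		struct sample_obj obj = { .write_domain = CPU_WRITE_DOMAIN, .cache_dirty = 1 };

		sample_gpu_reloc(&obj);
		return 0;
	}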
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180903150216.19965-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 43706c1db31a..7d0b3a2c30e2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1127,6 +1127,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, u32 *cmd; int err; + if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) { + obj = vma->obj; + if (obj->cache_dirty & ~obj->cache_coherent) + i915_gem_clflush_object(obj, 0); + obj->write_domain = 0; + } + GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU); obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); -- GitLab From a0e731f4e26c4d774e71f9e69fff3e88d49dd34f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 3 Sep 2018 16:23:03 +0100 Subject: [PATCH 0996/1692] drm/i915: Combine cleanup_status_page() Pull the physical status page cleanup into a common cleanup_status_page() for caller simplicity. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20180903152304.31589-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_engine_cs.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 1a34e8ff82d5..292eae19fce2 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -530,19 +530,14 @@ void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) i915_vma_unpin_and_release(&engine->scratch, 0); } -static void cleanup_phys_status_page(struct intel_engine_cs *engine) +static void cleanup_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct drm_dma_handle *dmah; - if (!dev_priv->status_page_dmah) - return; + dmah = fetch_and_zero(&engine->i915->status_page_dmah); + if (dmah) + drm_pci_free(&engine->i915->drm, dmah); - drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); - engine->status_page.page_addr = NULL; -} - -static void cleanup_status_page(struct intel_engine_cs *engine) -{ i915_vma_unpin_and_release(&engine->status_page.vma, I915_VMA_RELEASE_MAP); } @@ -710,10 +705,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) intel_engine_cleanup_scratch(engine); - if (HWS_NEEDS_PHYSICAL(engine->i915)) - cleanup_phys_status_page(engine); - else - cleanup_status_page(engine); + cleanup_status_page(engine); intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); -- GitLab From d6acae363e63d655ba892c139ba14f24206462c0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 3 Sep 2018 16:23:04 +0100 Subject: [PATCH 0997/1692] drm/i915: Use a cached mapping for the physical HWS Older gen use a physical address for the hardware status page, for which we use cache-coherent writes. As the writes are into the cpu cache, we use a normal WB mapped page to read the HWS, used for our seqno tracking. Anecdotally, I observed lost breadcrumbs writes into the HWS on i965gm, which so far have not reoccurred with this patch. How reliable that evidence is remains to be seen. v2: Explicitly pass the expected physical address to the hw v3: Also remember the wild writes we once had for HWS above 4G. 
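As a worked illustration of the HWS_PGA address packing used below (a sketch, not driver code): the low 32 bits of the page's physical address are written to the register as-is, and on gen4+ physical address bits [35:32] are folded into register bits [7:4], which are otherwise zero for a page-aligned address. The example address is arbitrary.

	#include <stdint.h>
	#include <stdio.h>

	/* Pack a page-aligned physical address the way HWS_PGA expects it:
	 * low 32 bits verbatim, physical bits [35:32] placed in register bits [7:4].
	 */
	static uint32_t pack_hws_pga(uint64_t phys, int gen)
	{
		uint32_t addr = (uint32_t)phys;                  /* lower_32_bits(phys) */

		if (gen >= 4)                                    /* bits [35:32] -> bits [7:4] */
			addr |= (uint32_t)((phys >> 28) & 0xf0);

		return addr;
	}

	int main(void)
	{
		/* Arbitrary page-aligned 36-bit example address, for illustration only. */
		uint64_t phys = 0x323456000ULL;

		printf("HWS_PGA (gen4+) = 0x%08x\n", pack_hws_pga(phys, 4)); /* 0x23456030 */
		printf("HWS_PGA (gen3)  = 0x%08x\n", pack_hws_pga(phys, 3)); /* 0x23456000 */
		return 0;
	}

Note that the patch itself still allocates the page with __GFP_DMA32, i.e. below 4 GiB, so the high bits are normally zero; the packing merely reflects what the register can address.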
Signed-off-by: Chris Wilson Cc: Daniel Vetter Cc: Joonas Lahtinen Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20180903152304.31589-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/intel_engine_cs.c | 25 +++++++++++++------------ drivers/gpu/drm/i915/intel_ringbuffer.c | 7 +++++-- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9771f39d99b3..5a4da5b723fd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1666,7 +1666,6 @@ struct drm_i915_private { struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] [MAX_ENGINE_INSTANCE + 1]; - struct drm_dma_handle *status_page_dmah; struct resource mch_res; /* protects the irq masks */ diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 292eae19fce2..10cd051ba29e 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -532,11 +532,11 @@ void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) static void cleanup_status_page(struct intel_engine_cs *engine) { - struct drm_dma_handle *dmah; + if (HWS_NEEDS_PHYSICAL(engine->i915)) { + void *addr = fetch_and_zero(&engine->status_page.page_addr); - dmah = fetch_and_zero(&engine->i915->status_page_dmah); - if (dmah) - drm_pci_free(&engine->i915->drm, dmah); + __free_page(virt_to_page(addr)); + } i915_vma_unpin_and_release(&engine->status_page.vma, I915_VMA_RELEASE_MAP); @@ -605,17 +605,18 @@ static int init_status_page(struct intel_engine_cs *engine) static int init_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - GEM_BUG_ON(engine->id != RCS); + struct page *page; - dev_priv->status_page_dmah = - drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) + /* + * Though the HWS register does support 36bit addresses, historically + * we have had hangs and corruption reported due to wild writes if + * the HWS is placed above 4G. + */ + page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO); + if (!page) return -ENOMEM; - engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(engine->status_page.page_addr, 0, PAGE_SIZE); + engine->status_page.page_addr = page_address(page); return 0; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 44432677160c..86604dd1c5a5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -344,11 +344,14 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode) static void ring_setup_phys_status_page(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + struct page *page = virt_to_page(engine->status_page.page_addr); + phys_addr_t phys = PFN_PHYS(page_to_pfn(page)); u32 addr; - addr = dev_priv->status_page_dmah->busaddr; + addr = lower_32_bits(phys); if (INTEL_GEN(dev_priv) >= 4) - addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; + addr |= (phys >> 28) & 0xf0; + I915_WRITE(HWS_PGA, addr); } -- GitLab From edf4e7b7b9104b58fddfcd073bd7dcc1585d5326 Mon Sep 17 00:00:00 2001 From: John Johansen Date: Sat, 1 Sep 2018 01:57:52 -0700 Subject: [PATCH 0998/1692] apparmor: fix bad debug check in apparmor_secid_to_secctx() apparmor_secid_to_secctx() has a bad debug statement tripping on a condition handle by the code. 
When kconfig SECURITY_APPARMOR_DEBUG is enabled the debug WARN_ON will trip when **secdata is NULL resulting in the following trace. ------------[ cut here ]------------ AppArmor WARN apparmor_secid_to_secctx: ((!secdata)): WARNING: CPU: 0 PID: 14826 at security/apparmor/secid.c:82 apparmor_secid_to_secctx+0x2b5/0x2f0 security/apparmor/secid.c:82 Kernel panic - not syncing: panic_on_warn set ... CPU: 0 PID: 14826 Comm: syz-executor1 Not tainted 4.19.0-rc1+ #193 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x1c9/0x2b4 lib/dump_stack.c:113 panic+0x238/0x4e7 kernel/panic.c:184 __warn.cold.8+0x163/0x1ba kernel/panic.c:536 report_bug+0x252/0x2d0 lib/bug.c:186 fixup_bug arch/x86/kernel/traps.c:178 [inline] do_error_trap+0x1fc/0x4d0 arch/x86/kernel/traps.c:296 do_invalid_op+0x1b/0x20 arch/x86/kernel/traps.c:316 invalid_op+0x14/0x20 arch/x86/entry/entry_64.S:993 RIP: 0010:apparmor_secid_to_secctx+0x2b5/0x2f0 security/apparmor/secid.c:82 Code: c7 c7 40 66 58 87 e8 6a 6d 0f fe 0f 0b e9 6c fe ff ff e8 3e aa 44 fe 48 c7 c6 80 67 58 87 48 c7 c7 a0 65 58 87 e8 4b 6d 0f fe <0f> 0b e9 3f fe ff ff 48 89 df e8 fc a7 83 fe e9 ed fe ff ff bb f4 RSP: 0018:ffff8801ba1bed10 EFLAGS: 00010286 RAX: 0000000000000000 RBX: ffff8801ba1beed0 RCX: ffffc9000227e000 RDX: 0000000000018482 RSI: ffffffff8163ac01 RDI: 0000000000000001 RBP: ffff8801ba1bed30 R08: ffff8801b80ec080 R09: ffffed003b603eca R10: ffffed003b603eca R11: ffff8801db01f657 R12: 0000000000000001 R13: 0000000000000000 R14: 0000000000000000 R15: ffff8801ba1beed0 security_secid_to_secctx+0x63/0xc0 security/security.c:1314 ctnetlink_secctx_size net/netfilter/nf_conntrack_netlink.c:621 [inline] ctnetlink_nlmsg_size net/netfilter/nf_conntrack_netlink.c:659 [inline] ctnetlink_conntrack_event+0x303/0x1470 net/netfilter/nf_conntrack_netlink.c:706 nf_conntrack_eventmask_report+0x55f/0x930 net/netfilter/nf_conntrack_ecache.c:151 nf_conntrack_event_report include/net/netfilter/nf_conntrack_ecache.h:112 [inline] nf_ct_delete+0x33c/0x5d0 net/netfilter/nf_conntrack_core.c:601 nf_ct_iterate_cleanup+0x48c/0x5e0 net/netfilter/nf_conntrack_core.c:1892 nf_ct_iterate_cleanup_net+0x23c/0x2d0 net/netfilter/nf_conntrack_core.c:1974 ctnetlink_flush_conntrack net/netfilter/nf_conntrack_netlink.c:1226 [inline] ctnetlink_del_conntrack+0x66c/0x850 net/netfilter/nf_conntrack_netlink.c:1258 nfnetlink_rcv_msg+0xd88/0x1070 net/netfilter/nfnetlink.c:228 netlink_rcv_skb+0x172/0x440 net/netlink/af_netlink.c:2454 nfnetlink_rcv+0x1c0/0x4d0 net/netfilter/nfnetlink.c:560 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x5a0/0x760 net/netlink/af_netlink.c:1343 netlink_sendmsg+0xa18/0xfc0 net/netlink/af_netlink.c:1908 sock_sendmsg_nosec net/socket.c:621 [inline] sock_sendmsg+0xd5/0x120 net/socket.c:631 ___sys_sendmsg+0x7fd/0x930 net/socket.c:2114 __sys_sendmsg+0x11d/0x290 net/socket.c:2152 __do_sys_sendmsg net/socket.c:2161 [inline] __se_sys_sendmsg net/socket.c:2159 [inline] __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2159 do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x457089 Code: fd b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 cb b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 RSP: 002b:00007f7bc6e03c78 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 00007f7bc6e046d4 RCX: 
0000000000457089 RDX: 0000000000000000 RSI: 0000000020d65000 RDI: 0000000000000003 RBP: 00000000009300a0 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00000000ffffffff R13: 00000000004d4588 R14: 00000000004c8d5c R15: 0000000000000000 Dumping ftrace buffer: (ftrace buffer empty) Kernel Offset: disabled Rebooting in 86400 seconds.. CC: #4.18 Fixes: c092921219d2 ("apparmor: add support for mapping secids and using secctxes") Reported-by: syzbot+21016130b0580a9de3b5@syzkaller.appspotmail.com Signed-off-by: John Johansen --- security/apparmor/secid.c | 1 - 1 file changed, 1 deletion(-) diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c index f2f22d00db18..4ccec1bcf6f5 100644 --- a/security/apparmor/secid.c +++ b/security/apparmor/secid.c @@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) struct aa_label *label = aa_secid_to_label(secid); int len; - AA_BUG(!secdata); AA_BUG(!seclen); if (!label) -- GitLab From f7c50fa636f72490baceb1664ba64973137266f2 Mon Sep 17 00:00:00 2001 From: Keyon Jie Date: Mon, 3 Sep 2018 10:47:09 +0800 Subject: [PATCH 0999/1692] ALSA: hda: Fix several mismatch for register mask and value E.g. for snd_hdac_ext_link_clear_stream_id(), we should set (1 << stream) as mask, and 0 as value, here correct it and several similar mismatches. And, here also remove unreadable register_mask usage for those mask value updating. Signed-off-by: Keyon Jie Signed-off-by: Takashi Iwai --- sound/hda/ext/hdac_ext_stream.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c index 1bd27576db98..a835558ddbc9 100644 --- a/sound/hda/ext/hdac_ext_stream.c +++ b/sound/hda/ext/hdac_ext_stream.c @@ -146,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple); */ void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream) { - snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN); + snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, + AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN); } EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start); @@ -171,7 +172,8 @@ void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *stream) snd_hdac_ext_link_stream_clear(stream); - snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST); + snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, + AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST); udelay(3); timeout = 50; do { @@ -242,7 +244,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_link_set_stream_id); void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, int stream) { - snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream)); + snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0); } EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id); @@ -415,7 +417,6 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus, bool enable, int index) { u32 mask = 0; - u32 register_mask = 0; if (!bus->spbcap) { dev_err(bus->dev, "Address of SPB capability is NULL\n"); @@ -424,12 +425,8 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus, mask |= (1 << index); - register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL); - - mask |= register_mask; - if (enable) - snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask); + snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask); else snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0); } @@ -503,7 +500,6 @@ void 
snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus, bool enable, int index) { u32 mask = 0; - u32 register_mask = 0; if (!bus->drsmcap) { dev_err(bus->dev, "Address of DRSM capability is NULL\n"); @@ -512,12 +508,8 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus, mask |= (1 << index); - register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL); - - mask |= register_mask; - if (enable) - snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask); + snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask); else snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0); } -- GitLab From 36feaac35405e932ad9c321d7a2db8a7e4a4d7ca Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Fri, 31 Aug 2018 16:52:01 +0800 Subject: [PATCH 1000/1692] ip6_tunnel: respect ttl inherit for ip6tnl man ip-tunnel ttl section says: 0 is a special value meaning that packets inherit the TTL value. IPv4 tunnel respect this in ip_tunnel_xmit(), but IPv6 tunnel has not implement it yet. To make IPv6 behave consistently with IP tunnel, add ipv6 tunnel inherit support. Signed-off-by: Hangbin Liu Signed-off-by: David S. Miller --- net/ipv6/ip6_tunnel.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 5df2a58d945c..419960b0ba16 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1188,7 +1188,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, init_tel_txopt(&opt, encap_limit); ipv6_push_frag_opts(skb, &opt.ops, &proto); } - hop_limit = hop_limit ? : ip6_dst_hoplimit(dst); + + if (hop_limit == 0) { + if (skb->protocol == htons(ETH_P_IP)) + hop_limit = ip_hdr(skb)->ttl; + else if (skb->protocol == htons(ETH_P_IPV6)) + hop_limit = ipv6_hdr(skb)->hop_limit; + else + hop_limit = ip6_dst_hoplimit(dst); + } /* Calculate max headroom for all the headers and adjust * needed_headroom if necessary. -- GitLab From 9fd0e09a4e86499639653243edfcb417a05c5c46 Mon Sep 17 00:00:00 2001 From: Anthony Wong Date: Fri, 31 Aug 2018 20:06:42 +0800 Subject: [PATCH 1001/1692] r8169: add support for NCube 8168 network card This card identifies itself as: Ethernet controller [0200]: NCube Device [10ff:8168] (rev 06) Subsystem: TP-LINK Technologies Co., Ltd. Device [7470:3468] Adding a new entry to rtl8169_pci_tbl makes the card work. Link: http://launchpad.net/bugs/1788730 Signed-off-by: Anthony Wong Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 1 + include/linux/pci_ids.h | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ac306797590e..b08d51bf7a20 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -218,6 +218,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, { PCI_VENDOR_ID_DLINK, 0x4300, PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 99d366cb0e9f..d157983b84cf 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3084,4 +3084,6 @@ #define PCI_VENDOR_ID_OCZ 0x1b85 +#define PCI_VENDOR_ID_NCUBE 0x10ff + #endif /* _LINUX_PCI_IDS_H */ -- GitLab From f0a459dec5495a3580f8d784555e6f8f3bf7f263 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 3 Sep 2018 22:19:43 -0400 Subject: [PATCH 1002/1692] ext4: fix online resize's handling of a too-small final block group Avoid growing the file system to an extent so that the last block group is too small to hold all of the metadata that must be stored in the block group. This problem can be triggered with the following reproducer: umount /mnt mke2fs -F -m0 -b 4096 -t ext4 -O resize_inode,^has_journal \ -E resize=1073741824 /tmp/foo.img 128M mount /tmp/foo.img /mnt truncate --size 1708M /tmp/foo.img resize2fs /dev/loop0 295400 umount /mnt e2fsck -fy /tmp/foo.img Reported-by: Torsten Hilbrich Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org --- fs/ext4/resize.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index e5fb38451a73..33655a6eff4d 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1986,6 +1986,26 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) } } + /* + * Make sure the last group has enough space so that it's + * guaranteed to have enough space for all metadata blocks + * that it might need to hold. (We might not need to store + * the inode table blocks in the last block group, but there + * will be cases where this might be needed.) + */ + if ((ext4_group_first_block_no(sb, n_group) + + ext4_group_overhead_blocks(sb, n_group) + 2 + + sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) { + n_blocks_count = ext4_group_first_block_no(sb, n_group); + n_group--; + n_blocks_count_retry = 0; + if (resize_inode) { + iput(resize_inode); + resize_inode = NULL; + } + goto retry; + } + /* extend the last group */ if (n_group == o_group) add = n_blocks_count - o_blocks_count; -- GitLab From 5f8c10936fab2b69a487400f2872902e597dd320 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 3 Sep 2018 22:25:01 -0400 Subject: [PATCH 1003/1692] ext4: fix online resizing for bigalloc file systems with a 1k block size An online resize of a file system with the bigalloc feature enabled and a 1k block size would be refused since ext4_resize_begin() did not understand s_first_data_block is 0 for all bigalloc file systems, even when the block size is 1k. 
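The arithmetic behind that check is easy to see with concrete numbers. On a 1k-block file system the primary superblock buffer is block 1, while a bigalloc file system reports s_first_data_block as 0, so the raw block-number comparison always looks like a backup-superblock mount. Comparing in cluster units (EXT4_B2C() is essentially a right shift by the log2 block-to-cluster ratio) makes the two agree again. The values below are one illustrative configuration, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long long sbh_blocknr    = 1; /* 1k blocks: superblock at block 1 */
	unsigned long long first_data_blk = 0; /* bigalloc reports 0 here */
	unsigned int cluster_bits         = 4; /* e.g. 16k clusters of 1k blocks */

	/* old check: 1 != 0, so the resize is refused */
	printf("old: %d\n", sbh_blocknr != first_data_blk);

	/* new check compares in cluster units: (1 >> 4) == 0, resize allowed */
	printf("new: %d\n", (sbh_blocknr >> cluster_bits) != first_data_blk);
	return 0;
}
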
Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org --- fs/ext4/resize.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 33655a6eff4d..ebbc663d0798 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -19,6 +19,7 @@ int ext4_resize_begin(struct super_block *sb) { + struct ext4_sb_info *sbi = EXT4_SB(sb); int ret = 0; if (!capable(CAP_SYS_RESOURCE)) @@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb) * because the user tools have no way of handling this. Probably a * bad time to do it anyways. */ - if (EXT4_SB(sb)->s_sbh->b_blocknr != + if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) != le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { ext4_warning(sb, "won't resize using backup superblock at %llu", (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); -- GitLab From c48300c92ad9f029f4dcbcf5d71ad880e3acf2fa Mon Sep 17 00:00:00 2001 From: Gleb Fotengauer-Malinovskiy Date: Mon, 3 Sep 2018 20:59:13 +0300 Subject: [PATCH 1004/1692] vhost: fix VHOST_GET_BACKEND_FEATURES ioctl request definition The _IOC_READ flag fits this ioctl request more because this request actually only writes to, but doesn't read from userspace. See NOTEs in include/uapi/asm-generic/ioctl.h for more information. Fixes: 429711aec282 ("vhost: switch to use new message format") Signed-off-by: Gleb Fotengauer-Malinovskiy Acked-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- include/uapi/linux/vhost.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index b1e22c40c4b6..84c3de89696a 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -176,7 +176,7 @@ struct vhost_memory { #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) -#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64) +#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) /* VHOST_NET specific defines */ -- GitLab From 399334708b4f07b107094e5db4a390f0f25d2d4f Mon Sep 17 00:00:00 2001 From: Jan-Marek Glogowski Date: Sat, 25 Aug 2018 15:10:35 -0400 Subject: [PATCH 1005/1692] drm/i915: Re-apply "Perform link quality check, unconditionally during long pulse" This re-applies the workaround for "some DP sinks, [which] are a little nuts" from commit 1a36147bb939 ("drm/i915: Perform link quality check unconditionally during long pulse"). It makes the secondary AOC E2460P monitor connected via DP to an acer Veriton N4640G usable again. 
This hunk was dropped in commit c85d200e8321 ("drm/i915: Move SST DP link retraining into the ->post_hotplug() hook") Fixes: c85d200e8321 ("drm/i915: Move SST DP link retraining into the ->post_hotplug() hook") [Cleaned up commit message, added stable cc] Signed-off-by: Lyude Paul Signed-off-by: Jan-Marek Glogowski Cc: stable@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20180825191035.3945-1-lyude@redhat.com (cherry picked from commit 3cf71bc9904d7ee4a25a822c5dcb54c7804ea388) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_dp.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cd0f649b57a5..1193202766a2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); } -/* - * If display is now connected check links status, - * there has been known issues of link loss triggering - * long pulse. - * - * Some sinks (eg. ASUS PB287Q) seem to perform some - * weird HPD ping pong during modesets. So we can apparently - * end up with HPD going low during a modeset, and then - * going back up soon after. And once that happens we must - * retrain the link to get a picture. That's in case no - * userspace component reacted to intermittent HPD dip. - */ int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { @@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) } static int -intel_dp_long_pulse(struct intel_connector *connector) +intel_dp_long_pulse(struct intel_connector *connector, + struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_dp *intel_dp = intel_attached_dp(&connector->base); @@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector) */ status = connector_status_disconnected; goto out; + } else { + /* + * If display is now connected check links status, + * there has been known issues of link loss triggering + * long pulse. + * + * Some sinks (eg. ASUS PB287Q) seem to perform some + * weird HPD ping pong during modesets. So we can apparently + * end up with HPD going low during a modeset, and then + * going back up soon after. And once that happens we must + * retrain the link to get a picture. That's in case no + * userspace component reacted to intermittent HPD dip. + */ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + intel_dp_retrain_link(encoder, ctx); } /* @@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector, return ret; } - status = intel_dp_long_pulse(intel_dp->attached_connector); + status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); } intel_dp->detect_done = false; -- GitLab From 4fe967912ee83048beb45a6b4f0f6774fddcfa0a Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Thu, 23 Aug 2018 18:48:07 -0700 Subject: [PATCH 1006/1692] drm/i915/dsc: Fix PPS register definition macros for 2nd VDSC engine This patch fixes the PPS4 and PPS5 register definition macros that were resulting into an incorect MMIO address. 
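For context, the per-pipe register macros here are thin wrappers around a two-point lookup of roughly the form base_b + index * (base_c - base_b), with index 0 for PIPE_B and 1 for PIPE_C (simplified; the offsets in the sketch below are made up). Passing an endpoint that belongs to the other DSC engine therefore silently resolves one of the pipes to the wrong register:

#include <stdint.h>
#include <stdio.h>

static uint32_t pick(uint32_t idx, uint32_t reg_b, uint32_t reg_c)
{
	return reg_b + idx * (reg_c - reg_b);   /* idx: 0 = PIPE_B, 1 = PIPE_C */
}

int main(void)
{
	uint32_t dsc0_pps4_pb = 0x6b274, dsc1_pps4_pb = 0x6bb74; /* made up */
	uint32_t dsc1_pps4_pc = 0x6bc74;                         /* made up */

	/* buggy pairing: DSC0's PB endpoint with DSC1's PC endpoint */
	printf("bug: PIPE_B=%#x PIPE_C=%#x\n",
	       pick(0, dsc0_pps4_pb, dsc1_pps4_pc),
	       pick(1, dsc0_pps4_pb, dsc1_pps4_pc)); /* PIPE_B hits DSC0 */

	/* fixed pairing: both endpoints from DSC1 */
	printf("fix: PIPE_B=%#x PIPE_C=%#x\n",
	       pick(0, dsc1_pps4_pb, dsc1_pps4_pc),
	       pick(1, dsc1_pps4_pb, dsc1_pps4_pc));
	return 0;
}
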
Fixes: 2efbb2f099fb ("i915/dp/dsc: Add DSC PPS register definitions") Cc: Anusha Srivatsa Signed-off-by: Manasi Navare Reviewed-by: Rodrigo Vivi Reviewed-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/msgid/20180824014807.14681-1-manasi.d.navare@intel.com (cherry picked from commit 5df52391ddbed869c7d67b00fbb013bd64334115) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 08ec7446282e..9e63cd47b60f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10422,7 +10422,7 @@ enum skl_power_gate { _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) @@ -10437,7 +10437,7 @@ enum skl_power_gate { _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) #define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) -- GitLab From 2b82435cb90bed2c5f8398730d964dd11602217c Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 31 Aug 2018 20:47:39 +0300 Subject: [PATCH 1007/1692] drm/i915/dp_mst: Fix enabling pipe clock for all streams MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit afb2c4437dae ("drm/i915/ddi: Push pipe clock enabling to encoders") inadvertently stopped enabling the pipe clock for any DP-MST stream after the first one. It also rearranged the pipe clock enabling wrt. initial MST payload allocation step (which may or may not be a problem, but it's contrary to the spec.). Fix things by making the above commit truly a non-functional change. 
Fixes: afb2c4437dae ("drm/i915/ddi: Push pipe clock enabling to encoders") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107365 Reported-by: Lyude Paul Reported-by: dmummenschanz@web.de Tested-by: dmummenschanz@web.de Tested-by: Lyude Paul Cc: Lyude Paul Cc: dmummenschanz@web.de Cc: Ville Syrjälä Cc: Rodrigo Vivi Cc: Chris Wilson Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Reviewed-by: Lyude Paul Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20180831174739.30387-1-imre.deak@intel.com (cherry picked from commit 2b5cf4ef541f1b2facaca58cae5e8e0b5f19ad4c) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_ddi.c | 17 +++++++++-------- drivers/gpu/drm/i915/intel_dp_mst.c | 4 ++++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 8761513f3532..c9af34861d9e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) intel_dp_stop_link_train(intel_dp); - intel_ddi_enable_pipe_clock(crtc_state); + if (!is_mst) + intel_ddi_enable_pipe_clock(crtc_state); } static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, @@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); - intel_ddi_disable_pipe_clock(old_crtc_state); - - /* - * Power down sink before disabling the port, otherwise we end - * up getting interrupts from the sink on detecting link loss. - */ - if (!is_mst) + if (!is_mst) { + intel_ddi_disable_pipe_clock(old_crtc_state); + /* + * Power down sink before disabling the port, otherwise we end + * up getting interrupts from the sink on detecting link loss. + */ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + } intel_disable_ddi_buf(encoder); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7e3e01607643..4ecd65375603 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + intel_ddi_disable_pipe_clock(old_crtc_state); + /* this can fail */ drm_dp_check_act_status(&intel_dp->mst_mgr); /* and this can also fail */ @@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, I915_WRITE(DP_TP_STATUS(port), temp); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + + intel_ddi_enable_pipe_clock(pipe_config); } static void intel_mst_enable_dp(struct intel_encoder *encoder, -- GitLab From c10bbfae3ae43fae1d77e16f05a73474acf514ff Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon, 3 Sep 2018 10:04:55 +0300 Subject: [PATCH 1008/1692] net: sched: null actions array pointer before releasing action Currently, tcf_action_delete() nulls actions array pointer after putting and deleting it. However, if tcf_idr_delete_index() returns an error, pointer to action is not set to null. That results it being released second time in error handling code of tca_action_gd(). 
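In other words, the slot in the caller-visible array has to be cleared before the reference is dropped, because the error path walks the same array again. A minimal user-space sketch of that ordering (the names and refcount scheme are illustrative, not the tc action API):

#include <stdlib.h>

struct action { int refcnt; };

static int put_action(struct action *a)          /* 1 if the last ref was dropped */
{
	if (--a->refcnt == 0) {
		free(a);
		return 1;
	}
	return 0;
}

static int delete_index(struct action *a)        /* stand-in, may fail */
{
	(void)a;
	return 0;
}

static int delete_actions(struct action *actions[], int n)
{
	for (int i = 0; i < n; i++) {
		struct action *a = actions[i];

		actions[i] = NULL;               /* clear before the put ... */
		if (put_action(a))
			continue;                /* freed: nothing more to do */
		if (delete_index(a) < 0)
			return -1;               /* ... so unwind sees NULL here */
	}
	return 0;
}

With the original ordering, a failing delete would return while actions[i] still pointed at memory that may already have been freed, which is exactly the use-after-free reported below.
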
Kasan error: [ 807.367755] ================================================================== [ 807.375844] BUG: KASAN: use-after-free in tc_setup_cb_call+0x14e/0x250 [ 807.382763] Read of size 8 at addr ffff88033e636000 by task tc/2732 [ 807.391289] CPU: 0 PID: 2732 Comm: tc Tainted: G W 4.19.0-rc1+ #799 [ 807.399542] Hardware name: Supermicro SYS-2028TP-DECR/X10DRT-P, BIOS 2.0b 03/30/2017 [ 807.407948] Call Trace: [ 807.410763] dump_stack+0x92/0xeb [ 807.414456] print_address_description+0x70/0x360 [ 807.419549] kasan_report+0x14d/0x300 [ 807.423582] ? tc_setup_cb_call+0x14e/0x250 [ 807.428150] tc_setup_cb_call+0x14e/0x250 [ 807.432539] ? nla_put+0x65/0xe0 [ 807.436146] fl_dump+0x394/0x3f0 [cls_flower] [ 807.440890] ? fl_tmplt_dump+0x140/0x140 [cls_flower] [ 807.446327] ? lock_downgrade+0x320/0x320 [ 807.450702] ? lock_acquire+0xe2/0x220 [ 807.454819] ? is_bpf_text_address+0x5/0x140 [ 807.459475] ? memcpy+0x34/0x50 [ 807.462980] ? nla_put+0x65/0xe0 [ 807.466582] tcf_fill_node+0x341/0x430 [ 807.470717] ? tcf_block_put+0xe0/0xe0 [ 807.474859] tcf_node_dump+0xdb/0xf0 [ 807.478821] fl_walk+0x8e/0x170 [cls_flower] [ 807.483474] tcf_chain_dump+0x35a/0x4d0 [ 807.487703] ? tfilter_notify+0x170/0x170 [ 807.492091] ? tcf_fill_node+0x430/0x430 [ 807.496411] tc_dump_tfilter+0x362/0x3f0 [ 807.500712] ? tc_del_tfilter+0x850/0x850 [ 807.505104] ? kasan_unpoison_shadow+0x30/0x40 [ 807.509940] ? __mutex_unlock_slowpath+0xcf/0x410 [ 807.515031] netlink_dump+0x263/0x4f0 [ 807.519077] __netlink_dump_start+0x2a0/0x300 [ 807.523817] ? tc_del_tfilter+0x850/0x850 [ 807.528198] rtnetlink_rcv_msg+0x46a/0x6d0 [ 807.532671] ? rtnl_fdb_del+0x3f0/0x3f0 [ 807.536878] ? tc_del_tfilter+0x850/0x850 [ 807.541280] netlink_rcv_skb+0x18d/0x200 [ 807.545570] ? rtnl_fdb_del+0x3f0/0x3f0 [ 807.549773] ? netlink_ack+0x500/0x500 [ 807.553913] netlink_unicast+0x2d0/0x370 [ 807.558212] ? netlink_attachskb+0x340/0x340 [ 807.562855] ? _copy_from_iter_full+0xe9/0x3e0 [ 807.567677] ? import_iovec+0x11e/0x1c0 [ 807.571890] netlink_sendmsg+0x3b9/0x6a0 [ 807.576192] ? netlink_unicast+0x370/0x370 [ 807.580684] ? netlink_unicast+0x370/0x370 [ 807.585154] sock_sendmsg+0x6b/0x80 [ 807.589015] ___sys_sendmsg+0x4a1/0x520 [ 807.593230] ? copy_msghdr_from_user+0x210/0x210 [ 807.598232] ? do_wp_page+0x174/0x880 [ 807.602276] ? __handle_mm_fault+0x749/0x1c10 [ 807.607021] ? __handle_mm_fault+0x1046/0x1c10 [ 807.611849] ? __pmd_alloc+0x320/0x320 [ 807.615973] ? check_chain_key+0x140/0x1f0 [ 807.620450] ? check_chain_key+0x140/0x1f0 [ 807.624929] ? __fget_light+0xbc/0xd0 [ 807.628970] ? __sys_sendmsg+0xd7/0x150 [ 807.633172] __sys_sendmsg+0xd7/0x150 [ 807.637201] ? __ia32_sys_shutdown+0x30/0x30 [ 807.641846] ? up_read+0x53/0x90 [ 807.645442] ? __do_page_fault+0x484/0x780 [ 807.649949] ? 
do_syscall_64+0x1e/0x2c0 [ 807.654164] do_syscall_64+0x72/0x2c0 [ 807.658198] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 807.663625] RIP: 0033:0x7f42e9870150 [ 807.667568] Code: 8b 15 3c 7d 2b 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb cd 66 0f 1f 44 00 00 83 3d b9 d5 2b 00 00 75 10 b8 2e 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 be cd 00 00 48 89 04 24 [ 807.687328] RSP: 002b:00007ffdbf595b58 EFLAGS: 00000246 ORIG_RAX: 000000000000002e [ 807.695564] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f42e9870150 [ 807.703083] RDX: 0000000000000000 RSI: 00007ffdbf595b80 RDI: 0000000000000003 [ 807.710605] RBP: 00007ffdbf599d90 R08: 0000000000679bc0 R09: 000000000000000f [ 807.718127] R10: 00000000000005e7 R11: 0000000000000246 R12: 00007ffdbf599d88 [ 807.725651] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 [ 807.735048] Allocated by task 2687: [ 807.738902] kasan_kmalloc+0xa0/0xd0 [ 807.742852] __kmalloc+0x118/0x2d0 [ 807.746615] tcf_idr_create+0x44/0x320 [ 807.750738] tcf_nat_init+0x41e/0x530 [act_nat] [ 807.755638] tcf_action_init_1+0x4e0/0x650 [ 807.760104] tcf_action_init+0x1ce/0x2d0 [ 807.764395] tcf_exts_validate+0x1d8/0x200 [ 807.768861] fl_change+0x55a/0x26b4 [cls_flower] [ 807.773845] tc_new_tfilter+0x748/0xa20 [ 807.778051] rtnetlink_rcv_msg+0x56a/0x6d0 [ 807.782517] netlink_rcv_skb+0x18d/0x200 [ 807.786804] netlink_unicast+0x2d0/0x370 [ 807.791095] netlink_sendmsg+0x3b9/0x6a0 [ 807.795387] sock_sendmsg+0x6b/0x80 [ 807.799240] ___sys_sendmsg+0x4a1/0x520 [ 807.803445] __sys_sendmsg+0xd7/0x150 [ 807.807473] do_syscall_64+0x72/0x2c0 [ 807.811506] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 807.818776] Freed by task 2728: [ 807.822283] __kasan_slab_free+0x122/0x180 [ 807.826752] kfree+0xf4/0x2f0 [ 807.830080] __tcf_action_put+0x5a/0xb0 [ 807.834281] tcf_action_put_many+0x46/0x70 [ 807.838747] tca_action_gd+0x232/0xc40 [ 807.842862] tc_ctl_action+0x215/0x230 [ 807.846977] rtnetlink_rcv_msg+0x56a/0x6d0 [ 807.851444] netlink_rcv_skb+0x18d/0x200 [ 807.855731] netlink_unicast+0x2d0/0x370 [ 807.860021] netlink_sendmsg+0x3b9/0x6a0 [ 807.864312] sock_sendmsg+0x6b/0x80 [ 807.868166] ___sys_sendmsg+0x4a1/0x520 [ 807.872372] __sys_sendmsg+0xd7/0x150 [ 807.876401] do_syscall_64+0x72/0x2c0 [ 807.880431] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 807.887704] The buggy address belongs to the object at ffff88033e636000 which belongs to the cache kmalloc-256 of size 256 [ 807.900909] The buggy address is located 0 bytes inside of 256-byte region [ffff88033e636000, ffff88033e636100) [ 807.913155] The buggy address belongs to the page: [ 807.918322] page:ffffea000cf98d80 count:1 mapcount:0 mapping:ffff88036f80ee00 index:0x0 compound_mapcount: 0 [ 807.928831] flags: 0x5fff8000008100(slab|head) [ 807.933647] raw: 005fff8000008100 ffffea000db44f00 0000000400000004 ffff88036f80ee00 [ 807.942050] raw: 0000000000000000 0000000080190019 00000001ffffffff 0000000000000000 [ 807.950456] page dumped because: kasan: bad access detected [ 807.958240] Memory state around the buggy address: [ 807.963405] ffff88033e635f00: fc fc fc fc fb fb fb fb fb fb fb fc fc fc fc fb [ 807.971288] ffff88033e635f80: fb fb fb fb fb fb fc fc fc fc fc fc fc fc fc fc [ 807.979166] >ffff88033e636000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 807.994882] ^ [ 807.998477] ffff88033e636080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 808.006352] ffff88033e636100: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb [ 808.014230] ================================================================== 
[ 808.022108] Disabling lock debugging due to kernel taint Fixes: edfaf94fa705 ("net_sched: improve and refactor tcf_action_put_many()") Signed-off-by: Vlad Buslov Acked-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/act_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 316c98bb87e4..e12f8ef7baa4 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1179,6 +1179,7 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) struct tcf_idrinfo *idrinfo = a->idrinfo; u32 act_index = a->tcfa_index; + actions[i] = NULL; if (tcf_action_put(a)) { /* last reference, action was deleted concurrently */ module_put(ops->owner); @@ -1190,7 +1191,6 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[]) if (ret < 0) return ret; } - actions[i] = NULL; } return 0; } -- GitLab From bf68066fccb10fce6bbffdda24ee2ae314c9c5b2 Mon Sep 17 00:00:00 2001 From: Ivan Mikhaylov Date: Mon, 3 Sep 2018 10:26:28 +0300 Subject: [PATCH 1009/1692] net/ibm/emac: wrong emac_calc_base call was used by typo __emac_calc_base_mr1 was used instead of __emac4_calc_base_mr1 by copy-paste mistake for emac4syn. Fixes: 45d6e545505fd32edb812f085be7de45b6a5c0af ("net/ibm/emac: add 8192 rx/tx fifo size") Signed-off-by: Ivan Mikhaylov Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/emac/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 354c0982847b..372664686309 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s case 16384: ret |= EMAC_MR1_RFS_16K; break; - case 8192: - ret |= EMAC4_MR1_RFS_8K; - break; case 4096: ret |= EMAC_MR1_RFS_4K; break; @@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ case 16384: ret |= EMAC4_MR1_RFS_16K; break; + case 8192: + ret |= EMAC4_MR1_RFS_8K; + break; case 4096: ret |= EMAC4_MR1_RFS_4K; break; -- GitLab From af8a2b8ba7678b4695f9e854ba9abae1076beabe Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 3 Sep 2018 15:47:10 +0800 Subject: [PATCH 1010/1692] sctp: fix invalid reference to the index variable of the iterator Now in sctp_apply_peer_addr_params(), if SPP_IPV6_FLOWLABEL flag is set and trans is NULL, it would use trans as the index variable to traverse transport_addr_list, then trans is set as the last transport of it. Later, if SPP_DSCP flag is set, it would enter into the wrong branch as trans is actually an invalid reference. So fix it by using a new index variable to traverse transport_addr_list for both SPP_DSCP and SPP_IPV6_FLOWLABEL flags process. Fixes: 0b0dce7a36fb ("sctp: add spp_ipv6_flowlabel and spp_dscp for sctp_paddrparams") Reported-by: Julia Lawall Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index aa76586a1a1c..a0ccfa4b8220 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2663,14 +2663,15 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, SCTP_FLOWLABEL_VAL_MASK; trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; } else if (asoc) { - list_for_each_entry(trans, - &asoc->peer.transport_addr_list, + struct sctp_transport *t; + + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { - if (trans->ipaddr.sa.sa_family != AF_INET6) + if (t->ipaddr.sa.sa_family != AF_INET6) continue; - trans->flowlabel = params->spp_ipv6_flowlabel & - SCTP_FLOWLABEL_VAL_MASK; - trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + t->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + t->flowlabel |= SCTP_FLOWLABEL_SET_MASK; } asoc->flowlabel = params->spp_ipv6_flowlabel & SCTP_FLOWLABEL_VAL_MASK; @@ -2687,12 +2688,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; trans->dscp |= SCTP_DSCP_SET_MASK; } else if (asoc) { - list_for_each_entry(trans, - &asoc->peer.transport_addr_list, + struct sctp_transport *t; + + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { - trans->dscp = params->spp_dscp & - SCTP_DSCP_VAL_MASK; - trans->dscp |= SCTP_DSCP_SET_MASK; + t->dscp = params->spp_dscp & + SCTP_DSCP_VAL_MASK; + t->dscp |= SCTP_DSCP_SET_MASK; } asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; asoc->dscp |= SCTP_DSCP_SET_MASK; -- GitLab From 741880e1f2f59b20125dc480765d2546cec66080 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 3 Sep 2018 15:47:11 +0800 Subject: [PATCH 1011/1692] sctp: not traverse asoc trans list if non-ipv6 trans exists for ipv6_flowlabel When users set params.spp_address and get a trans, ipv6_flowlabel flag should be applied into this trans. But even if this one is not an ipv6 trans, it should not go to apply it into all other transes of the asoc but simply ignore it. Fixes: 0b0dce7a36fb ("sctp: add spp_ipv6_flowlabel and spp_dscp for sctp_paddrparams") Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/socket.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a0ccfa4b8220..f73e9d38d5ba 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2658,10 +2658,12 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } if (params->spp_flags & SPP_IPV6_FLOWLABEL) { - if (trans && trans->ipaddr.sa.sa_family == AF_INET6) { - trans->flowlabel = params->spp_ipv6_flowlabel & - SCTP_FLOWLABEL_VAL_MASK; - trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + if (trans) { + if (trans->ipaddr.sa.sa_family == AF_INET6) { + trans->flowlabel = params->spp_ipv6_flowlabel & + SCTP_FLOWLABEL_VAL_MASK; + trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; + } } else if (asoc) { struct sctp_transport *t; -- GitLab From 6b95c3e9697254dab0c8eafc6ab9d5e10d2eca4e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 3 Sep 2018 04:23:17 -0400 Subject: [PATCH 1012/1692] bnxt_en: Fix firmware signaled resource change logic in open. When the driver detects that resources have changed during open, it should reset the rx and tx rings to 0. This will properly setup the init sequence to initialize the default rings again. We also need to signal the RDMA driver to stop and clear its interrupts. 
We then call the RoCE driver to restart if a new set of default rings is successfully reserved. Fixes: 25e1acd6b92b ("bnxt_en: Notify firmware about IF state changes.") Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8bb1e38b1681..6a1baf375ac3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) hw_resc->resv_rx_rings = 0; hw_resc->resv_hw_ring_grps = 0; hw_resc->resv_vnics = 0; + bp->tx_nr_rings = 0; + bp->rx_nr_rings = 0; } return rc; } @@ -8769,20 +8771,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) if (bp->tx_nr_rings) return 0; + bnxt_ulp_irq_stop(bp); + bnxt_clear_int_mode(bp); rc = bnxt_set_dflt_rings(bp, true); if (rc) { netdev_err(bp->dev, "Not enough rings available.\n"); - return rc; + goto init_dflt_ring_err; } rc = bnxt_init_int_mode(bp); if (rc) - return rc; + goto init_dflt_ring_err; + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { bp->flags |= BNXT_FLAG_RFS; bp->dev->features |= NETIF_F_NTUPLE; } - return 0; +init_dflt_ring_err: + bnxt_ulp_irq_restart(bp, rc); + return rc; } int bnxt_restore_pf_fw_resources(struct bnxt *bp) -- GitLab From ad95c27bdb930105f3eea02621bda157caf2862d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 3 Sep 2018 04:23:18 -0400 Subject: [PATCH 1013/1692] bnxt_en: Clean up unused functions. Remove unused bnxt_subtract_ulp_resources(). Change bnxt_get_max_func_irqs() to static since it is only locally used. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 - drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 15 --------------- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 1 - 4 files changed, 1 insertion(+), 18 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a1baf375ac3..6472ce447f87 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5918,7 +5918,7 @@ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) bp->hw_resc.max_cp_rings = max; } -unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) +static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index fefa011320e0..c4c77b9fa77b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1482,7 +1482,6 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); -unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp); void bnxt_tx_disable(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index c37b2842f972..deac73e8e0f7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -220,21 +220,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp) return 0; } -void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id) -{ - ASSERT_RTNL(); - if (bnxt_ulp_registered(bp->edev, ulp_id)) { - struct bnxt_en_dev *edev = bp->edev; - unsigned int msix_req, max; - - msix_req = edev->ulp_tbl[ulp_id].msix_requested; - max = bnxt_get_max_func_cp_rings(bp); - bnxt_set_max_func_cp_rings(bp, max - msix_req); - max = bnxt_get_max_func_stat_ctxs(bp); - bnxt_set_max_func_stat_ctxs(bp, max - 1); - } -} - static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, struct bnxt_fw_msg *fw_msg) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index df48ac71729f..d9bea37cd211 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id) int bnxt_get_ulp_msix_num(struct bnxt *bp); int bnxt_get_ulp_msix_base(struct bnxt *bp); -void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id); void bnxt_ulp_stop(struct bnxt *bp); void bnxt_ulp_start(struct bnxt *bp); void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); -- GitLab From 00fe9c326d2027f2437dea38ef0e82f9d02d94c0 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 3 Sep 2018 04:23:19 -0400 Subject: [PATCH 1014/1692] bnxt_en: Do not adjust max_cp_rings by the ones used by RDMA. Currently, the driver adjusts the bp->hw_resc.max_cp_rings by the number of MSIX vectors used by RDMA. There is one code path in open that needs to check the true max_cp_rings including any used by RDMA. This code is now checking for the reduced max_cp_rings which will fail when the number of cp rings is very small. 
To fix this in a clean way, we don't adjust max_cp_rings anymore. Instead, we add a helper bnxt_get_max_func_cp_rings_for_en() to get the reduced max_cp_rings when appropriate. Fixes: ec86f14ea506 ("bnxt_en: Add ULP calls to stop and restart IRQs.") Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 ++++--- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 7 ++++--- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 5 ----- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6472ce447f87..cecbb1d1f587 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5913,9 +5913,9 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) return bp->hw_resc.max_cp_rings; } -void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) +unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) { - bp->hw_resc.max_cp_rings = max; + return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp); } static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) @@ -8631,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, *max_tx = hw_resc->max_tx_rings; *max_rx = hw_resc->max_rx_rings; - *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings); + *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), + hw_resc->max_irqs); *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); max_ring_grps = hw_resc->max_hw_ring_grps; if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index c4c77b9fa77b..bde384630a75 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1481,7 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *); unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); -void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); +unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp); int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp); void bnxt_tx_disable(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 6d583bcd2a81..fcd085a9853a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); - vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; + vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; if (bp->flags & BNXT_FLAG_AGG_RINGS) vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; @@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) max_stat_ctxs = hw_resc->max_stat_ctxs; /* Remaining rings are distributed equally amongs VF's for now */ - vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; + vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) - + bp->cp_nr_rings) / num_vfs; vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; if (bp->flags & 
BNXT_FLAG_AGG_RINGS) vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / @@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) */ vfs_supported = *num_vfs; - avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; + avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; avail_cp = min_t(int, avail_cp, avail_stat); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index deac73e8e0f7..beee61292d5e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, edev->ulp_tbl[ulp_id].msix_requested = avail_msix; } bnxt_fill_msix_vecs(bp, ent); - bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; return avail_msix; } @@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); - int max_cp_rings, msix_requested; ASSERT_RTNL(); if (ulp_id != BNXT_ROCE_ULP) @@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) return 0; - max_cp_rings = bnxt_get_max_func_cp_rings(bp); - msix_requested = edev->ulp_tbl[ulp_id].msix_requested; - bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); edev->ulp_tbl[ulp_id].msix_requested = 0; edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; if (netif_running(dev)) { -- GitLab From 9cc1bf3928b31e515ed15477b3c7eb653d0b3b42 Mon Sep 17 00:00:00 2001 From: Zhenbo Gao Date: Mon, 3 Sep 2018 16:36:45 +0800 Subject: [PATCH 1015/1692] tipc: correct spelling errors for struct tipc_bc_base's comment Trivial fix for two spelling mistakes. Signed-off-by: Zhenbo Gao Reviewed-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/bcast.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 9ee6cfea56dd..d8026543bf4c 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link"; * struct tipc_bc_base - base structure for keeping broadcast send state * @link: broadcast send link structure * @inputq: data input queue; will only carry SOCK_WAKEUP messages - * @dest: array keeping number of reachable destinations per bearer + * @dests: array keeping number of reachable destinations per bearer * @primary_bearer: a bearer having links to all broadcast destinations, if any * @bcast_support: indicates if primary bearer, if any, supports broadcast * @rcast_support: indicates if all peer nodes support replicast * @rc_ratio: dest count as percentage of cluster size where send method changes - * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast + * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast */ struct tipc_bc_base { struct tipc_link *link; -- GitLab From a484ef3442d2f05fa59edf4f6d14a8169d1b94a6 Mon Sep 17 00:00:00 2001 From: Zhenbo Gao Date: Mon, 3 Sep 2018 16:36:46 +0800 Subject: [PATCH 1016/1692] tipc: correct spelling errors for tipc_topsrv_queue_evt() comments tipc_conn_queue_evt -> tipc_topsrv_queue_evt tipc_send_work -> tipc_conn_send_work tipc_send_to_sock -> tipc_conn_send_to_sock Signed-off-by: Zhenbo Gao Reviewed-by: Ying Xue Signed-off-by: David S. 
Miller --- net/tipc/topsrv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index c8e34ef22c30..2627b5d812e9 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work) conn_put(con); } -/* tipc_conn_queue_evt() - interrupt level call from a subscription instance - * The queued work is launched into tipc_send_work()->tipc_send_to_sock() +/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance + * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock() */ void tipc_topsrv_queue_evt(struct net *net, int conid, u32 event, struct tipc_event *evt) -- GitLab From 1dfdf99106668679b0de5a62fd4f42c1a11c9445 Mon Sep 17 00:00:00 2001 From: Greentime Hu Date: Wed, 18 Jul 2018 09:54:55 +0800 Subject: [PATCH 1017/1692] nds32: fix logic for module This bug is report by Dan Carpenter. We shall use ~loc_mask instead of !loc_mask because we need to and(&) the bits of ~loc_mask. Reported-by: Dan Carpenter Fixes: c9a4a8da6baa ("nds32: Loadable modules") Signed-off-by: Greentime Hu --- arch/nds32/kernel/module.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c index 4167283d8293..1e31829cbc2a 100644 --- a/arch/nds32/kernel/module.c +++ b/arch/nds32/kernel/module.c @@ -40,7 +40,7 @@ void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask, tmp2 = tmp & loc_mask; if (partial_in_place) { - tmp &= (!loc_mask); + tmp &= (~loc_mask); tmp = tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); } else { @@ -70,7 +70,7 @@ void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask, tmp2 = tmp & loc_mask; if (partial_in_place) { - tmp &= (!loc_mask); + tmp &= (~loc_mask); tmp = tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); } else { -- GitLab From 1944a50859ec2b570b42b459ac25d607fc7c31f0 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 7 Aug 2018 12:03:13 +0800 Subject: [PATCH 1018/1692] nds32: add NULL entry to the end of_device_id array Make sure of_device_id tables are NULL terminated. Found by coccinelle spatch "misc/of_table.cocci" Signed-off-by: YueHaibing Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/kernel/atl2c.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/nds32/kernel/atl2c.c b/arch/nds32/kernel/atl2c.c index 0c6d031a1c4a..0c5386e72098 100644 --- a/arch/nds32/kernel/atl2c.c +++ b/arch/nds32/kernel/atl2c.c @@ -9,7 +9,8 @@ void __iomem *atl2c_base; static const struct of_device_id atl2c_ids[] __initconst = { - {.compatible = "andestech,atl2c",} + {.compatible = "andestech,atl2c",}, + {} }; static int __init atl2c_of_init(void) -- GitLab From c17df7960534357fb74074c2f514c831d4a9cf5a Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 13 Aug 2018 13:28:23 +0800 Subject: [PATCH 1019/1692] nds32: Fix empty call trace The compiler predefined macro 'NDS32_ABI_2' had been removed, it should use the '__NDS32_ABI_2' here. 
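Of the nds32 fixes above, the do_reloc16()/do_reloc32() one is the classic logical-versus-bitwise NOT mix-up: with any non-zero mask, '!' collapses the mask to 0 and wipes the whole word, while '~' clears only the relocation field. A tiny demonstration with arbitrary values:

#include <stdio.h>

int main(void)
{
	unsigned int tmp      = 0xaabbccdd;
	unsigned int loc_mask = 0x000fffff;

	printf("tmp & !mask = %#x\n", tmp & !loc_mask); /* 0: whole word wiped */
	printf("tmp & ~mask = %#x\n", tmp & ~loc_mask); /* 0xaab00000: field cleared */
	return 0;
}
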
Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index a6205fd4db52..f0e974347c26 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -137,7 +137,7 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) !((unsigned long)base_reg & 0x3) && ((unsigned long)base_reg >= TASK_SIZE)) { unsigned long next_fp; -#if !defined(NDS32_ABI_2) +#if !defined(__NDS32_ABI_2) ret_addr = base_reg[0]; next_fp = base_reg[1]; #else -- GitLab From 6cce95a6c7d288ac2126eee4b95df448b9015b84 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 13 Aug 2018 14:48:49 +0800 Subject: [PATCH 1020/1692] nds32: Fix get_user/put_user macro expand pointer problem The pointer argument of macro need to be taken out once first, and then use the new pointer in the macro body. In kernel/trace/trace.c, get_user(ch, ubuf++) causes the unexpected increment after expand the macro. Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/include/asm/uaccess.h | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 18a009f3804d..3f771e0595e8 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -78,8 +78,9 @@ static inline void set_fs(mm_segment_t fs) #define get_user(x,p) \ ({ \ long __e = -EFAULT; \ - if(likely(access_ok(VERIFY_READ, p, sizeof(*p)))) { \ - __e = __get_user(x,p); \ + const __typeof__(*(p)) __user *__p = (p); \ + if(likely(access_ok(VERIFY_READ, __p, sizeof(*__p)))) { \ + __e = __get_user(x, __p); \ } else \ x = 0; \ __e; \ @@ -99,10 +100,10 @@ static inline void set_fs(mm_segment_t fs) #define __get_user_err(x,ptr,err) \ do { \ - unsigned long __gu_addr = (unsigned long)(ptr); \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ unsigned long __gu_val; \ - __chk_user_ptr(ptr); \ - switch (sizeof(*(ptr))) { \ + __chk_user_ptr(__gu_addr); \ + switch (sizeof(*(__gu_addr))) { \ case 1: \ __get_user_asm("lbi",__gu_val,__gu_addr,err); \ break; \ @@ -119,7 +120,7 @@ do { \ BUILD_BUG(); \ break; \ } \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__typeof__(*(__gu_addr)))__gu_val; \ } while (0) #define __get_user_asm(inst,x,addr,err) \ @@ -169,8 +170,9 @@ do { \ #define put_user(x,p) \ ({ \ long __e = -EFAULT; \ - if(likely(access_ok(VERIFY_WRITE, p, sizeof(*p)))) { \ - __e = __put_user(x,p); \ + __typeof__(*(p)) __user *__p = (p); \ + if(likely(access_ok(VERIFY_WRITE, __p, sizeof(*__p)))) { \ + __e = __put_user(x, __p); \ } \ __e; \ }) @@ -189,10 +191,10 @@ do { \ #define __put_user_err(x,ptr,err) \ do { \ - unsigned long __pu_addr = (unsigned long)(ptr); \ - __typeof__(*(ptr)) __pu_val = (x); \ - __chk_user_ptr(ptr); \ - switch (sizeof(*(ptr))) { \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + __typeof__(*(__pu_addr)) __pu_val = (x); \ + __chk_user_ptr(__pu_addr); \ + switch (sizeof(*(__pu_addr))) { \ case 1: \ __put_user_asm("sbi",__pu_val,__pu_addr,err); \ break; \ -- GitLab From 7ef39548df8cdb6406e3b4b7255e7f8cd3fe3b13 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 13 Aug 2018 15:08:52 +0800 Subject: [PATCH 1021/1692] nds32: Clean up the coding style 1. Adjust indentation. 2. Unify argument name of each macro. 3. Add space after comma in parameters list. 4. Add space after 'if' keyword. 5. Replace space by tab. 6. 
Change asm volatile to __asm__ __volatile__ Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/include/asm/uaccess.h | 201 ++++++++++++++++--------------- 1 file changed, 103 insertions(+), 98 deletions(-) diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 3f771e0595e8..e1a2b5b749b9 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -38,7 +38,7 @@ struct exception_table_entry { extern int fixup_exception(struct pt_regs *regs); #define KERNEL_DS ((mm_segment_t) { ~0UL }) -#define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) +#define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) @@ -49,11 +49,11 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a, b) ((a) == (b)) +#define segment_eq(a, b) ((a) == (b)) #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) -#define access_ok(type, addr, size) \ +#define access_ok(type, addr, size) \ __range_ok((unsigned long)addr, (unsigned long)size) /* * Single-value transfer routines. They automatically use the right @@ -75,46 +75,48 @@ static inline void set_fs(mm_segment_t fs) * versions are void (ie, don't return a value as such). */ -#define get_user(x,p) \ +#define get_user(x, ptr) \ ({ \ long __e = -EFAULT; \ - const __typeof__(*(p)) __user *__p = (p); \ - if(likely(access_ok(VERIFY_READ, __p, sizeof(*__p)))) { \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + if (likely(access_ok(VERIFY_READ, __p, sizeof(*__p)))) { \ __e = __get_user(x, __p); \ - } else \ - x = 0; \ + } else { \ + (x) = 0; \ + } \ __e; \ }) -#define __get_user(x,ptr) \ + +#define __get_user(x, ptr) \ ({ \ long __gu_err = 0; \ - __get_user_err((x),(ptr),__gu_err); \ + __get_user_err((x), (ptr), __gu_err); \ __gu_err; \ }) -#define __get_user_error(x,ptr,err) \ +#define __get_user_error(x, ptr, err) \ ({ \ - __get_user_err((x),(ptr),err); \ - (void) 0; \ + __get_user_err((x), (ptr), err); \ + (void)0; \ }) -#define __get_user_err(x,ptr,err) \ +#define __get_user_err(x, ptr, err) \ do { \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ unsigned long __gu_val; \ __chk_user_ptr(__gu_addr); \ switch (sizeof(*(__gu_addr))) { \ case 1: \ - __get_user_asm("lbi",__gu_val,__gu_addr,err); \ + __get_user_asm("lbi", __gu_val, __gu_addr, (err)); \ break; \ case 2: \ - __get_user_asm("lhi",__gu_val,__gu_addr,err); \ + __get_user_asm("lhi", __gu_val, __gu_addr, (err)); \ break; \ case 4: \ - __get_user_asm("lwi",__gu_val,__gu_addr,err); \ + __get_user_asm("lwi", __gu_val, __gu_addr, (err)); \ break; \ case 8: \ - __get_user_asm_dword(__gu_val,__gu_addr,err); \ + __get_user_asm_dword(__gu_val, __gu_addr, (err)); \ break; \ default: \ BUILD_BUG(); \ @@ -123,23 +125,23 @@ do { \ (x) = (__typeof__(*(__gu_addr)))__gu_val; \ } while (0) -#define __get_user_asm(inst,x,addr,err) \ - asm volatile( \ - "1: "inst" %1,[%2]\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: move %0, %3\n" \ - " move %1, #0\n" \ - " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .previous" \ - : "+r" (err), "=&r" (x) \ - : "r" (addr), "i" (-EFAULT) \ - : "cc") +#define __get_user_asm(inst, x, addr, err) \ + __asm__ __volatile__ ( \ + "1: "inst" %1,[%2]\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: move %0, %3\n" \ + " move %1, #0\n" \ + " b 2b\n" \ + " 
.previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "+r" (err), "=&r" (x) \ + : "r" (addr), "i" (-EFAULT) \ + : "cc") #ifdef __NDS32_EB__ #define __gu_reg_oper0 "%H1" @@ -150,62 +152,64 @@ do { \ #endif #define __get_user_asm_dword(x, addr, err) \ - asm volatile( \ - "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ - "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ - "3:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "4: move %0, %3\n" \ - " b 3b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 4b\n" \ - " .long 2b, 4b\n" \ - " .previous" \ - : "+r"(err), "=&r"(x) \ - : "r"(addr), "i"(-EFAULT) \ - : "cc") -#define put_user(x,p) \ + __asm__ __volatile__ ( \ + "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ + "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "4: move %0, %3\n" \ + " b 3b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4b\n" \ + " .long 2b, 4b\n" \ + " .previous" \ + : "+r"(err), "=&r"(x) \ + : "r"(addr), "i"(-EFAULT) \ + : "cc") + +#define put_user(x, ptr) \ ({ \ long __e = -EFAULT; \ - __typeof__(*(p)) __user *__p = (p); \ - if(likely(access_ok(VERIFY_WRITE, __p, sizeof(*__p)))) { \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + if (likely(access_ok(VERIFY_WRITE, __p, sizeof(*__p)))) { \ __e = __put_user(x, __p); \ } \ __e; \ }) -#define __put_user(x,ptr) \ + +#define __put_user(x, ptr) \ ({ \ long __pu_err = 0; \ - __put_user_err((x),(ptr),__pu_err); \ + __put_user_err((x), (ptr), __pu_err); \ __pu_err; \ }) -#define __put_user_error(x,ptr,err) \ +#define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x),(ptr),err); \ - (void) 0; \ + __put_user_err((x), (ptr), err); \ + (void)0; \ }) -#define __put_user_err(x,ptr,err) \ +#define __put_user_err(x, ptr, err) \ do { \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ __typeof__(*(__pu_addr)) __pu_val = (x); \ __chk_user_ptr(__pu_addr); \ switch (sizeof(*(__pu_addr))) { \ case 1: \ - __put_user_asm("sbi",__pu_val,__pu_addr,err); \ + __put_user_asm("sbi", __pu_val, __pu_addr, (err)); \ break; \ case 2: \ - __put_user_asm("shi",__pu_val,__pu_addr,err); \ + __put_user_asm("shi", __pu_val, __pu_addr, (err)); \ break; \ case 4: \ - __put_user_asm("swi",__pu_val,__pu_addr,err); \ + __put_user_asm("swi", __pu_val, __pu_addr, (err)); \ break; \ case 8: \ - __put_user_asm_dword(__pu_val,__pu_addr,err); \ + __put_user_asm_dword(__pu_val, __pu_addr, (err)); \ break; \ default: \ BUILD_BUG(); \ @@ -213,22 +217,22 @@ do { \ } \ } while (0) -#define __put_user_asm(inst,x,addr,err) \ - asm volatile( \ - "1: "inst" %1,[%2]\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: move %0, %3\n" \ - " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .previous" \ - : "+r" (err) \ - : "r" (x), "r" (addr), "i" (-EFAULT) \ - : "cc") +#define __put_user_asm(inst, x, addr, err) \ + __asm__ __volatile__ ( \ + "1: "inst" %1,[%2]\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: move %0, %3\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 3b\n" \ + " .previous" \ + : "+r" (err) \ + : "r" (x), "r" (addr), "i" (-EFAULT) \ + : "cc") #ifdef __NDS32_EB__ #define __pu_reg_oper0 "%H2" @@ -239,23 +243,24 @@ do { \ #endif #define __put_user_asm_dword(x, addr, err) \ - asm volatile( \ - "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ - 
"\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ - "3:\n" \ - " .section .fixup,\"ax\"\n" \ - " .align 2\n" \ - "4: move %0, %3\n" \ - " b 3b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 4b\n" \ - " .long 2b, 4b\n" \ - " .previous" \ - : "+r"(err) \ - : "r"(addr), "r"(x), "i"(-EFAULT) \ - : "cc") + __asm__ __volatile__ ( \ + "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ + "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "4: move %0, %3\n" \ + " b 3b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4b\n" \ + " .long 2b, 4b\n" \ + " .previous" \ + : "+r"(err) \ + : "r"(addr), "r"(x), "i"(-EFAULT) \ + : "cc") + extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); extern long strncpy_from_user(char *dest, const char __user * src, long count); extern __must_check long strlen_user(const char __user * str); -- GitLab From 487913ab18c215b06611c4c91c7e905fc0960eb8 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 13 Aug 2018 16:02:53 +0800 Subject: [PATCH 1022/1692] nds32: Extract the checking and getting pointer to a macro Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/include/asm/uaccess.h | 80 ++++++++++++++++---------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index e1a2b5b749b9..362a32d9bd16 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -75,54 +75,54 @@ static inline void set_fs(mm_segment_t fs) * versions are void (ie, don't return a value as such). */ -#define get_user(x, ptr) \ -({ \ - long __e = -EFAULT; \ - const __typeof__(*(ptr)) __user *__p = (ptr); \ - if (likely(access_ok(VERIFY_READ, __p, sizeof(*__p)))) { \ - __e = __get_user(x, __p); \ - } else { \ - (x) = 0; \ - } \ - __e; \ -}) +#define get_user __get_user \ #define __get_user(x, ptr) \ ({ \ long __gu_err = 0; \ - __get_user_err((x), (ptr), __gu_err); \ + __get_user_check((x), (ptr), __gu_err); \ __gu_err; \ }) #define __get_user_error(x, ptr, err) \ ({ \ - __get_user_err((x), (ptr), err); \ + __get_user_check((x), (ptr), (err)); \ (void)0; \ }) +#define __get_user_check(x, ptr, err) \ +({ \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ + __get_user_err((x), __p, (err)); \ + } else { \ + (x) = 0; (err) = -EFAULT; \ + } \ +}) + #define __get_user_err(x, ptr, err) \ do { \ - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ unsigned long __gu_val; \ - __chk_user_ptr(__gu_addr); \ - switch (sizeof(*(__gu_addr))) { \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("lbi", __gu_val, __gu_addr, (err)); \ + __get_user_asm("lbi", __gu_val, (ptr), (err)); \ break; \ case 2: \ - __get_user_asm("lhi", __gu_val, __gu_addr, (err)); \ + __get_user_asm("lhi", __gu_val, (ptr), (err)); \ break; \ case 4: \ - __get_user_asm("lwi", __gu_val, __gu_addr, (err)); \ + __get_user_asm("lwi", __gu_val, (ptr), (err)); \ break; \ case 8: \ - __get_user_asm_dword(__gu_val, __gu_addr, (err)); \ + __get_user_asm_dword(__gu_val, (ptr), (err)); \ break; \ default: \ BUILD_BUG(); \ break; \ } \ - (x) = (__typeof__(*(__gu_addr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) #define __get_user_asm(inst, x, addr, err) \ @@ -170,15 +170,7 @@ do { \ : "r"(addr), "i"(-EFAULT) \ : "cc") -#define 
put_user(x, ptr) \ -({ \ - long __e = -EFAULT; \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - if (likely(access_ok(VERIFY_WRITE, __p, sizeof(*__p)))) { \ - __e = __put_user(x, __p); \ - } \ - __e; \ -}) +#define put_user __put_user \ #define __put_user(x, ptr) \ ({ \ @@ -189,27 +181,37 @@ do { \ #define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x), (ptr), err); \ + __put_user_err((x), (ptr), (err)); \ (void)0; \ }) +#define __put_user_check(x, ptr, err) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ + __put_user_err((x), __p, (err)); \ + } else { \ + (err) = -EFAULT; \ + } \ +}) + #define __put_user_err(x, ptr, err) \ do { \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - __typeof__(*(__pu_addr)) __pu_val = (x); \ - __chk_user_ptr(__pu_addr); \ - switch (sizeof(*(__pu_addr))) { \ + __typeof__(*(ptr)) __pu_val = (x); \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ case 1: \ - __put_user_asm("sbi", __pu_val, __pu_addr, (err)); \ + __put_user_asm("sbi", __pu_val, (ptr), (err)); \ break; \ case 2: \ - __put_user_asm("shi", __pu_val, __pu_addr, (err)); \ + __put_user_asm("shi", __pu_val, (ptr), (err)); \ break; \ case 4: \ - __put_user_asm("swi", __pu_val, __pu_addr, (err)); \ + __put_user_asm("swi", __pu_val, (ptr), (err)); \ break; \ case 8: \ - __put_user_asm_dword(__pu_val, __pu_addr, (err)); \ + __put_user_asm_dword(__pu_val, (ptr), (err)); \ break; \ default: \ BUILD_BUG(); \ -- GitLab From a18082575c664847d36c6ca030b09ce8d93aec2f Mon Sep 17 00:00:00 2001 From: Zong Li Date: Wed, 15 Aug 2018 10:45:59 +0800 Subject: [PATCH 1023/1692] nds32/ftrace: Support static function tracer This patch support the static function tracer. On nds32 ABI, we need to always push return address to stack for __builtin_return_address can work correctly, otherwise, it will get the wrong value of $lp at leaf function. Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/Kconfig | 1 + arch/nds32/Makefile | 4 ++++ arch/nds32/include/asm/ftrace.h | 20 ++++++++++++++++++++ arch/nds32/kernel/Makefile | 6 ++++++ arch/nds32/kernel/ftrace.c | 28 ++++++++++++++++++++++++++++ 5 files changed, 59 insertions(+) create mode 100644 arch/nds32/include/asm/ftrace.h create mode 100644 arch/nds32/kernel/ftrace.c diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 1d4248fa55e9..853497fe4266 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -40,6 +40,7 @@ config NDS32 select NO_IOPORT_MAP select RTC_LIB select THREAD_INFO_IN_TASK + select HAVE_FUNCTION_TRACER help Andes(nds32) Linux support. 
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 63f4f173e5f4..3509fac10491 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile @@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := defconfig comma = , +ifdef CONFIG_FUNCTION_TRACER +arch-y += -malways-save-lp -mno-relax +endif + KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog) KBUILD_CFLAGS += -mcmodel=large diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h new file mode 100644 index 000000000000..bac7657f576a --- /dev/null +++ b/arch/nds32/include/asm/ftrace.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_NDS32_FTRACE_H +#define __ASM_NDS32_FTRACE_H + +#ifdef CONFIG_FUNCTION_TRACER + +#define HAVE_FUNCTION_GRAPH_FP_TEST + +#define MCOUNT_ADDR ((unsigned long)(_mcount)) +/* mcount call is composed of three instructions: + * sethi + ori + jral + */ +#define MCOUNT_INSN_SIZE 12 + +extern void _mcount(unsigned long parent_ip); + +#endif /* CONFIG_FUNCTION_TRACER */ + +#endif /* __ASM_NDS32_FTRACE_H */ diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile index 42792743e8b9..27cded39fa66 100644 --- a/arch/nds32/kernel/Makefile +++ b/arch/nds32/kernel/Makefile @@ -21,3 +21,9 @@ extra-y := head.o vmlinux.lds obj-y += vdso/ + +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +endif diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c new file mode 100644 index 000000000000..563f64c070b3 --- /dev/null +++ b/arch/nds32/kernel/ftrace.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +extern void (*ftrace_trace_function)(unsigned long, unsigned long, + struct ftrace_ops*, struct pt_regs*); + +noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs) +{ + __asm__ (""); /* avoid to optimize as pure function */ +} + +noinline void _mcount(unsigned long parent_ip) +{ + /* save all state by the compiler prologue */ + + unsigned long ip = (unsigned long)__builtin_return_address(0); + + if (ftrace_trace_function != ftrace_stub) + ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip, + NULL, NULL); + + /* restore all state by the compiler epilogue */ +} +EXPORT_SYMBOL(_mcount); -- GitLab From 1e9b14c0d92b61a0979fd5ee24d5e7f080f11030 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Wed, 15 Aug 2018 10:53:04 +0800 Subject: [PATCH 1024/1692] nds32/ftrace: Support static function graph tracer This patch contains implementation of static function graph tracer. Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/Kconfig | 1 + arch/nds32/kernel/ftrace.c | 69 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 853497fe4266..ea171a00327c 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -41,6 +41,7 @@ config NDS32 select RTC_LIB select THREAD_INFO_IN_TASK select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_GRAPH_TRACER help Andes(nds32) Linux support. 
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index 563f64c070b3..707fce76522e 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -6,6 +6,8 @@ extern void (*ftrace_trace_function)(unsigned long, unsigned long, struct ftrace_ops*, struct pt_regs*); +extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); +extern void ftrace_graph_caller(void); noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *regs) @@ -23,6 +25,73 @@ noinline void _mcount(unsigned long parent_ip) ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip, NULL, NULL); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub + || ftrace_graph_entry != ftrace_graph_entry_stub) + ftrace_graph_caller(); +#endif + /* restore all state by the compiler epilogue */ } EXPORT_SYMBOL(_mcount); + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + struct ftrace_graph_ent trace; + unsigned long old; + int err; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + old = *parent; + + trace.func = self_addr; + trace.depth = current->curr_ret_stack + 1; + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) + return; + + err = ftrace_push_return_trace(old, self_addr, &trace.depth, + frame_pointer, NULL); + + if (err == -EBUSY) + return; + + *parent = return_hooker; +} + +noinline void ftrace_graph_caller(void) +{ + unsigned long *parent_ip = + (unsigned long *)(__builtin_frame_address(2) - 4); + + unsigned long selfpc = + (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE); + + unsigned long frame_pointer = + (unsigned long)__builtin_frame_address(3); + + prepare_ftrace_return(parent_ip, selfpc, frame_pointer); +} + +extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer); +void __naked return_to_handler(void) +{ + __asm__ __volatile__ ( + /* save state needed by the ABI */ + "smw.adm $r0,[$sp],$r1,#0x0 \n\t" + + /* get original return address */ + "move $r0, $fp \n\t" + "bal ftrace_return_to_handler\n\t" + "move $lp, $r0 \n\t" + + /* restore state nedded by the ABI */ + "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -- GitLab From fbf58a52ac088669dfa930e557d0303a9fbb7e17 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Wed, 15 Aug 2018 10:57:16 +0800 Subject: [PATCH 1025/1692] nds32/ftrace: Add RECORD_MCOUNT support Recognize NDS32 object files in recordmcount.pl. Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/Kconfig | 1 + scripts/recordmcount.pl | 3 +++ 2 files changed, 4 insertions(+) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index ea171a00327c..48d92171ea20 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -42,6 +42,7 @@ config NDS32 select THREAD_INFO_IN_TASK select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FTRACE_MCOUNT_RECORD help Andes(nds32) Linux support. 
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index fe06e77c15eb..f599031260d5 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -389,6 +389,9 @@ if ($arch eq "x86_64") { $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$"; $type = ".quad"; $alignment = 2; +} elsif ($arch eq "nds32") { + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$"; + $alignment = 2; } else { die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; } -- GitLab From 6b1d6d2fba37129f690ee7e9164f225c55626cac Mon Sep 17 00:00:00 2001 From: Zong Li Date: Wed, 15 Aug 2018 11:00:08 +0800 Subject: [PATCH 1026/1692] nds32/ftrace: Support dynamic function tracer This patch contains the implementation of dynamic function tracer. The mcount call is composed of three instructions, so there are three nop for enough placeholder. Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/Kconfig | 1 + arch/nds32/include/asm/ftrace.h | 26 +++++ arch/nds32/kernel/ftrace.c | 164 ++++++++++++++++++++++++++++++++ 3 files changed, 191 insertions(+) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 48d92171ea20..7068f341133d 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -43,6 +43,7 @@ config NDS32 select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_DYNAMIC_FTRACE help Andes(nds32) Linux support. diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h index bac7657f576a..2f96cc96aa35 100644 --- a/arch/nds32/include/asm/ftrace.h +++ b/arch/nds32/include/asm/ftrace.h @@ -15,6 +15,32 @@ extern void _mcount(unsigned long parent_ip); +#ifdef CONFIG_DYNAMIC_FTRACE + +#define FTRACE_ADDR ((unsigned long)_ftrace_caller) + +#ifdef __NDS32_EL__ +#define INSN_NOP 0x09000040 +#define INSN_SIZE(insn) (((insn & 0x00000080) == 0) ? 4 : 2) +#define IS_SETHI(insn) ((insn & 0x000000fe) == 0x00000046) +#define ENDIAN_CONVERT(insn) be32_to_cpu(insn) +#else /* __NDS32_EB__ */ +#define INSN_NOP 0x40000009 +#define INSN_SIZE(insn) (((insn & 0x80000000) == 0) ? 
4 : 2) +#define IS_SETHI(insn) ((insn & 0xfe000000) == 0x46000000) +#define ENDIAN_CONVERT(insn) (insn) +#endif + +extern void _ftrace_caller(unsigned long parent_ip); +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} +struct dyn_arch_ftrace { +}; + +#endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* CONFIG_FUNCTION_TRACER */ #endif /* __ASM_NDS32_FTRACE_H */ diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index 707fce76522e..3ca676b75d97 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -4,6 +4,7 @@ #include #include +#ifndef CONFIG_DYNAMIC_FTRACE extern void (*ftrace_trace_function)(unsigned long, unsigned long, struct ftrace_ops*, struct pt_regs*); extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); @@ -35,6 +36,168 @@ noinline void _mcount(unsigned long parent_ip) } EXPORT_SYMBOL(_mcount); +#else /* CONFIG_DYNAMIC_FTRACE */ + +noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs) +{ + __asm__ (""); /* avoid to optimize as pure function */ +} + +noinline void __naked _mcount(unsigned long parent_ip) +{ + __asm__ (""); /* avoid to optimize as pure function */ +} +EXPORT_SYMBOL(_mcount); + +#define XSTR(s) STR(s) +#define STR(s) #s +void _ftrace_caller(unsigned long parent_ip) +{ + /* save all state needed by the compiler prologue */ + + /* + * prepare arguments for real tracing function + * first arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE + * second arg : parent_ip + */ + __asm__ __volatile__ ( + "move $r1, %0 \n\t" + "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t" + : + : "r" (parent_ip), "r" (__builtin_return_address(0))); + + /* a placeholder for the call to a real tracing function */ + __asm__ __volatile__ ( + "ftrace_call: \n\t" + "nop \n\t" + "nop \n\t" + "nop \n\t"); + + /* restore all state needed by the compiler epilogue */ +} + +int __init ftrace_dyn_arch_init(void) +{ + return 0; +} + +int ftrace_arch_code_modify_prepare(void) +{ + set_all_modules_text_rw(); + return 0; +} + +int ftrace_arch_code_modify_post_process(void) +{ + set_all_modules_text_ro(); + return 0; +} + +static unsigned long gen_sethi_insn(unsigned long addr) +{ + unsigned long opcode = 0x46000000; + unsigned long imm = addr >> 12; + unsigned long rt_num = 0xf << 20; + + return ENDIAN_CONVERT(opcode | rt_num | imm); +} + +static unsigned long gen_ori_insn(unsigned long addr) +{ + unsigned long opcode = 0x58000000; + unsigned long imm = addr & 0x0000fff; + unsigned long rt_num = 0xf << 20; + unsigned long ra_num = 0xf << 15; + + return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm); +} + +static unsigned long gen_jral_insn(unsigned long addr) +{ + unsigned long opcode = 0x4a000001; + unsigned long rt_num = 0x1e << 20; + unsigned long rb_num = 0xf << 10; + + return ENDIAN_CONVERT(opcode | rt_num | rb_num); +} + +static void ftrace_gen_call_insn(unsigned long *call_insns, + unsigned long addr) +{ + call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u */ + call_insns[1] = gen_ori_insn(addr); /* ori $r15, $r15, imm15u */ + call_insns[2] = gen_jral_insn(addr); /* jral $lp, $r15 */ +} + +static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn, + unsigned long *new_insn, bool validate) +{ + unsigned long orig_insn[3]; + + if (validate) { + if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE)) + return -EFAULT; + if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE)) + return -EINVAL; + } + + if 
(probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE)) + return -EPERM; + + return 0; +} + +static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn, + unsigned long *new_insn, bool validate) +{ + int ret; + + ret = __ftrace_modify_code(pc, old_insn, new_insn, validate); + if (ret) + return ret; + + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); + + return ret; +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long pc = (unsigned long)&ftrace_call; + unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; + unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; + + if (func != ftrace_stub) + ftrace_gen_call_insn(new_insn, (unsigned long)func); + + return ftrace_modify_code(pc, old_insn, new_insn, false); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long pc = rec->ip; + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; + + ftrace_gen_call_insn(call_insn, addr); + + return ftrace_modify_code(pc, nop_insn, call_insn, true); +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + unsigned long pc = rec->ip; + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; + + ftrace_gen_call_insn(call_insn, addr); + + return ftrace_modify_code(pc, call_insn, nop_insn, true); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + #ifdef CONFIG_FUNCTION_GRAPH_TRACER void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer) @@ -94,4 +257,5 @@ void __naked return_to_handler(void) /* restore state nedded by the ABI */ "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); } + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -- GitLab From 95cd2f7bce9aa712473bba1b5b3f4fdec148baee Mon Sep 17 00:00:00 2001 From: Zong Li Date: Wed, 15 Aug 2018 11:01:10 +0800 Subject: [PATCH 1027/1692] nds32/ftrace: Support dynamic function graph tracer This patch contains the implementation of dynamic function graph tracer. 
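Like the ftrace_call site above, the new ftrace_graph_call placeholder is simply a patchable 12-byte window. A small sketch of what such a site holds in each state; the mnemonics follow the comments in ftrace_gen_call_insn(), the actual word values come from the gen_*_insn() helpers and depend on endianness, and the site_disabled name is only illustrative:

/* A patchable call site is MCOUNT_INSN_SIZE (12) bytes, i.e. three words.
 * INSN_NOP is the endianness-specific value from asm/ftrace.h earlier in
 * this series. */
static const unsigned long site_disabled[3] = {
	INSN_NOP, INSN_NOP, INSN_NOP	/* tracing off: execution falls through */
};
/* Tracing on, after ftrace_modify_code() validates and rewrites the site:
 *	sethi $r15, hi20(handler)	build the handler address, upper bits
 *	ori   $r15, $r15, lo12(handler)	lower bits
 *	jral  $lp, $r15			call the handler, return address in $lp
 */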
Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/kernel/ftrace.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index 3ca676b75d97..a646a8339052 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -74,6 +74,14 @@ void _ftrace_caller(unsigned long parent_ip) "nop \n\t" "nop \n\t"); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* a placeholder for the call to ftrace_graph_caller */ + __asm__ __volatile__ ( + "ftrace_graph_call: \n\t" + "nop \n\t" + "nop \n\t" + "nop \n\t"); +#endif /* restore all state needed by the compiler epilogue */ } @@ -258,4 +266,32 @@ void __naked return_to_handler(void) "lmw.bim $r0,[$sp],$r1,#0x0 \n\t"); } +#ifdef CONFIG_DYNAMIC_FTRACE +extern unsigned long ftrace_graph_call; + +static int ftrace_modify_graph_caller(bool enable) +{ + unsigned long pc = (unsigned long)&ftrace_graph_call; + unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; + unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP}; + + ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller); + + if (enable) + return ftrace_modify_code(pc, nop_insn, call_insn, true); + else + return ftrace_modify_code(pc, call_insn, nop_insn, true); +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(false); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -- GitLab From 1e377ae9b04aef4dc531fa4c5f81b65d440ebcba Mon Sep 17 00:00:00 2001 From: Zong Li Date: Wed, 15 Aug 2018 11:05:40 +0800 Subject: [PATCH 1028/1692] nds32/stack: Get real return address by using ftrace_graph_ret_addr Function graph tracer has modified the return address to 'return_to_handler' on stack, and provide the 'ftrace_graph_ret_addr' to get the real return address. 
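The calling convention is the part worth spelling out: an unwinder keeps a single graph_idx cursor for the whole walk of one task and passes every candidate return address through the helper, which hands back the address unchanged unless it is return_to_handler. A hypothetical wrapper, not part of the patch, just to show that shape:

/* Sketch: translate a saved return address if the graph tracer replaced it.
 * graph_idx must live across the whole walk of one task's stack. */
static unsigned long real_ret_addr(struct task_struct *tsk, int *graph_idx,
				   unsigned long ret_addr)
{
	if (!__kernel_text_address(ret_addr))
		return ret_addr;	/* not kernel text, nothing to translate */
	return ftrace_graph_ret_addr(tsk, graph_idx, ret_addr, NULL);
}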
Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/kernel/stacktrace.c | 4 ++++ arch/nds32/kernel/traps.c | 30 ++++++------------------------ 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c index 8b231e910ea6..36bc87003e83 100644 --- a/arch/nds32/kernel/stacktrace.c +++ b/arch/nds32/kernel/stacktrace.c @@ -4,6 +4,7 @@ #include #include #include +#include void save_stack_trace(struct stack_trace *trace) { @@ -16,6 +17,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) unsigned long *fpn; int skip = trace->skip; int savesched; + int graph_idx = 0; if (tsk == current) { __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); @@ -33,6 +35,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) fpp = fpn[FP_OFFSET]; if (!__kernel_text_address(lpp)) break; + else + lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL); if (savesched || !in_sched_functions(lpp)) { if (skip) { diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index f0e974347c26..7684c8f597ed 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -94,28 +95,6 @@ static void dump_instr(struct pt_regs *regs) set_fs(fs); } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -#include -static void -get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) -{ - if (*addr == (unsigned long)return_to_handler) { - int index = tsk->curr_ret_stack; - - if (tsk->ret_stack && index >= *graph) { - index -= *graph; - *addr = tsk->ret_stack[index].ret; - (*graph)++; - } - } -} -#else -static inline void -get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph) -{ -} -#endif - #define LOOP_TIMES (100) static void __dump(struct task_struct *tsk, unsigned long *base_reg) { @@ -126,7 +105,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) while (!kstack_end(base_reg)) { ret_addr = *base_reg++; if (__kernel_text_address(ret_addr)) { - get_real_ret_addr(&ret_addr, tsk, &graph); + ret_addr = ftrace_graph_ret_addr( + tsk, &graph, ret_addr, NULL); print_ip_sym(ret_addr); } if (--cnt < 0) @@ -145,7 +125,9 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) next_fp = base_reg[FP_OFFSET]; #endif if (__kernel_text_address(ret_addr)) { - get_real_ret_addr(&ret_addr, tsk, &graph); + + ret_addr = ftrace_graph_ret_addr( + tsk, &graph, ret_addr, NULL); print_ip_sym(ret_addr); } if (--cnt < 0) -- GitLab From c5fdf7e00d490dc094a97d287e0fa27e253cca84 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 20 Aug 2018 09:40:08 +0800 Subject: [PATCH 1029/1692] nds32: Remove the deprecated ABI implementation We are not using NDS32 ABI 2 for now, just remove the preprocessor directives __NDS32_ABI_2. 
Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/kernel/traps.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index 7684c8f597ed..f432310f3d02 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -117,13 +117,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) !((unsigned long)base_reg & 0x3) && ((unsigned long)base_reg >= TASK_SIZE)) { unsigned long next_fp; -#if !defined(__NDS32_ABI_2) - ret_addr = base_reg[0]; - next_fp = base_reg[1]; -#else ret_addr = base_reg[-1]; next_fp = base_reg[FP_OFFSET]; -#endif if (__kernel_text_address(ret_addr)) { ret_addr = ftrace_graph_ret_addr( -- GitLab From 95f93ed7fe92c16f5346e477491d91e4fa8e92b8 Mon Sep 17 00:00:00 2001 From: Zong Li Date: Mon, 20 Aug 2018 09:51:29 +0800 Subject: [PATCH 1030/1692] nds32: Add macro definition for offset of lp register on stack Use macro to replace the magic number. Signed-off-by: Zong Li Acked-by: Greentime Hu Signed-off-by: Greentime Hu --- arch/nds32/include/asm/nds32.h | 1 + arch/nds32/kernel/stacktrace.c | 2 +- arch/nds32/kernel/traps.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h index 19b19394a936..68c38151c3e4 100644 --- a/arch/nds32/include/asm/nds32.h +++ b/arch/nds32/include/asm/nds32.h @@ -17,6 +17,7 @@ #else #define FP_OFFSET (-2) #endif +#define LP_OFFSET (-1) extern void __init early_trap_init(void); static inline void GIE_ENABLE(void) diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c index 36bc87003e83..d974c0c1c65f 100644 --- a/arch/nds32/kernel/stacktrace.c +++ b/arch/nds32/kernel/stacktrace.c @@ -31,7 +31,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) && (fpn >= (unsigned long *)TASK_SIZE)) { unsigned long lpp, fpp; - lpp = fpn[-1]; + lpp = fpn[LP_OFFSET]; fpp = fpn[FP_OFFSET]; if (!__kernel_text_address(lpp)) break; diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index f432310f3d02..b0b85b7ab079 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -117,7 +117,7 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg) !((unsigned long)base_reg & 0x3) && ((unsigned long)base_reg >= TASK_SIZE)) { unsigned long next_fp; - ret_addr = base_reg[-1]; + ret_addr = base_reg[LP_OFFSET]; next_fp = base_reg[FP_OFFSET]; if (__kernel_text_address(ret_addr)) { -- GitLab From 487c4b2323b26cfb5fd4d77d3605a92c182b6288 Mon Sep 17 00:00:00 2001 From: Greentime Hu Date: Thu, 23 Aug 2018 14:47:43 +0800 Subject: [PATCH 1031/1692] nds32: Only print one page of stack when die to prevent printing too much information. It may print too much information sometimes if the stack is wrong or too big. This patch can limit the debug information in a page of stack. 
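A worked example, assuming 4 KiB pages and using the stack pointer from the oops quoted in the next patch:

	/* PAGE_SIZE = 0x1000, PAGE_MASK = 0xfffff000 (4 KiB pages assumed) */
	unsigned long sp  = 0xada60ab0UL;			/* regs->sp from the oops below */
	unsigned long top = (sp + PAGE_SIZE) & PAGE_MASK;	/* = 0xada61000 */

so dump_mem() now stops at the end of the page containing the stack pointer (0x550 bytes here) instead of walking on to the end of the whole THREAD_SIZE kernel stack.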
Signed-off-by: Greentime Hu --- arch/nds32/kernel/traps.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c index b0b85b7ab079..1496aab48998 100644 --- a/arch/nds32/kernel/traps.c +++ b/arch/nds32/kernel/traps.c @@ -173,11 +173,10 @@ void die(const char *str, struct pt_regs *regs, int err) pr_emerg("CPU: %i\n", smp_processor_id()); show_regs(regs); pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n", - tsk->comm, tsk->pid, task_thread_info(tsk) + 1); + tsk->comm, tsk->pid, end_of_stack(tsk)); if (!user_mode(regs) || in_interrupt()) { - dump_mem("Stack: ", regs->sp, - THREAD_SIZE + (unsigned long)task_thread_info(tsk)); + dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK); dump_instr(regs); dump_stack(); } -- GitLab From 0cde56e0280d70ce26b54d22131944c2fe584b38 Mon Sep 17 00:00:00 2001 From: Greentime Hu Date: Thu, 23 Aug 2018 15:05:46 +0800 Subject: [PATCH 1032/1692] nds32: Fix a kernel panic issue because of wrong frame pointer access. It can make sure that trace_hardirqs_off/trace_hardirqs_on can get a correct return address by frame pointer through __builtin_return_address() in this fix. Unable to handle kernel paging request at virtual address fffffffc pgd = 3c42e9cf [fffffffc] *pgd=02a9c000 Internal error: Oops: 1 [#1] Modules linked in: CPU: 0 PC is at trace_hardirqs_off+0x78/0xec LP is at common_exception_handler+0xda/0xf4 pc : [] lp : [] Tainted: G W sp : ada60ab0 fp : efcaff48 gp : 3a020490 r25: efcb0000 r24: 00000000 r23: 00000000 r22: 00000000 r21: 00000000 r20: 000700c1 r19: 000700ca r18: 3a21b018 r17: 00000001 r16: 00000002 r15: 00000001 r14: 0000002a r13: 3a00a804 r12: ada60ab0 r11: 3a113af8 r10: 3a01c530 r9 : 3a124404 r8 : 00120f9c r7 : b2352eba r6 : 00000000 r5 : 3a126b58 r4 : 00000000 r3 : 3a1726a8 r2 : b2921000 r1 : 00000000 r0 : 00000000 IRQs off Segment user Process init (pid: 1, stack limit = 0x069d7f15) Stack: (0xada60ab0 to 0xada61000) Stack: 0aa0: 00000000 00000003 3a110000 0011f000 Stack: 0ac0: 00000005 00000000 00000000 00000000 ada60b10 3a01fe68 ada60b0c ada60b08 Stack: 0ae0: 00000000 ada60ab8 ada60b30 3a020550 00000000 00000001 3a11c2f8 3a01c6e8 Stack: 0b00: 3a01cb80 fffffba8 3a113af8 3a21b018 3a122c28 00003ec4 00000165 00000000 Stack: 0b20: 3a126aec 0000006c 00000000 00000001 3a01fe68 00000000 00000003 00000000 Stack: 0b40: 00000001 000003f8 3a020930 3a01c530 00000008 ada60c18 3a020490 3a003120 Stack: 0b60: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0b80: 00000000 00000000 00000000 00000000 ffff8000 00000000 00000000 00000000 Stack: 0ba0: 00000000 00000001 3a020550 00000000 3a01d020 00000000 fffff000 fffff000 Stack: 0bc0: 00000000 00000000 00000000 00000000 ada60f2c 00000000 00000001 00000000 Stack: 0be0: 00000000 00000000 3a01fe68 fffffab0 00008034 00000008 3a0010cc 3a01fe68 Stack: 0c00: 00000000 00000000 00000001 ada60c88 3a020490 3a0139d4 0009dc6f 00000000 Stack: 0c20: 00000000 00000000 ada60fce fffff000 00000000 0000ebe0 3a020038 3a020550 Stack: 0c40: ada60f20 ada60c90 3a0007f0 3a0002a8 ada60c8c 00000000 00000000 ada60c88 Stack: 0c60: 3a020490 3a004570 00000000 00000000 ada60f20 3a0007f0 3a000000 00000000 Stack: 0c80: 3a020490 3a004850 00000000 3a013f24 3a000000 00000000 3a01ff44 00000000 Stack: 0ca0: 00000000 00000000 00000000 00000000 00000000 00000000 3a01ff84 3a01ff7c Stack: 0cc0: 3a01ff4c 3a01ff5c 3a01ff64 3a01ff9c 3a01ffa4 3a01ffac 3a01ff6c 3a01ff74 Stack: 0ce0: 00000000 00000000 3a01ff44 00000000 00000000 00000000 00000000 
00000000 Stack: 0d00: 3a01ff8c 00000000 00000000 3a01ff94 00000000 00000000 00000000 00000000 Stack: 0d20: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0d40: 3a01ffbc 3a01ffb4 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0d60: 00000000 00000000 00000000 00000000 00000000 3a01ffc4 00000000 00000000 Stack: 0d80: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0da0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0dc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 3a01ff54 Stack: 0de0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0e00: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0e20: 00000000 00000004 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0e40: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0e60: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0e80: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0ea0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Stack: 0ec0: 00000000 00000000 00000000 00000000 ffffffff 00000000 00000000 00000000 Stack: 0ee0: 00000000 00000000 00000000 00000000 ada60f20 00000000 00000000 00000000 Stack: 0f00: 00000000 00000000 00000000 00000000 00000000 00000000 3a020490 3a000b24 Stack: 0f20: 00000001 ada60fde 00000000 ada60fe4 ada60feb 00000000 00000021 3a038000 Stack: 0f40: 00000010 0009dc6f 00000006 00001000 00000011 00000064 00000003 00008034 Stack: 0f60: 00000004 00000020 00000005 00000008 00000007 3a000000 00000008 00000000 Stack: 0f80: 00000009 0000ebe0 0000000b 00000000 0000000c 00000000 0000000d 00000000 Stack: 0fa0: 0000000e 00000000 00000017 00000000 00000019 ada60fce 0000001f ada60ff6 Stack: 0fc0: 00000000 00000000 00000000 b5010000 fa839914 23b5dd89 a2aea540 692fc82e Stack: 0fe0: 0074696e 454d4f48 54002f3d 3d4d5245 756e696c 692f0078 0074696e 00000000 CPU: 0 PID: 1 Comm: init Tainted: G W 4.18.0-00015-g1888b64a2558-dirty #112 Hardware name: andestech,ae3xx (DT) Call Trace: [] dump_stack+0x2c/0x38 [] die+0x128/0x18c [] do_page_fault+0x3b8/0x4e0 [] ret_from_exception+0x0/0x10 [] common_exception_handler+0xda/0xf4 Signed-off-by: Greentime Hu --- arch/nds32/kernel/ex-entry.S | 2 +- arch/nds32/kernel/ex-exit.S | 4 ++-- arch/nds32/kernel/ftrace.c | 12 ++++++++++++ 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S index b8ae4e9a6b93..21a144071566 100644 --- a/arch/nds32/kernel/ex-entry.S +++ b/arch/nds32/kernel/ex-entry.S @@ -118,7 +118,7 @@ common_exception_handler: /* interrupt */ 2: #ifdef CONFIG_TRACE_IRQFLAGS - jal trace_hardirqs_off + jal __trace_hardirqs_off #endif move $r0, $sp sethi $lp, hi20(ret_from_intr) diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S index 03e4f7788a18..f00af92f7e22 100644 --- a/arch/nds32/kernel/ex-exit.S +++ b/arch/nds32/kernel/ex-exit.S @@ -138,8 +138,8 @@ no_work_pending: #ifdef CONFIG_TRACE_IRQFLAGS lwi $p0, [$sp+(#IPSW_OFFSET)] andi $p0, $p0, #0x1 - la $r10, trace_hardirqs_off - la $r9, trace_hardirqs_on + la $r10, __trace_hardirqs_off + la $r9, __trace_hardirqs_on cmovz $r9, $p0, $r10 jral $r9 #endif diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index a646a8339052..a0a9679ad5de 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -295,3 +295,15 @@ int 
ftrace_disable_ftrace_graph_caller(void) #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + + +#ifdef CONFIG_TRACE_IRQFLAGS +noinline void __trace_hardirqs_off(void) +{ + trace_hardirqs_off(); +} +noinline void __trace_hardirqs_on(void) +{ + trace_hardirqs_on(); +} +#endif /* CONFIG_TRACE_IRQFLAGS */ -- GitLab From ec865393292f5ad8d52da20788b3685ebce44c48 Mon Sep 17 00:00:00 2001 From: Greentime Hu Date: Tue, 28 Aug 2018 16:07:39 +0800 Subject: [PATCH 1033/1692] nds32: fix build error because of wrong semicolon It shall be removed in the define usage. We shall not put a semicolon there. /kisskb/src/arch/nds32/include/asm/elf.h:126:29: error: expected '}' before ';' token #define ELF_DATA ELFDATA2LSB; ^ /kisskb/src/fs/proc/kcore.c:318:17: note: in expansion of macro 'ELF_DATA' [EI_DATA] = ELF_DATA, ^~~~~~~~ /kisskb/src/fs/proc/kcore.c:312:15: note: to match this '{' .e_ident = { ^ /kisskb/src/scripts/Makefile.build:307: recipe for target 'fs/proc/kcore.o' failed Signed-off-by: Greentime Hu --- arch/nds32/include/asm/elf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h index 56c479058802..f5f9cf7e0544 100644 --- a/arch/nds32/include/asm/elf.h +++ b/arch/nds32/include/asm/elf.h @@ -121,9 +121,9 @@ struct elf32_hdr; */ #define ELF_CLASS ELFCLASS32 #ifdef __NDS32_EB__ -#define ELF_DATA ELFDATA2MSB; +#define ELF_DATA ELFDATA2MSB #else -#define ELF_DATA ELFDATA2LSB; +#define ELF_DATA ELFDATA2LSB #endif #define ELF_ARCH EM_NDS32 #define USE_ELF_CORE_DUMP -- GitLab From 0a1b60d76b0abcc2a0de4eb96d5dd379cd855f30 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 31 Aug 2018 10:58:52 +0800 Subject: [PATCH 1034/1692] drm/i915/gvt: Fix life cycle reference on KVM mm Handle guest mm access life cycle properly with mmget()/mmput(). As noted by Linus, use_mm() depends on valid live page table but KVM's mmgrab() doesn't guarantee that. As vGPU usage depends on guest VM life cycle, need to make sure to use mmget()/mmput() to guarantee VM address access. v3: fix build v2: v1 caused a weird dependence issue which failed for vfio device release, which result invalid mdev vgpu and kvm state without proper release taken. This trys to put right reference around VM address space access instead. Cc: Linus Torvalds Cc: Paolo Bonzini Cc: Zhi Wang Reviewed-by: Zhi Wang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index a45f46d8537f..c7afee37b2b8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, info = (struct kvmgt_guest_info *)handle; kvm = info->kvm; - if (kthread) + if (kthread) { + if (!mmget_not_zero(kvm->mm)) + return -EFAULT; use_mm(kvm->mm); + } idx = srcu_read_lock(&kvm->srcu); ret = write ? 
kvm_write_guest(kvm, gpa, buf, len) : kvm_read_guest(kvm, gpa, buf, len); srcu_read_unlock(&kvm->srcu, idx); - if (kthread) + if (kthread) { unuse_mm(kvm->mm); + mmput(kvm->mm); + } return ret; } -- GitLab From b81126e01a8c6048249955feea46c8217ebefa91 Mon Sep 17 00:00:00 2001 From: Ingo Franzki Date: Mon, 27 Aug 2018 14:28:47 +0200 Subject: [PATCH 1035/1692] s390/crypto: Fix return code checking in cbc_paes_crypt() The return code of cpacf_kmc() is less than the number of bytes to process in case of an error, not greater. The crypt routines for the other cipher modes already have this correctly. Cc: stable@vger.kernel.org # v4.11+ Fixes: 279378430768 ("s390/crypt: Add protected key AES module") Signed-off-by: Ingo Franzki Acked-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky --- arch/s390/crypto/paes_s390.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index 80b27294c1de..ab9a0ebecc19 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, walk->dst.virt.addr, walk->src.virt.addr, n); if (k) ret = blkcipher_walk_done(desc, walk, nbytes - k); - if (n < k) { + if (k < n) { if (__cbc_paes_set_key(ctx) != 0) return blkcipher_walk_done(desc, walk, -EIO); memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); -- GitLab From a11bdb1a6b782ee97587f92fae798efc78c31093 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Thu, 30 Aug 2018 10:13:55 +0200 Subject: [PATCH 1036/1692] KVM: s390: Fix pfmf and conditional skey emulation We should not return with a lock. We also have to increase the address when we do page clearing. Fixes: bd096f644319 ("KVM: s390: Add skey emulation fault handling") Signed-off-by: Janosch Frank Message-Id: <20180830081355.59234-1-frankja@linux.ibm.com> Reviewed-by: David Hildenbrand Signed-off-by: Christian Borntraeger --- arch/s390/kvm/priv.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d68f10441a16..8679bd74d337 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -280,9 +280,11 @@ static int handle_iske(struct kvm_vcpu *vcpu) goto retry; } } - if (rc) - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); up_read(¤t->mm->mmap_sem); + if (rc == -EFAULT) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + if (rc < 0) + return rc; vcpu->run->s.regs.gprs[reg1] &= ~0xff; vcpu->run->s.regs.gprs[reg1] |= key; return 0; @@ -324,9 +326,11 @@ static int handle_rrbe(struct kvm_vcpu *vcpu) goto retry; } } - if (rc < 0) - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); up_read(¤t->mm->mmap_sem); + if (rc == -EFAULT) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + if (rc < 0) + return rc; kvm_s390_set_psw_cc(vcpu, rc); return 0; } @@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu) FAULT_FLAG_WRITE, &unlocked); rc = !rc ? -EAGAIN : rc; } + up_read(¤t->mm->mmap_sem); if (rc == -EFAULT) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - - up_read(¤t->mm->mmap_sem); - if (rc >= 0) - start += PAGE_SIZE; + if (rc < 0) + return rc; + start += PAGE_SIZE; } if (m3 & (SSKE_MC | SSKE_MR)) { @@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) FAULT_FLAG_WRITE, &unlocked); rc = !rc ? 
-EAGAIN : rc; } + up_read(¤t->mm->mmap_sem); if (rc == -EFAULT) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - - up_read(¤t->mm->mmap_sem); - if (rc >= 0) - start += PAGE_SIZE; + if (rc == -EAGAIN) + continue; + if (rc < 0) + return rc; } + start += PAGE_SIZE; } if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { -- GitLab From 204c97245612b6c255edf4e21e24d417c4a0c008 Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Thu, 23 Aug 2018 12:25:54 +0200 Subject: [PATCH 1037/1692] KVM: s390: vsie: copy wrapping keys to right place Copy the key mask to the right offset inside the shadow CRYCB Fixes: bbeaa58b3 ("KVM: s390: vsie: support aes dea wrapping keys") Signed-off-by: Pierre Morel Reviewed-by: David Hildenbrand Reviewed-by: Cornelia Huck Reviewed-by: Janosch Frank Cc: stable@vger.kernel.org # v4.8+ Message-Id: <1535019956-23539-2-git-send-email-pmorel@linux.ibm.com> Signed-off-by: Janosch Frank Signed-off-by: Christian Borntraeger --- arch/s390/kvm/vsie.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 63844b95c22c..a2b28cd1e3fe 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) return set_validity_icpt(scb_s, 0x0039U); /* copy only the wrapping keys */ - if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56)) + if (read_guest_real(vcpu, crycb_addr + 72, + vsie_page->crycb.dea_wrapping_key_mask, 56)) return set_validity_icpt(scb_s, 0x0035U); scb_s->ecb3 |= ecb3_flags; -- GitLab From df88f3181f10565c6e3a89eb6f0f9e6afaaf15f1 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Thu, 30 Aug 2018 16:14:18 +0200 Subject: [PATCH 1038/1692] KVM: s390: Properly lock mm context allow_gmap_hpage_1m setting We have to do down_write on the mm semaphore to set a bitfield in the mm context. Signed-off-by: Janosch Frank Fixes: a4499382 ("KVM: s390: Add huge page enablement control") Reviewed-by: Christian Borntraeger Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/mmu.h | 8 +++++++- arch/s390/kvm/kvm-s390.c | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index f31a15044c24..a8418e1379eb 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -16,7 +16,13 @@ typedef struct { unsigned long asce; unsigned long asce_limit; unsigned long vdso_base; - /* The mmu context allocates 4K page tables. */ + /* + * The following bitfields need a down_write on the mm + * semaphore when they are written to. As they are only + * written once, they can be read without a lock. + * + * The mmu context allocates 4K page tables. + */ unsigned int alloc_pgste:1; /* The mmu context uses extended page tables. */ unsigned int has_pgste:1; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 91ad4a9425c0..f69333fd2fa3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -695,7 +695,9 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) r = -EINVAL; else { r = 0; + down_write(&kvm->mm->mmap_sem); kvm->mm->context.allow_gmap_hpage_1m = 1; + up_write(&kvm->mm->mmap_sem); /* * We might have to create fake 4k page * tables. 
To avoid that the hardware works on -- GitLab From 9f9d594d952abad06f31ed65f29855f3b99a3c17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 3 Sep 2018 17:28:41 +0300 Subject: [PATCH 1039/1692] drm/i915: Fix ICL+ HDMI clock readout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Copy the 38.4 vs. 19.2 MHz ref clock exception from the dpll mgr into the clock readout function as well. v2: Refactor the code into a common function s/is_icl/gen11+/ (Rodrigo) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107722 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180903142841.14627-1-ville.syrjala@linux.intel.com Reviewed-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_ddi.c | 2 +- drivers/gpu/drm/i915/intel_dpll_mgr.c | 23 +++++++++++++++-------- drivers/gpu/drm/i915/intel_dpll_mgr.h | 1 + 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index dcb1a98d624d..cd01a09c5e0f 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1414,7 +1414,7 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, break; } - ref_clock = dev_priv->cdclk.hw.ref; + ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 04d41bc1a4bb..e6cac9225536 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -2212,6 +2212,20 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, params->dco_fraction = dco & 0x7fff; } +int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv) +{ + int ref_clock = dev_priv->cdclk.hw.ref; + + /* + * For ICL+, the spec states: if reference frequency is 38.4, + * use 19.2 because the DPLL automatically divides that by 2. + */ + if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400) + ref_clock = 19200; + + return ref_clock; +} + static bool cnl_ddi_calculate_wrpll(int clock, struct drm_i915_private *dev_priv, @@ -2251,14 +2265,7 @@ cnl_ddi_calculate_wrpll(int clock, cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv); - ref_clock = dev_priv->cdclk.hw.ref; - - /* - * For ICL, the spec states: if reference frequency is 38.4, use 19.2 - * because the DPLL automatically divides that by 2. - */ - if (IS_ICELAKE(dev_priv) && ref_clock == 38400) - ref_clock = 19200; + ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, kdiv); diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index 7e522cf4f13f..bf0de8a4dc63 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -344,5 +344,6 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state); int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, uint32_t pll_id); +int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); #endif /* _INTEL_DPLL_MGR_H_ */ -- GitLab From 7ef4ac6ed9eddd12c48020998d98647d2d85bdb1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Sep 2018 12:17:32 +0100 Subject: [PATCH 1040/1692] drm/i915: Double check we didn't miss an unclaimed register access Currently, if the user has enabled mmio-debug around each register access, we presume that we have then checked them all. 
However, it is still possible through omission (raw register access) or external interaction that the unclaimed access was not highlighted. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180904111732.24266-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 20f2f5ad9c3f..05f0cda18501 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -2283,15 +2283,16 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) { - if (unlikely(i915_modparams.mmio_debug || - dev_priv->uncore.unclaimed_mmio_check <= 0)) + if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0)) return false; if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { - DRM_DEBUG("Unclaimed register detected, " - "enabling oneshot unclaimed register reporting. " - "Please use i915.mmio_debug=N for more information.\n"); - i915_modparams.mmio_debug++; + if (!i915_modparams.mmio_debug) { + DRM_DEBUG("Unclaimed register detected, " + "enabling oneshot unclaimed register reporting. " + "Please use i915.mmio_debug=N for more information.\n"); + i915_modparams.mmio_debug++; + } dev_priv->uncore.unclaimed_mmio_check--; return true; } -- GitLab From 06348d3086a3b34f2db6c7692b4327fb7fc0b6c7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Sep 2018 07:38:02 +0100 Subject: [PATCH 1041/1692] drm/i915/ringbuffer: Move double invalidate to after pd flush Continuing the fun of trying to find exactly the delay that is sufficient to ensure that the page directory is fully loaded between context switches, move the extra flush added in commit 70b73f9ac113 ("drm/i915/ringbuffer: Delay after invalidating gen6+ xcs") to just after we flush the pd. Entirely based on the empirical data of running failing tests in a loop until we survive a day (before the mtbf is 10-30 minutes). Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107769 References: 70b73f9ac113 ("drm/i915/ringbuffer: Delay after invalidating gen6+ xcs") Signed-off-by: Chris Wilson Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180904063802.13880-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_ringbuffer.c | 40 ++++++++++++++----------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 86604dd1c5a5..472939f5c18f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1707,9 +1707,29 @@ static int switch_context(struct i915_request *rq) } if (ppgtt) { + ret = engine->emit_flush(rq, EMIT_INVALIDATE); + if (ret) + goto err_mm; + ret = flush_pd_dir(rq); if (ret) goto err_mm; + + /* + * Not only do we need a full barrier (post-sync write) after + * invalidating the TLBs, but we need to wait a little bit + * longer. Whether this is merely delaying us, or the + * subsequent flush is a key part of serialising with the + * post-sync op, this extra pass appears vital before a + * mm switch! 
+ */ + ret = engine->emit_flush(rq, EMIT_INVALIDATE); + if (ret) + goto err_mm; + + ret = engine->emit_flush(rq, EMIT_FLUSH); + if (ret) + goto err_mm; } if (ctx->remap_slice) { @@ -1947,7 +1967,7 @@ static void gen6_bsd_submit_request(struct i915_request *request) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static int emit_mi_flush_dw(struct i915_request *rq, u32 flags) +static int mi_flush_dw(struct i915_request *rq, u32 flags) { u32 cmd, *cs; @@ -1985,23 +2005,7 @@ static int emit_mi_flush_dw(struct i915_request *rq, u32 flags) static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags) { - int err; - - /* - * Not only do we need a full barrier (post-sync write) after - * invalidating the TLBs, but we need to wait a little bit - * longer. Whether this is merely delaying us, or the - * subsequent flush is a key part of serialising with the - * post-sync op, this extra pass appears vital before a - * mm switch! - */ - if (mode & EMIT_INVALIDATE) { - err = emit_mi_flush_dw(rq, invflags); - if (err) - return err; - } - - return emit_mi_flush_dw(rq, 0); + return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0); } static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) -- GitLab From b212f0a470eeb62a8eaa95f51b3cdbc457f687a8 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 3 Sep 2018 12:30:07 +0100 Subject: [PATCH 1042/1692] drm/i915/icl: Fix context RPCS programming There are two issues with the current RPCS programming for Icelake: Expansion of the slice count bitfield has been missed, as well as the required programming workaround for the subslice count bitfield size limitation. 1) Bitfield width for configuring the active slice count has grown so we need to program the GEN8_R_PWR_CLK_STATE accordingly. Current code was always requesting eight times the number of slices (due writing to a bitfield starting three bits higher than it should). These requests were luckily a) capped by the hardware to the available number of slices, and b) we haven't yet exported the code to ask for reduced slice configurations. Due both of the above there was no impact from this incorrect programming but we should still fix it. 2) Due subslice count bitfield being only three bits wide and furthermore capped to a maximum documented value of four, special programming workaround is needed to enable more than four subslices. With this programming driver has to consider the GT configuration as 2x4x8, while the hardware internally translates this to 1x8x8. A limitation stemming from this is that either a subslice count between one and four can be selected, or a subslice count equaling the total number of subslices in all selected slices. In other words, odd subslice counts greater than four are impossible, as are odd subslice counts greater than a single slice subslice count. This also had no impact in the current code base due breakage from 1) always reqesting more than one slice. While fixing this we also add some asserts to flag up any future bitfield overflows. v2: * Use a local in all branches for clarity. 
(Lionel) Signed-off-by: Tvrtko Ursulin Bspec: 12247 Reported-by: tony.ye@intel.com Suggested-by: Lionel Landwerlin Cc: Lionel Landwerlin Cc: tony.ye@intel.com Cc: Mika Kuoppala Reviewed-by: Lionel Landwerlin Link: https://patchwork.freedesktop.org/patch/msgid/20180903113007.2643-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_reg.h | 2 + drivers/gpu/drm/i915/intel_lrc.c | 87 +++++++++++++++++++++++++++----- 2 files changed, 76 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f2321785cbd6..09bc8e730ee1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -344,6 +344,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_RPCS_S_CNT_ENABLE (1 << 18) #define GEN8_RPCS_S_CNT_SHIFT 15 #define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT) +#define GEN11_RPCS_S_CNT_SHIFT 12 +#define GEN11_RPCS_S_CNT_MASK (0x3f << GEN11_RPCS_S_CNT_SHIFT) #define GEN8_RPCS_SS_CNT_ENABLE (1 << 11) #define GEN8_RPCS_SS_CNT_SHIFT 8 #define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index f8ceb9c99dd6..def467c2451b 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2480,6 +2480,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine) static u32 make_rpcs(struct drm_i915_private *dev_priv) { + bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg; + u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask); + u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]); u32 rpcs = 0; /* @@ -2489,6 +2492,38 @@ make_rpcs(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) < 9) return 0; + /* + * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits + * wide and Icelake has up to eight subslices, specfial programming is + * needed in order to correctly enable all subslices. + * + * According to documentation software must consider the configuration + * as 2x4x8 and hardware will translate this to 1x8x8. + * + * Furthemore, even though SScount is three bits, maximum documented + * value for it is four. From this some rules/restrictions follow: + * + * 1. + * If enabled subslice count is greater than four, two whole slices must + * be enabled instead. + * + * 2. + * When more than one slice is enabled, hardware ignores the subslice + * count altogether. + * + * From these restrictions it follows that it is not possible to enable + * a count of subslices between the SScount maximum of four restriction, + * and the maximum available number on a particular SKU. Either all + * subslices are enabled, or a count between one and four on the first + * slice. + */ + if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) { + GEM_BUG_ON(subslices & 1); + + subslice_pg = false; + slices *= 2; + } + /* * Starting in Gen9, render power gating can leave * slice/subslice/EU in a partially enabled state. We @@ -2496,24 +2531,50 @@ make_rpcs(struct drm_i915_private *dev_priv) * enablement. 
*/ if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) { - rpcs |= GEN8_RPCS_S_CNT_ENABLE; - rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) << - GEN8_RPCS_S_CNT_SHIFT; - rpcs |= GEN8_RPCS_ENABLE; + u32 mask, val = slices; + + if (INTEL_GEN(dev_priv) >= 11) { + mask = GEN11_RPCS_S_CNT_MASK; + val <<= GEN11_RPCS_S_CNT_SHIFT; + } else { + mask = GEN8_RPCS_S_CNT_MASK; + val <<= GEN8_RPCS_S_CNT_SHIFT; + } + + GEM_BUG_ON(val & ~mask); + val &= mask; + + rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val; } - if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) { - rpcs |= GEN8_RPCS_SS_CNT_ENABLE; - rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) << - GEN8_RPCS_SS_CNT_SHIFT; - rpcs |= GEN8_RPCS_ENABLE; + if (subslice_pg) { + u32 val = subslices; + + val <<= GEN8_RPCS_SS_CNT_SHIFT; + + GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK); + val &= GEN8_RPCS_SS_CNT_MASK; + + rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val; } if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) { - rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice << - GEN8_RPCS_EU_MIN_SHIFT; - rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice << - GEN8_RPCS_EU_MAX_SHIFT; + u32 val; + + val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice << + GEN8_RPCS_EU_MIN_SHIFT; + GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK); + val &= GEN8_RPCS_EU_MIN_MASK; + + rpcs |= val; + + val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice << + GEN8_RPCS_EU_MAX_SHIFT; + GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK); + val &= GEN8_RPCS_EU_MAX_MASK; + + rpcs |= val; + rpcs |= GEN8_RPCS_ENABLE; } -- GitLab From 3c398f3c3bef21961eaaeb93227fa66d440dc83d Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Sun, 2 Sep 2018 09:30:58 +0200 Subject: [PATCH 1043/1692] mmc: omap_hsmmc: fix wakeirq handling on removal after unbinding mmc I get things like this: [ 185.294067] mmc1: card 0001 removed [ 185.305206] omap_hsmmc 480b4000.mmc: wake IRQ with no resume: -13 The wakeirq stays in /proc-interrupts rebinding shows this: [ 289.795959] genirq: Flags mismatch irq 112. 0000200a (480b4000.mmc:wakeup) vs. 0000200a (480b4000.mmc:wakeup) [ 289.808959] omap_hsmmc 480b4000.mmc: Unable to request wake IRQ [ 289.815338] omap_hsmmc 480b4000.mmc: no SDIO IRQ support, falling back to polling That bug seems to be introduced by switching from devm_request_irq() to generic wakeirq handling. So let us cleanup at removal. Signed-off-by: Andreas Kemnade Fixes: 5b83b2234be6 ("mmc: omap_hsmmc: Change wake-up interrupt to use generic wakeirq") Cc: stable@vger.kernel.org # v4.2+ Signed-off-by: Ulf Hansson --- drivers/mmc/host/omap_hsmmc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 071693ebfe18..68760d4a5d3d 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev) dma_release_channel(host->tx_chan); dma_release_channel(host->rx_chan); + dev_pm_clear_wake_irq(host->dev); pm_runtime_dont_use_autosuspend(host->dev); pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); -- GitLab From a167b1e1319cac8894a88e9ea05a13be05b46d87 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Sep 2018 14:12:07 +0100 Subject: [PATCH 1044/1692] drm/i915: Pull intel_uncore_arm_unclaimed_mmio_detection() under the spinlock Elsewhere we manipulate uncore.unclaimed_mmio_check and i915_param.mmio_debug under the irq lock (e.g. 
preserving the current value across a user forcewake grab), but do not protect the manipulation inside intel_uncore_arm_unclaimed_mmio_detection() from concurrent access, even from itself. This is an issue as we do call arm_unclaimed_mmio_detection from multiple threads without coordination. Suggested-by: Mika Kuoppala Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180904131207.17563-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 05f0cda18501..3ad302c66254 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -2283,8 +2283,12 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) { + bool ret = false; + + spin_lock_irq(&dev_priv->uncore.lock); + if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0)) - return false; + goto out; if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { if (!i915_modparams.mmio_debug) { @@ -2294,10 +2298,13 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) i915_modparams.mmio_debug++; } dev_priv->uncore.unclaimed_mmio_check--; - return true; + ret = true; } - return false; +out: + spin_unlock_irq(&dev_priv->uncore.lock); + + return ret; } static enum forcewake_domains -- GitLab From e4b069e0945fa14c71cf8b5b89f8b1b2aa68dbc2 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 22 Aug 2018 12:45:51 -0400 Subject: [PATCH 1045/1692] dm verity: fix crash on bufio buffer that was allocated with vmalloc Since commit d1ac3ff008fb ("dm verity: switch to using asynchronous hash crypto API") dm-verity uses asynchronous crypto calls for verification, so that it can use hardware with asynchronous processing of crypto operations. These asynchronous calls don't support vmalloc memory, but the buffer data can be allocated with vmalloc if dm-bufio is short of memory and uses a reserved buffer that was preallocated in dm_bufio_client_create(). Fix verity_hash_update() so that it deals with vmalloc'd memory correctly. 
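As an aside on the approach taken here: a vmalloc'd buffer is only virtually contiguous, so each scatterlist entry handed to the async hash may cover at most one page, and every step is therefore capped at the distance to the next page boundary. Below is a minimal standalone C sketch of that chunking rule; PAGE_SIZE, offset_in_page() and the chunk handler are simplified stand-ins for the kernel definitions, not the dm-verity code itself.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

static void hash_one_page_chunk(const char *data, size_t len)
{
	/* stand-in for building a single-page scatterlist entry and
	 * feeding it to the asynchronous hash */
	printf("chunk: page offset %4lu, len %4zu\n",
	       offset_in_page(data), len);
}

int main(void)
{
	static char buf[3 * 4096];
	const char *data = buf + 100;		/* deliberately misaligned */
	size_t len = 2 * 4096 + 50;

	while (len) {
		size_t this_step = PAGE_SIZE - offset_in_page(data);

		if (this_step > len)
			this_step = len;
		hash_one_page_chunk(data, this_step); /* never crosses a page */
		data += this_step;
		len -= this_step;
	}
	return 0;
}

Running the sketch shows one short leading chunk up to the first page boundary, full-page chunks in the middle, and a short tail - exactly the walk the fix performs per scatterlist entry.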
Reported-by: "Xiao, Jin" Signed-off-by: Mikulas Patocka Fixes: d1ac3ff008fb ("dm verity: switch to using asynchronous hash crypto API") Cc: stable@vger.kernel.org # 4.12+ Signed-off-by: Mike Snitzer --- drivers/md/dm-verity-target.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 12decdbd722d..fc65f0dedf7f 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, { struct scatterlist sg; - sg_init_one(&sg, data, len); - ahash_request_set_crypt(req, &sg, NULL, len); - - return crypto_wait_req(crypto_ahash_update(req), wait); + if (likely(!is_vmalloc_addr(data))) { + sg_init_one(&sg, data, len); + ahash_request_set_crypt(req, &sg, NULL, len); + return crypto_wait_req(crypto_ahash_update(req), wait); + } else { + do { + int r; + size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data)); + flush_kernel_vmap_range((void *)data, this_step); + sg_init_table(&sg, 1); + sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data)); + ahash_request_set_crypt(req, &sg, NULL, this_step); + r = crypto_wait_req(crypto_ahash_update(req), wait); + if (unlikely(r)) + return r; + data += this_step; + len -= this_step; + } while (len); + return 0; + } } /* -- GitLab From 851a15114895c5bce163a6f2d57e0aa4658a1be4 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Mon, 3 Sep 2018 11:24:57 +0300 Subject: [PATCH 1046/1692] i2c: i801: fix DNV's SMBCTRL register offset DNV's iTCO is slightly different with SMBCTRL sitting at a different offset when compared to all other devices. Let's fix so that we can properly use iTCO watchdog. Fixes: 84d7f2ebd70d ("i2c: i801: Add support for Intel DNV") Cc: # v4.4+ Signed-off-by: Felipe Balbi Reviewed-by: Jean Delvare Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-i801.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 04b60a349d7e..c91e145ef5a5 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -140,6 +140,7 @@ #define SBREG_BAR 0x10 #define SBREG_SMBCTRL 0xc6000c +#define SBREG_SMBCTRL_DNV 0xcf000c /* Host status bits for SMBPCISTS */ #define SMBPCISTS_INTS BIT(3) @@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv) spin_unlock(&p2sb_spinlock); res = &tco_res[ICH_RES_MEM_OFF]; - res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; + if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS) + res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV; + else + res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; + res->end = res->start + 3; res->flags = IORESOURCE_MEM; -- GitLab From bc811f05d77f47059c197a98b6ad242eb03999cb Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 4 Sep 2018 11:52:34 -0600 Subject: [PATCH 1047/1692] nbd: don't allow invalid blocksize settings syzbot reports a divide-by-zero off the NBD_SET_BLKSIZE ioctl. We need proper validation of the input here. Not just if it's zero, but also if the value is a power-of-2 and in a valid range. Add that. 
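The accepted range can be illustrated with a tiny standalone check mirroring the condition added to the ioctl handler, assuming 4 KiB pages; the kernel uses is_power_of_2() rather than the open-coded bit test below.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages for the illustration */

static bool nbd_blksize_ok(unsigned long arg)
{
	if (!arg || (arg & (arg - 1)))		/* zero, or not a power of two */
		return false;
	return arg >= 512 && arg <= PAGE_SIZE;	/* sector size .. page size */
}

int main(void)
{
	const unsigned long tests[] = { 0, 100, 256, 512, 1024, 4096, 8192 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%5lu -> %s\n", tests[i],
		       nbd_blksize_ok(tests[i]) ? "accepted" : "-EINVAL");
	return 0;
}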
Cc: stable@vger.kernel.org Reported-by: syzbot Reviewed-by: Josef Bacik Signed-off-by: Jens Axboe --- drivers/block/nbd.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 3863c00372bb..14a51254c3db 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, case NBD_SET_SOCK: return nbd_add_socket(nbd, arg, false); case NBD_SET_BLKSIZE: + if (!arg || !is_power_of_2(arg) || arg < 512 || + arg > PAGE_SIZE) + return -EINVAL; nbd_size_set(nbd, arg, div_s64(config->bytesize, arg)); return 0; -- GitLab From 639505d4397b8c654a8e2616f9cb70ece40c83f9 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 3 Sep 2018 18:06:24 +0300 Subject: [PATCH 1048/1692] net/mlx5: Fix SQ offset in QPs with small RQ Correct the formula for calculating the RQ page remainder, which should be in byte granularity. The result will be non-zero only for RQs smaller than PAGE_SIZE, as an RQ size is a power of 2. Divide this by the SQ stride (MLX5_SEND_WQE_BB) to get the SQ offset in strides granularity. Fixes: d7037ad73daa ("net/mlx5: Fix QP fragmented buffer allocation") Signed-off-by: Tariq Toukan Reviewed-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 86478a6b99c5..c8c315eb5128 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, struct mlx5_wq_ctrl *wq_ctrl) { u32 sq_strides_offset; + u32 rq_pg_remainder; int err; mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, MLX5_GET(qpc, qpc, log_rq_size), &wq->rq.fbc); - sq_strides_offset = - ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; + rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE; + sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB; mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), MLX5_GET(qpc, qpc, log_sq_size), -- GitLab From 6d784f1625ea68783cc1fb17de8f6cd3e1660c3f Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 3 Sep 2018 11:08:15 -0700 Subject: [PATCH 1049/1692] act_ife: fix a potential use-after-free Immediately after module_put(), user could delete this module, so e->ops could be already freed before we call e->ops->release(). Fix this by moving module_put() after ops->release(). Fixes: ef6980b6becb ("introduce IFE action") Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- net/sched/act_ife.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 196430aefe87..fc412769a1be 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -400,7 +400,6 @@ static void _tcf_ife_cleanup(struct tc_action *a) struct tcf_meta_info *e, *n; list_for_each_entry_safe(e, n, &ife->metalist, metalist) { - module_put(e->ops->owner); list_del(&e->metalist); if (e->metaval) { if (e->ops->release) @@ -408,6 +407,7 @@ static void _tcf_ife_cleanup(struct tc_action *a) else kfree(e->metaval); } + module_put(e->ops->owner); kfree(e); } } -- GitLab From 84cb8eb26cb9ce3c79928094962a475a9d850a53 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Tue, 4 Sep 2018 00:44:42 +0300 Subject: [PATCH 1050/1692] net: sched: action_ife: take reference to meta module Recent refactoring of add_metainfo() caused use_all_metadata() to add metainfo to ife action metalist without taking reference to module. This causes warning in module_put called from ife action cleanup function. Implement add_metainfo_and_get_ops() function that returns with reference to module taken if metainfo was added successfully, and call it from use_all_metadata(), instead of calling __add_metainfo() directly. Example warning: [ 646.344393] WARNING: CPU: 1 PID: 2278 at kernel/module.c:1139 module_put+0x1cb/0x230 [ 646.352437] Modules linked in: act_meta_skbtcindex act_meta_mark act_meta_skbprio act_ife ife veth nfsv3 nfs fscache xt_CHECKSUM iptable_mangle ipt_MASQUERADE iptable_nat nf_nat_ipv4 nf_nat xt_conntrack nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 libcrc32c tun ebtable_filter ebtables ip6table_filter ip6_tables bridge stp llc mlx5_ib ib_uverbs ib_core intel_rapl sb_edac x86_pkg_temp_thermal mlx5_core coretemp kvm_intel kvm nfsd igb irqbypass crct10dif_pclmul devlink crc32_pclmul mei_me joydev ses crc32c_intel enclosure auth_rpcgss i2c_algo_bit ioatdma ptp mei pps_core ghash_clmulni_intel iTCO_wdt iTCO_vendor_support pcspkr dca ipmi_ssif lpc_ich target_core_mod i2c_i801 ipmi_si ipmi_devintf pcc_cpufreq wmi ipmi_msghandler nfs_acl lockd acpi_pad acpi_power_meter grace sunrpc mpt3sas raid_class scsi_transport_sas [ 646.425631] CPU: 1 PID: 2278 Comm: tc Not tainted 4.19.0-rc1+ #799 [ 646.432187] Hardware name: Supermicro SYS-2028TP-DECR/X10DRT-P, BIOS 2.0b 03/30/2017 [ 646.440595] RIP: 0010:module_put+0x1cb/0x230 [ 646.445238] Code: f3 66 94 02 e8 26 ff fa ff 85 c0 74 11 0f b6 1d 51 30 94 02 80 fb 01 77 60 83 e3 01 74 13 65 ff 0d 3a 83 db 73 e9 2b ff ff ff <0f> 0b e9 00 ff ff ff e8 59 01 fb ff 85 c0 75 e4 48 c7 c2 20 62 6b [ 646.464997] RSP: 0018:ffff880354d37068 EFLAGS: 00010286 [ 646.470599] RAX: 0000000000000000 RBX: ffffffffc0a52518 RCX: ffffffff8c2668db [ 646.478118] RDX: 0000000000000003 RSI: dffffc0000000000 RDI: ffffffffc0a52518 [ 646.485641] RBP: ffffffffc0a52180 R08: fffffbfff814a4a4 R09: fffffbfff814a4a3 [ 646.493164] R10: ffffffffc0a5251b R11: fffffbfff814a4a4 R12: 1ffff1006a9a6e0d [ 646.500687] R13: 00000000ffffffff R14: ffff880362bab890 R15: dead000000000100 [ 646.508213] FS: 00007f4164c99800(0000) GS:ffff88036fe40000(0000) knlGS:0000000000000000 [ 646.516961] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 646.523080] CR2: 00007f41638b8420 CR3: 0000000351df0004 CR4: 00000000001606e0 [ 646.530595] Call Trace: [ 646.533408] ? find_symbol_in_section+0x260/0x260 [ 646.538509] tcf_ife_cleanup+0x11b/0x200 [act_ife] [ 646.543695] tcf_action_cleanup+0x29/0xa0 [ 646.548078] __tcf_action_put+0x5a/0xb0 [ 646.552289] ? 
nla_put+0x65/0xe0 [ 646.555889] __tcf_idr_release+0x48/0x60 [ 646.560187] tcf_generic_walker+0x448/0x6b0 [ 646.564764] ? tcf_action_dump_1+0x450/0x450 [ 646.569411] ? __lock_is_held+0x84/0x110 [ 646.573720] ? tcf_ife_walker+0x10c/0x20f [act_ife] [ 646.578982] tca_action_gd+0x972/0xc40 [ 646.583129] ? tca_get_fill.constprop.17+0x250/0x250 [ 646.588471] ? mark_lock+0xcf/0x980 [ 646.592324] ? check_chain_key+0x140/0x1f0 [ 646.596832] ? debug_show_all_locks+0x240/0x240 [ 646.601839] ? memset+0x1f/0x40 [ 646.605350] ? nla_parse+0xca/0x1a0 [ 646.609217] tc_ctl_action+0x215/0x230 [ 646.613339] ? tcf_action_add+0x220/0x220 [ 646.617748] rtnetlink_rcv_msg+0x56a/0x6d0 [ 646.622227] ? rtnl_fdb_del+0x3f0/0x3f0 [ 646.626466] netlink_rcv_skb+0x18d/0x200 [ 646.630752] ? rtnl_fdb_del+0x3f0/0x3f0 [ 646.634959] ? netlink_ack+0x500/0x500 [ 646.639106] netlink_unicast+0x2d0/0x370 [ 646.643409] ? netlink_attachskb+0x340/0x340 [ 646.648050] ? _copy_from_iter_full+0xe9/0x3e0 [ 646.652870] ? import_iovec+0x11e/0x1c0 [ 646.657083] netlink_sendmsg+0x3b9/0x6a0 [ 646.661388] ? netlink_unicast+0x370/0x370 [ 646.665877] ? netlink_unicast+0x370/0x370 [ 646.670351] sock_sendmsg+0x6b/0x80 [ 646.674212] ___sys_sendmsg+0x4a1/0x520 [ 646.678443] ? copy_msghdr_from_user+0x210/0x210 [ 646.683463] ? lock_downgrade+0x320/0x320 [ 646.687849] ? debug_show_all_locks+0x240/0x240 [ 646.692760] ? do_raw_spin_unlock+0xa2/0x130 [ 646.697418] ? _raw_spin_unlock+0x24/0x30 [ 646.701798] ? __handle_mm_fault+0x1819/0x1c10 [ 646.706619] ? __pmd_alloc+0x320/0x320 [ 646.710738] ? debug_show_all_locks+0x240/0x240 [ 646.715649] ? restore_nameidata+0x7b/0xa0 [ 646.720117] ? check_chain_key+0x140/0x1f0 [ 646.724590] ? check_chain_key+0x140/0x1f0 [ 646.729070] ? __fget_light+0xbc/0xd0 [ 646.733121] ? __sys_sendmsg+0xd7/0x150 [ 646.737329] __sys_sendmsg+0xd7/0x150 [ 646.741359] ? __ia32_sys_shutdown+0x30/0x30 [ 646.746003] ? up_read+0x53/0x90 [ 646.749601] ? __do_page_fault+0x484/0x780 [ 646.754105] ? do_syscall_64+0x1e/0x2c0 [ 646.758320] do_syscall_64+0x72/0x2c0 [ 646.762353] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 646.767776] RIP: 0033:0x7f4163872150 [ 646.771713] Code: 8b 15 3c 7d 2b 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb cd 66 0f 1f 44 00 00 83 3d b9 d5 2b 00 00 75 10 b8 2e 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 be cd 00 00 48 89 04 24 [ 646.791474] RSP: 002b:00007ffdef7d6b58 EFLAGS: 00000246 ORIG_RAX: 000000000000002e [ 646.799721] RAX: ffffffffffffffda RBX: 0000000000000024 RCX: 00007f4163872150 [ 646.807240] RDX: 0000000000000000 RSI: 00007ffdef7d6bd0 RDI: 0000000000000003 [ 646.814760] RBP: 000000005b8b9482 R08: 0000000000000001 R09: 0000000000000000 [ 646.822286] R10: 00000000000005e7 R11: 0000000000000246 R12: 00007ffdef7dad20 [ 646.829807] R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000679bc0 [ 646.837360] irq event stamp: 6083 [ 646.841043] hardirqs last enabled at (6081): [] __call_rcu+0x17d/0x500 [ 646.849882] hardirqs last disabled at (6083): [] trace_hardirqs_off_thunk+0x1a/0x1c [ 646.859775] softirqs last enabled at (5968): [] __do_softirq+0x4a1/0x6ee [ 646.868784] softirqs last disabled at (6082): [] tcf_ife_cleanup+0x39/0x200 [act_ife] [ 646.878845] ---[ end trace b1b8c12ffe51e657 ]--- Fixes: 5ffe57da29b3 ("act_ife: fix a potential deadlock") Signed-off-by: Vlad Buslov Acked-by: Cong Wang Signed-off-by: David S. 
Miller --- net/sched/act_ife.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index fc412769a1be..06a3d4801878 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -326,6 +326,20 @@ static int __add_metainfo(const struct tcf_meta_ops *ops, return ret; } +static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops, + struct tcf_ife_info *ife, u32 metaid, + bool exists) +{ + int ret; + + if (!try_module_get(ops->owner)) + return -ENOENT; + ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists); + if (ret) + module_put(ops->owner); + return ret; +} + static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, int len, bool exists) { @@ -349,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists) read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { - rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists); + rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists); if (rc == 0) installed += 1; } -- GitLab From a33710bdb6b284f8f1e24f1119d167037b374ebb Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 4 Sep 2018 04:23:56 +0200 Subject: [PATCH 1051/1692] net: phy: sfp: Handle unimplemented hwmon limits and alarms Not all SFPs implement the registers containing sensor limits and alarms. Luckily, there is a bit indicating if they are implemented or not. Add checking for this bit, when deciding if the hwmon attributes should be visible. Fixes: 1323061a018a ("net: phy: sfp: Add HWMON support for module sensors") Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/sfp.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4637d980310e..52fffb98fde9 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, switch (type) { case hwmon_temp: switch (attr) { - case hwmon_temp_input: case hwmon_temp_min_alarm: case hwmon_temp_max_alarm: case hwmon_temp_lcrit_alarm: @@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_temp_max: case hwmon_temp_lcrit: case hwmon_temp_crit: + if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) + return 0; + /* fall through */ + case hwmon_temp_input: return 0444; default: return 0; } case hwmon_in: switch (attr) { - case hwmon_in_input: case hwmon_in_min_alarm: case hwmon_in_max_alarm: case hwmon_in_lcrit_alarm: @@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_in_max: case hwmon_in_lcrit: case hwmon_in_crit: + if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) + return 0; + /* fall through */ + case hwmon_in_input: return 0444; default: return 0; } case hwmon_curr: switch (attr) { - case hwmon_curr_input: case hwmon_curr_min_alarm: case hwmon_curr_max_alarm: case hwmon_curr_lcrit_alarm: @@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data, case hwmon_curr_max: case hwmon_curr_lcrit: case hwmon_curr_crit: + if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) + return 0; + /* fall through */ + case hwmon_curr_input: return 0444; default: return 0; @@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data, channel == 1) return 0; switch (attr) { - case hwmon_power_input: case hwmon_power_min_alarm: case hwmon_power_max_alarm: case hwmon_power_lcrit_alarm: @@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const 
void *data, case hwmon_power_max: case hwmon_power_lcrit: case hwmon_power_crit: + if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN)) + return 0; + /* fall through */ + case hwmon_power_input: return 0444; default: return 0; -- GitLab From ec6adef5fbc3f140c70e7499fdad818acb3a46c6 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Tue, 4 Sep 2018 15:31:12 +0200 Subject: [PATCH 1052/1692] HID: multitouch: fix Elan panels with 2 input modes declaration When implementing commit 7f81c8db5489 ("HID: multitouch: simplify the settings of the various features"), I wrongly removed a test that made sure we never try to set the second InputMode feature to something else than 0. This broke badly some recent Elan panels that now forget to send the click button in some area of the touchpad. Link: https://bugzilla.kernel.org/show_bug.cgi?id=200899 Fixes: 7f81c8db5489 ("HID: multitouch: simplify the settings of the various features") Cc: stable@vger.kernel.org # v4.18+ Signed-off-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-multitouch.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 40fbb7c52723..88da991ef256 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev, struct hid_usage *usage, enum latency_mode latency, bool surface_switch, - bool button_switch) + bool button_switch, + bool *inputmode_found) { struct mt_device *td = hid_get_drvdata(hdev); struct mt_class *cls = &td->mtclass; @@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev, switch (usage->hid) { case HID_DG_INPUTMODE: + /* + * Some elan panels wrongly declare 2 input mode features, + * and silently ignore when we set the value in the second + * field. Skip the second feature and hope for the best. + */ + if (*inputmode_found) + return false; + if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) { report_len = hid_report_len(report); buf = hid_alloc_report_buf(report, GFP_KERNEL); @@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev, } field->value[index] = td->inputmode_value; + *inputmode_found = true; return true; case HID_DG_CONTACTMAX: @@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency, struct hid_usage *usage; int i, j; bool update_report; + bool inputmode_found = false; rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; list_for_each_entry(rep, &rep_enum->report_list, list) { @@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency, usage, latency, surface_switch, - button_switch)) + button_switch, + &inputmode_found)) update_report = true; } } -- GitLab From 0d6c3011409135ea84e2a231b013a22017ff999a Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Tue, 4 Sep 2018 15:31:14 +0200 Subject: [PATCH 1053/1692] HID: core: fix grouping by application commit f07b3c1da92d ("HID: generic: create one input report per application type") was effectively the same as MULTI_INPUT: hidinput->report was never set, so hidinput_match_application() always returned null. Fix that by testing against the real application. Note that this breaks some old eGalax touchscreens that expect MULTI_INPUT instead of HID_QUIRK_INPUT_PER_APP. Enable this quirk for backward compatibility on all non-Win8 touchscreens. 
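The core of the fix is that each input now records the application usage it was created for, and later reports are matched on that stored value instead of a report back-pointer that was never initialised. A simplified, self-contained sketch of that matching follows; the types, names and usage values are illustrative stand-ins, not the HID core API.

#include <stddef.h>
#include <stdio.h>

struct input_stub {
	unsigned int application;	/* usage page << 16 | usage id */
	const char *name;
};

static struct input_stub inputs[] = {
	{ 0x000d0004, "touchscreen" },		/* digitizer / touch screen */
	{ 0x000d000e, "device configuration" },
};

static struct input_stub *match_by_application(unsigned int application)
{
	for (size_t i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++)
		if (inputs[i].application == application)
			return &inputs[i];
	return NULL;	/* no match: the core would allocate a new input */
}

int main(void)
{
	const struct input_stub *in = match_by_application(0x000d0004);

	printf("report routed to: %s\n", in ? in->name : "(new input)");
	return 0;
}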
link: https://bugzilla.kernel.org/show_bug.cgi?id=200847 link: https://bugzilla.kernel.org/show_bug.cgi?id=200849 link: https://bugs.archlinux.org/task/59699 link: https://github.com/NixOS/nixpkgs/issues/45165 Cc: stable@vger.kernel.org # v4.18+ Signed-off-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-input.c | 4 ++-- drivers/hid/hid-multitouch.c | 3 +++ include/linux/hid.h | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index ac201817a2dd..a481eaf39e88 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid, input_dev->dev.parent = &hid->dev; hidinput->input = input_dev; + hidinput->application = application; list_add_tail(&hidinput->list, &hid->inputs); INIT_LIST_HEAD(&hidinput->reports); @@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report) struct hid_input *hidinput; list_for_each_entry(hidinput, &hid->inputs, list) { - if (hidinput->report && - hidinput->report->application == report->application) + if (hidinput->application == report->application) return hidinput; } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 88da991ef256..da954f3f4da7 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1697,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) */ hdev->quirks |= HID_QUIRK_INPUT_PER_APP; + if (id->group != HID_GROUP_MULTITOUCH_WIN_8) + hdev->quirks |= HID_QUIRK_MULTI_INPUT; + timer_setup(&td->release_timer, mt_expired_timeout, 0); ret = hid_parse(hdev); diff --git a/include/linux/hid.h b/include/linux/hid.h index 834e6461a690..d44a78362942 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -526,6 +526,7 @@ struct hid_input { const char *name; bool registered; struct list_head reports; /* the list of reports */ + unsigned int application; /* application usage for this input */ }; enum hid_type { -- GitLab From 2820a708d5a321342bef34e459fdc8679c30e20f Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Mon, 30 Jul 2018 19:26:34 +0300 Subject: [PATCH 1054/1692] ARC: dma [IOC] Enable per device io coherency So far the IOC treatment was global on ARC, being turned on (or off) for all devices in the system. With this patch, this can now be done per device using the "dma-coherent" DT property; IOW with this patch we can use both HW-coherent and regular DMA peripherals simultaneously. The changes involved are too many so enlisting the summary below: 1. common code calls ARC arch_setup_dma_ops() per device. 2. For coherent dma (IOC) it plugs in generic @dma_direct_ops which doesn't need any arch specific backend: No need for any explicit cache flushes or MMU mappings to provide for uncached access - dma_(map|sync)_single* return early as corresponding dma ops callbacks are NULL in generic code. So arch_sync_dma_*() -> dma_cache_*() need not handle the coherent dma case, hence drop ARC __dma_cache_*_ioc() which were no-op anyways 3. For noncoherent dma (non IOC) generic @dma_noncoherent_ops is used which in turns calls ARC specific routines - arch_dma_alloc() no longer checks for @ioc_enable since this is called only for !IOC case. 
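A toy sketch of the per-device decision described above, ignoring the ARCv2 check and using assumed names (the real selection happens in arch_setup_dma_ops() as shown in the diff): coherent direct ops are chosen only when the SoC IOC is enabled and the device itself is marked coherent, e.g. via the "dma-coherent" DT property; everything else keeps the non-coherent ops with explicit cache maintenance.

#include <stdbool.h>
#include <stdio.h>

static const char *pick_dma_ops(bool ioc_enable, bool dev_coherent)
{
	if (ioc_enable && dev_coherent)
		return "dma_direct_ops";	/* IOC snoops all DMA traffic */
	return "dma_noncoherent_ops";		/* explicit cache maintenance */
}

int main(void)
{
	printf("IOC on,  dma-coherent     -> %s\n", pick_dma_ops(true, true));
	printf("IOC on,  not dma-coherent -> %s\n", pick_dma_ops(true, false));
	printf("IOC off, dma-coherent     -> %s\n", pick_dma_ops(false, true));
	return 0;
}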
Reviewed-by: Christoph Hellwig Signed-off-by: Eugeniy Paltsev Signed-off-by: Vineet Gupta [vgupta: rewrote changelog] --- arch/arc/include/asm/dma-mapping.h | 13 +++++++ arch/arc/mm/cache.c | 23 +++++-------- arch/arc/mm/dma.c | 54 ++++++++++++++++-------------- 3 files changed, 50 insertions(+), 40 deletions(-) create mode 100644 arch/arc/include/asm/dma-mapping.h diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h new file mode 100644 index 000000000000..c946c0a83e76 --- /dev/null +++ b/arch/arc/include/asm/dma-mapping.h @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +// (C) 2018 Synopsys, Inc. (www.synopsys.com) + +#ifndef ASM_ARC_DMA_MAPPING_H +#define ASM_ARC_DMA_MAPPING_H + +#include + +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, + const struct iommu_ops *iommu, bool coherent); +#define arch_setup_dma_ops arch_setup_dma_ops + +#endif diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 25c631942500..2d389cab46ba 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len) n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", perip_base, - IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency ")); + IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) ")); return buf; } @@ -896,15 +896,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz) slc_op(start, sz, OP_FLUSH); } -/* - * DMA ops for systems with IOC - * IOC hardware snoops all DMA traffic keeping the caches consistent with - * memory - eliding need for any explicit cache maintenance of DMA buffers - */ -static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {} -static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {} -static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {} - /* * Exported DMA API */ @@ -1264,11 +1255,7 @@ void __init arc_cache_init_master(void) if (is_isa_arcv2() && ioc_enable) arc_ioc_setup(); - if (is_isa_arcv2() && ioc_enable) { - __dma_cache_wback_inv = __dma_cache_wback_inv_ioc; - __dma_cache_inv = __dma_cache_inv_ioc; - __dma_cache_wback = __dma_cache_wback_ioc; - } else if (is_isa_arcv2() && l2_line_sz && slc_enable) { + if (is_isa_arcv2() && l2_line_sz && slc_enable) { __dma_cache_wback_inv = __dma_cache_wback_inv_slc; __dma_cache_inv = __dma_cache_inv_slc; __dma_cache_wback = __dma_cache_wback_slc; @@ -1277,6 +1264,12 @@ void __init arc_cache_init_master(void) __dma_cache_inv = __dma_cache_inv_l1; __dma_cache_wback = __dma_cache_wback_l1; } + /* + * In case of IOC (say IOC+SLC case), pointers above could still be set + * but end up not being relevant as the first function in chain is not + * called at all for @dma_direct_ops + * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() + */ } void __ref arc_cache_init(void) diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index ec47e6079f5d..c0b49399225d 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -6,20 +6,17 @@ * published by the Free Software Foundation. */ -/* - * DMA Coherent API Notes - * - * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is - * implemented by accessing it using a kernel virtual address, with - * Cache bit off in the TLB entry. - * - * The default DMA address == Phy address which is 0x8000_0000 based. 
- */ - #include #include #include +/* + * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c) + * - hardware IOC not available (or "dma-coherent" not set for device in DT) + * - But still handle both coherent and non-coherent requests from caller + * + * For DMA coherent hardware (IOC) generic code suffices + */ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { @@ -33,19 +30,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, if (!page) return NULL; - /* - * IOC relies on all data (even coherent DMA data) being in cache - * Thus allocate normal cached memory - * - * The gains with IOC are two pronged: - * -For streaming data, elides need for cache maintenance, saving - * cycles in flush code, and bus bandwidth as all the lines of a - * buffer need to be flushed out to memory - * -For coherent data, Read/Write to buffers terminate early in cache - * (vs. always going to memory - thus are faster) - */ - if ((is_isa_arcv2() && ioc_enable) || - (attrs & DMA_ATTR_NON_CONSISTENT)) + if (attrs & DMA_ATTR_NON_CONSISTENT) need_coh = 0; /* @@ -95,8 +80,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, struct page *page = virt_to_page(paddr); int is_non_coh = 1; - is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || - (is_isa_arcv2() && ioc_enable); + is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT); if (PageHighMem(page) || !is_non_coh) iounmap((void __force __iomem *)vaddr); @@ -185,3 +169,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, break; } } + +/* + * Plug in coherent or noncoherent dma ops + */ +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, + const struct iommu_ops *iommu, bool coherent) +{ + /* + * IOC hardware snoops all DMA traffic keeping the caches consistent + * with memory - eliding need for any explicit cache maintenance of + * DMA buffers - so we can use dma_direct cache ops. + */ + if (is_isa_arcv2() && ioc_enable && coherent) { + set_dma_ops(dev, &dma_direct_ops); + dev_info(dev, "use dma_direct_ops cache ops\n"); + } else { + set_dma_ops(dev, &dma_noncoherent_ops); + dev_info(dev, "use dma_noncoherent_ops cache ops\n"); + } +} -- GitLab From 2b720e99a1297417b979bf4810a0f01d27133c48 Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Mon, 30 Jul 2018 19:26:35 +0300 Subject: [PATCH 1055/1692] ARC: IOC: panic if both IOC and ZONE_HIGHMEM enabled Signed-off-by: Eugeniy Paltsev Signed-off-by: Vineet Gupta --- arch/arc/mm/cache.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 2d389cab46ba..f2701c13a66b 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -1144,6 +1144,19 @@ noinline void __init arc_ioc_setup(void) { unsigned int ioc_base, mem_sz; + /* + * As for today we don't support both IOC and ZONE_HIGHMEM enabled + * simultaneously. This happens because as of today IOC aperture covers + * only ZONE_NORMAL (low mem) and any dma transactions outside this + * region won't be HW coherent. + * If we want to use both IOC and ZONE_HIGHMEM we can use + * bounce_buffer to handle dma transactions to HIGHMEM. + * Also it is possible to modify dma_direct cache ops or increase IOC + * aperture size if we are planning to use HIGHMEM without PAE. 
+ */ + if (IS_ENABLED(CONFIG_HIGHMEM)) + panic("IOC and HIGHMEM can't be used simultaneously"); + /* Flush + invalidate + disable L1 dcache */ __dc_disable(); -- GitLab From dd45210b6dd4f1512eafcc41774154ebb762360f Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Mon, 30 Jul 2018 19:26:36 +0300 Subject: [PATCH 1056/1692] ARC: don't check for HIGHMEM pages in arch_dma_alloc __GFP_HIGHMEM flag is cleared by upper layer functions (in include/linux/dma-mapping.h) so we'll never get a __GFP_HIGHMEM flag in arch_dma_alloc gfp argument. That's why alloc_pages will never return highmem page here. Get rid of highmem pages handling and cleanup arch_dma_alloc and arch_dma_free functions. Reviewed-by: Christoph Hellwig Signed-off-by: Eugeniy Paltsev Signed-off-by: Vineet Gupta --- arch/arc/mm/dma.c | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index c0b49399225d..c75d5c3470e3 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -24,30 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, struct page *page; phys_addr_t paddr; void *kvaddr; - int need_coh = 1, need_kvaddr = 0; + bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT); + + /* + * __GFP_HIGHMEM flag is cleared by upper layer functions + * (in include/linux/dma-mapping.h) so we should never get a + * __GFP_HIGHMEM here. + */ + BUG_ON(gfp & __GFP_HIGHMEM); page = alloc_pages(gfp, order); if (!page) return NULL; - if (attrs & DMA_ATTR_NON_CONSISTENT) - need_coh = 0; - - /* - * - A coherent buffer needs MMU mapping to enforce non-cachability - * - A highmem page needs a virtual handle (hence MMU mapping) - * independent of cachability - */ - if (PageHighMem(page) || need_coh) - need_kvaddr = 1; - /* This is linear addr (0x8000_0000 based) */ paddr = page_to_phys(page); *dma_handle = paddr; - /* This is kernel Virtual address (0x7000_0000 based) */ - if (need_kvaddr) { + /* + * A coherent buffer needs MMU mapping to enforce non-cachability. + * kvaddr is kernel Virtual address (0x7000_0000 based). + */ + if (need_coh) { kvaddr = ioremap_nocache(paddr, size); if (kvaddr == NULL) { __free_pages(page, order); @@ -78,11 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, { phys_addr_t paddr = dma_handle; struct page *page = virt_to_page(paddr); - int is_non_coh = 1; - - is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT); - if (PageHighMem(page) || !is_non_coh) + if (!(attrs & DMA_ATTR_NON_CONSISTENT)) iounmap((void __force __iomem *)vaddr); __free_pages(page, get_order(size)); -- GitLab From 9d3f8d2ff777b94993581bdfe5c595c619429624 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Sep 2018 17:29:02 +0100 Subject: [PATCH 1057/1692] drm/i915: Be defensive and don't assume PSR has any commit to sync against MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the previous modeset commit has completed and is no longer part of the crtc state, skip waiting for it. Ville pointed out that, in fact, the commit is never removed after a modeset so the only way we could see a NULL here should be if there was never a commit attached. Nevertheless, we have the evidence it can be NULL and it has been defended against elsewhere, for example commit 93313538c153 ("drm/i915: Pass idle crtc_state to intel_dp_sink_crc"). 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107792 Fixes: c44301fce614 ("drm/i915: Allow control of PSR at runtime through debugfs, v6") Signed-off-by: Chris Wilson Cc: Maarten Lankhorst Cc: Rodrigo Vivi Cc: Dhinakaran Pandiyan Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180904162902.2578-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_psr.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 21984d4c08ed..b6838b525502 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -834,6 +834,7 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, struct drm_device *dev = &dev_priv->drm; struct drm_connector_state *conn_state; struct intel_crtc_state *crtc_state = NULL; + struct drm_crtc_commit *commit; struct drm_crtc *crtc; struct intel_dp *dp; int ret; @@ -860,12 +861,15 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, return ret; crtc_state = to_intel_crtc_state(crtc->state); - ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done); - } else - ret = wait_for_completion_interruptible(&conn_state->commit->hw_done); - - if (ret) - return ret; + commit = crtc_state->base.commit; + } else { + commit = conn_state->commit; + } + if (commit) { + ret = wait_for_completion_interruptible(&commit->hw_done); + if (ret) + return ret; + } ret = mutex_lock_interruptible(&dev_priv->psr.lock); if (ret) -- GitLab From 0d23ba6034b9cf48b8918404367506da3e4b3ee5 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Mon, 3 Sep 2018 18:54:14 +0200 Subject: [PATCH 1058/1692] RDMA/ucma: check fd type in ucma_migrate_id() The current code grabs the private_data of whatever file descriptor userspace has supplied and implicitly casts it to a `struct ucma_file *`, potentially causing a type confusion. This is probably fine in practice because the pointer is only used for comparisons, it is never actually dereferenced; and even in the comparisons, it is unlikely that a file from another filesystem would have a ->private_data pointer that happens to also be valid in this context. But ->private_data is not always guaranteed to be a valid pointer to an object owned by the file's filesystem; for example, some filesystems just cram numbers in there. Check the type of the supplied file descriptor to be safe, analogous to how other places in the kernel do it. Fixes: 88314e4dda1e ("RDMA/cma: add support for rdma_migrate_id()") Signed-off-by: Jann Horn Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/ucma.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index ec8fb289621f..5f437d1570fb 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut); static DEFINE_IDR(ctx_idr); static DEFINE_IDR(multicast_idr); +static const struct file_operations ucma_fops; + static inline struct ucma_context *_ucma_find_context(int id, struct ucma_file *file) { @@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, f = fdget(cmd.fd); if (!f.file) return -ENOENT; + if (f.file->f_op != &ucma_fops) { + ret = -EINVAL; + goto file_put; + } /* Validate current fd and prevent destruction of id. 
*/ ctx = ucma_get_ctx(f.file->private_data, cmd.id); -- GitLab From e4ff3d22c11dd505353896cdcad0ee8f3251be68 Mon Sep 17 00:00:00 2001 From: Artemy Kovalyov Date: Tue, 28 Aug 2018 14:40:32 +0300 Subject: [PATCH 1059/1692] IB/core: Release object lock if destroy failed The object lock was supposed to always be released during destroy, but when the destruction retry series was integrated with the destroy series it created a failure path that missed the unlock. Keep with convention, if destroy fails the caller must undo all locking. Fixes: 87ad80abc70d ("IB/uverbs: Consolidate uobject destruction") Signed-off-by: Artemy Kovalyov Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/rdma_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 6eb64c6f0802..c4118bcd5103 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile, WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); if (!uverbs_destroy_uobject(obj, reason)) ret = 0; + else + atomic_set(&obj->usecnt, 0); } return ret; } -- GitLab From 308aa2b8f7b7db3332a7d41099fd37851fb793b2 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Fri, 31 Aug 2018 07:15:56 -0700 Subject: [PATCH 1060/1692] iw_cxgb4: only allow 1 flush on user qps Once the qp has been flushed, it cannot be flushed again. The user qp flush logic wasn't enforcing it however. The bug can cause touch-after-free crashes like: Unable to handle kernel paging request for data at address 0x000001ec Faulting instruction address: 0xc008000016069100 Oops: Kernel access of bad area, sig: 11 [#1] ... NIP [c008000016069100] flush_qp+0x80/0x480 [iw_cxgb4] LR [c00800001606cd6c] c4iw_modify_qp+0x71c/0x11d0 [iw_cxgb4] Call Trace: [c00800001606cd6c] c4iw_modify_qp+0x71c/0x11d0 [iw_cxgb4] [c00800001606e868] c4iw_ib_modify_qp+0x118/0x200 [iw_cxgb4] [c0080000119eae80] ib_security_modify_qp+0xd0/0x3d0 [ib_core] [c0080000119c4e24] ib_modify_qp+0xc4/0x2c0 [ib_core] [c008000011df0284] iwcm_modify_qp_err+0x44/0x70 [iw_cm] [c008000011df0fec] destroy_cm_id+0xcc/0x370 [iw_cm] [c008000011ed4358] rdma_destroy_id+0x3c8/0x520 [rdma_cm] [c0080000134b0540] ucma_close+0x90/0x1b0 [rdma_ucm] [c000000000444da4] __fput+0xe4/0x2f0 So fix flush_qp() to only flush the wq once. Cc: stable@vger.kernel.org Signed-off-by: Steve Wise Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/cxgb4/qp.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index b3203afa3b1d..347fe18b1a41 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp) schp = to_c4iw_cq(qhp->ibqp.send_cq); if (qhp->ibqp.uobject) { + + /* for user qps, qhp->wq.flushed is protected by qhp->mutex */ + if (qhp->wq.flushed) + return; + + qhp->wq.flushed = 1; t4_set_wq_in_error(&qhp->wq, 0); t4_set_cq_in_error(&rchp->cq); spin_lock_irqsave(&rchp->comp_handler_lock, flag); -- GitLab From 3100dab2aa09dc6e082956e306fc9f81b3cc0f7a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 4 Sep 2018 15:45:34 -0700 Subject: [PATCH 1061/1692] mm: memcontrol: print proper OOM header when no eligible victim left When the memcg OOM killer runs out of killable tasks, it currently prints a WARN with no further OOM context. This has caused some user confusion. 
Warnings indicate a kernel problem. In a reported case, however, the situation was triggered by a nonsensical memcg configuration (hard limit set to 0). But without any VM context this wasn't obvious from the report, and it took some back and forth on the mailing list to identify what is actually a trivial issue. Handle this OOM condition like we handle it in the global OOM killer: dump the full OOM context and tell the user we ran out of tasks. This way the user can identify misconfigurations easily by themselves and rectify the problem - without having to go through the hassle of running into an obscure but unsettling warning, finding the appropriate kernel mailing list and waiting for a kernel developer to remote-analyze that the memcg configuration caused this. If users cannot make sense of why the OOM killer was triggered or why it failed, they will still report it to the mailing list, we know that from experience. So in case there is an actual kernel bug causing this, kernel developers will very likely hear about it. Link: http://lkml.kernel.org/r/20180821160406.22578-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 -- mm/oom_kill.c | 13 ++++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4ead5a4817de..e79cb59552d9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int if (mem_cgroup_out_of_memory(memcg, mask, order)) return OOM_SUCCESS; - WARN(1,"Memory cgroup charge failed because of no reclaimable memory! " - "This looks like a misconfiguration or a kernel bug."); return OOM_FAILED; } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index b5b25e4dcbbb..95fbbc46f68f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -1103,10 +1103,17 @@ bool out_of_memory(struct oom_control *oc) } select_bad_process(oc); - /* Found nothing?!?! Either we hang forever, or we panic. */ - if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) { + /* Found nothing?!?! */ + if (!oc->chosen) { dump_header(oc, NULL); - panic("Out of memory and no killable processes...\n"); + pr_warn("Out of memory and no killable processes...\n"); + /* + * If we got here due to an actual allocation at the + * system level, we cannot survive this and will enter + * an endless loop in the allocator. Bail out now. + */ + if (!is_sysrq_oom(oc) && !is_memcg_oom(oc)) + panic("System is deadlocked on memory\n"); } if (oc->chosen && oc->chosen != (void *)-1UL) oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : -- GitLab From 79cc81057eef7ad846588976296ab0f266c1a7a5 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Tue, 4 Sep 2018 15:45:37 -0700 Subject: [PATCH 1062/1692] mm, oom: fix missing tlb_finish_mmu() in __oom_reap_task_mm(). Commit 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers") has added an ability to skip over vmas with blockable mmu notifiers. This however didn't call tlb_finish_mmu as it should. As a result inc_tlb_flush_pending has been called without its pairing dec_tlb_flush_pending and all callers mm_tlb_flush_pending would flush even though this is not really needed. This alone is not harmful and it seems there shouldn't be any such callers for oom victims at all but there is no real reason to skip tlb_finish_mmu on early skip either so call it. 
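The bug class is easy to demonstrate outside the kernel: every call that raises the pending counter must be balanced on every exit path, including the early-skip one. A standalone sketch with stand-in names follows (the real pair is tlb_gather_mmu()/tlb_finish_mmu(); the skipped iteration models the blockable-notifier case).

#include <stdio.h>

struct mm_stub { int tlb_flush_pending; };

static void gather_begin(struct mm_stub *mm)  { mm->tlb_flush_pending++; }
static void gather_finish(struct mm_stub *mm) { mm->tlb_flush_pending--; }

int main(void)
{
	struct mm_stub mm = { 0 };

	for (int vma = 0; vma < 3; vma++) {
		gather_begin(&mm);		/* models tlb_gather_mmu() */
		if (vma == 1) {			/* blockable notifier: skip vma */
			gather_finish(&mm);	/* the fix: balance before continue */
			continue;
		}
		/* ... unmap the vma's pages ... */
		gather_finish(&mm);		/* models tlb_finish_mmu() */
	}
	printf("tlb_flush_pending after reap: %d (must be 0)\n",
	       mm.tlb_flush_pending);
	return 0;
}

Without the balancing call before the continue, the counter stays elevated after the loop, which is exactly the state mm_tlb_flush_pending() would keep reporting.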
[mhocko@suse.com: new changelog] Link: http://lkml.kernel.org/r/b752d1d5-81ad-7a35-2394-7870641be51c@i-love.sakura.ne.jp Signed-off-by: Tetsuo Handa Acked-by: Michal Hocko Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 95fbbc46f68f..f10aa5360616 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm) tlb_gather_mmu(&tlb, mm, start, end); if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) { + tlb_finish_mmu(&tlb, start, end); ret = false; continue; } -- GitLab From 1ed0cc5a01a4d868d9907ce96468c4b4c6709556 Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Tue, 4 Sep 2018 15:45:41 -0700 Subject: [PATCH 1063/1692] mm: respect arch_dup_mmap() return value Commit d70f2a14b72a ("include/linux/sched/mm.h: uninline mmdrop_async(), etc") ignored the return value of arch_dup_mmap(). As a result, on x86, a failure to duplicate the LDT (e.g. due to memory allocation error) would leave the duplicated memory mapping in an inconsistent state. Fix by using the return value, as it was before the change. Link: http://lkml.kernel.org/r/20180823051229.211856-1-namit@vmware.com Fixes: d70f2a14b72a4 ("include/linux/sched/mm.h: uninline mmdrop_async(), etc") Signed-off-by: Nadav Amit Acked-by: Michal Hocko Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index d896e9ca38b0..f0b58479534f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto out; } /* a new mm has just been created */ - arch_dup_mmap(oldmm, mm); - retval = 0; + retval = arch_dup_mmap(oldmm, mm); out: up_write(&mm->mmap_sem); flush_tlb_mm(oldmm); -- GitLab From b353756b2b71915e81ed41239292306622d08c9f Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Tue, 4 Sep 2018 15:45:44 -0700 Subject: [PATCH 1064/1692] kmemleak: always register debugfs file If kmemleak built in to the kernel, but is disabled by default, the debugfs file is never registered. Because of this, it is not possible to find out if the kernel is built with kmemleak support by checking for the presence of this file. To allow this, always register the file. After this patch, if the file doesn't exist, kmemleak is not available in the kernel. If writing "scan" or any other value than "clear" to this file results in EBUSY, then kmemleak is available but is disabled by default and can be activated via the kernel command line. Catalin: "that's also consistent with a late disabling of kmemleak when the debugfs entry sticks around." Link: http://lkml.kernel.org/r/20180824131220.19176-1-vincent.whitchurch@axis.com Signed-off-by: Vincent Whitchurch Acked-by: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/kmemleak.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 9a085d525bbc..17dd883198ae 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void) kmemleak_initialized = 1; + dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL, + &kmemleak_fops); + if (!dentry) + pr_warn("Failed to create the debugfs kmemleak file\n"); + if (kmemleak_error) { /* * Some error occurred and kmemleak was disabled. 
There is a @@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void) return -ENOMEM; } - dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL, - &kmemleak_fops); - if (!dentry) - pr_warn("Failed to create the debugfs kmemleak file\n"); mutex_lock(&scan_mutex); start_scan_thread(); mutex_unlock(&scan_mutex); -- GitLab From 904506562e0856f2535d876407d087c9459d345b Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Tue, 4 Sep 2018 15:45:48 -0700 Subject: [PATCH 1065/1692] tools/vm/slabinfo.c: fix sign-compare warning Currently we get the following compiler warning: slabinfo.c:854:22: warning: comparison between signed and unsigned integer expressions [-Wsign-compare] if (s->object_size < min_objsize) ^ due to the mismatch of signed/unsigned comparison. ->object_size and ->slab_size are never expected to be negative, so let's define them as unsigned int. [n-horiguchi@ah.jp.nec.com: convert everything - none of these can be negative] Link: http://lkml.kernel.org/r/20180826234947.GA9787@hori1.linux.bs1.fc.nec.co.jp Link: http://lkml.kernel.org/r/1535103134-20239-1-git-send-email-n-horiguchi@ah.jp.nec.com Signed-off-by: Naoya Horiguchi Reviewed-by: Andrew Morton Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/vm/slabinfo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c index f82c2eaa859d..334b16db0ebb 100644 --- a/tools/vm/slabinfo.c +++ b/tools/vm/slabinfo.c @@ -30,8 +30,8 @@ struct slabinfo { int alias; int refs; int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu; - int hwcache_align, object_size, objs_per_slab; - int sanity_checks, slab_size, store_user, trace; + unsigned int hwcache_align, object_size, objs_per_slab; + unsigned int sanity_checks, slab_size, store_user, trace; int order, poison, reclaim_account, red_zone; unsigned long partial, objects, slabs, objects_partial, objects_total; unsigned long alloc_fastpath, alloc_slowpath; -- GitLab From 7ab660f8baecfe26c1c267fa8e64d2073feae2bb Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Tue, 4 Sep 2018 15:45:51 -0700 Subject: [PATCH 1066/1692] tools/vm/page-types.c: fix "defined but not used" warning debugfs_known_mountpoints[] is not used any more, so let's remove it. Link: http://lkml.kernel.org/r/1535102651-19418-1-git-send-email-n-horiguchi@ah.jp.nec.com Signed-off-by: Naoya Horiguchi Reviewed-by: Andrew Morton Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/vm/page-types.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index 30cb0a0713ff..37908a83ddc2 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c @@ -159,12 +159,6 @@ static const char * const page_flag_names[] = { }; -static const char * const debugfs_known_mountpoints[] = { - "/sys/kernel/debug", - "/debug", - 0, -}; - /* * data structures */ -- GitLab From 04b8e946075d4582093e84f54dc1a004b227794d Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 4 Sep 2018 15:45:55 -0700 Subject: [PATCH 1067/1692] mm/util.c: improve kvfree() kerneldoc Scooped from an email from Matthew. 
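The clarified text is easiest to see with a purely illustrative usage pattern
("size" and "buf" are placeholders, not code from this patch):

    #include <linux/mm.h>    /* kvmalloc(), kvfree() */

    void *buf = kvmalloc(size, GFP_KERNEL);  /* may be kmalloc- or vmalloc-backed */
    if (!buf)
        return -ENOMEM;
    /* ... use buf ... */
    kvfree(buf);   /* correct for either backing; kfree()/vfree() only if you know which */

The point of the reworded kerneldoc is exactly this: the caller does not need to
know which allocator actually satisfied the request.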
Cc: Mike Rapoport Cc: Jonathan Corbet Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/util.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mm/util.c b/mm/util.c index d2890a407332..9e3ebd2ef65f 100644 --- a/mm/util.c +++ b/mm/util.c @@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) EXPORT_SYMBOL(kvmalloc_node); /** - * kvfree - free memory allocated with kvmalloc - * @addr: pointer returned by kvmalloc + * kvfree() - Free memory. + * @addr: Pointer to allocated memory. * - * If the memory is allocated from vmalloc area it is freed with vfree(). - * Otherwise kfree() is used. + * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc(). + * It is slightly more efficient to use kfree() or vfree() if you are certain + * that you know which one to use. + * + * Context: Any context except NMI. */ void kvfree(const void *addr) { -- GitLab From 464c7ffbcb164b2e5cebfa406b7fc6cdb7945344 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 4 Sep 2018 15:45:59 -0700 Subject: [PATCH 1068/1692] mm/hugetlb: filter out hugetlb pages if HUGEPAGE migration is not supported. When scanning for movable pages, filter out Hugetlb pages if hugepage migration is not supported. Without this we hit infinte loop in __offline_pages() where we do pfn = scan_movable_pages(start_pfn, end_pfn); if (pfn) { /* We have movable pages */ ret = do_migrate_range(pfn, end_pfn); goto repeat; } Fix this by checking hugepage_migration_supported both in has_unmovable_pages which is the primary backoff mechanism for page offlining and for consistency reasons also into scan_movable_pages because it doesn't make any sense to return a pfn to non-migrateable huge page. This issue was revealed by, but not caused by 72b39cfc4d75 ("mm, memory_hotplug: do not fail offlining too early"). Link: http://lkml.kernel.org/r/20180824063314.21981-1-aneesh.kumar@linux.ibm.com Fixes: 72b39cfc4d75 ("mm, memory_hotplug: do not fail offlining too early") Signed-off-by: Aneesh Kumar K.V Reported-by: Haren Myneni Acked-by: Michal Hocko Reviewed-by: Naoya Horiguchi Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 3 ++- mm/page_alloc.c | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9eea6e809a4e..38d94b703e9d 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end) if (__PageMovable(page)) return pfn; if (PageHuge(page)) { - if (page_huge_active(page)) + if (hugepage_migration_supported(page_hstate(page)) && + page_huge_active(page)) return pfn; else pfn = round_up(pfn + 1, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 05e983f42316..89d2a2ab3fe6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7708,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, * handle each tail page individually in migration. */ if (PageHuge(page)) { + + if (!hugepage_migration_supported(page_hstate(page))) + goto unmovable; + iter = round_up(iter + 1, 1< Date: Tue, 4 Sep 2018 15:46:02 -0700 Subject: [PATCH 1069/1692] ipc/shm: properly return EIDRM in shm_lock() When getting rid of the general ipc_lock(), this was missed furthermore, making the comment around the ipc object validity check bogus. Under EIDRM conditions, callers will in turn not see the error and continue with the operation. 
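To make the consequence concrete, a hypothetical caller (not actual ipc/shm.c
code) now sees the failure instead of silently proceeding:

    struct shmid_kernel *shp;

    shp = shm_lock(ns, shmid);
    if (IS_ERR(shp))
        return PTR_ERR(shp);   /* -EIDRM once the segment has been removed */
    /* ... operate on the locked segment ... */

Before this fix the still valid-looking object pointer from the idr lookup was
returned as if the lock had succeeded.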
Link: http://lkml.kernel.org/r/20180824030920.GD3677@linux-r8p5 Link: http://lkml.kernel.org/r/20180823024051.GC13343@shao2-debian Fixes: 82061c57ce9 ("ipc: drop ipc_lock()") Signed-off-by: Davidlohr Bueso Reported-by: kernel test robot Cc: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/ipc/shm.c b/ipc/shm.c index b0eb3757ab89..4cd402e4cfeb 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -199,6 +199,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) } ipc_unlock_object(ipcp); + ipcp = ERR_PTR(-EIDRM); err: rcu_read_unlock(); /* -- GitLab From 328b5f417a4ac929e20d98b989a2babac66c8520 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 4 Sep 2018 15:46:06 -0700 Subject: [PATCH 1070/1692] checkpatch: add optional static const to blank line declarations test Using a static const struct definition as part of a series of declarations produces a false positive "Missing a blank line after declarations" for code like: WARNING: Missing a blank line after declarations #710: FILE: drivers/gpu/drm/tidss/tidss_scale_coefs.c:137: + int inc; + static const struct { So fix it. Link: http://lkml.kernel.org/r/5905126e70b0ed1781e49265fd5c49c5090d0223.camel@perches.com Signed-off-by: Joe Perches Reported-by: Jyri Sarha Cc: "Valkeinen, Tomi" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 5219280bf7ff..b4caee6e269c 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3311,7 +3311,7 @@ sub process { # known declaration macros $sline =~ /^\+\s+$declaration_macros/ || # start of struct or union or enum - $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ || + $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ || # start or end of block or continuation of declaration $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ || # bitfield continuation -- GitLab From 4e8346d0be889c7ab5eb2d3deedc18111d741e99 Mon Sep 17 00:00:00 2001 From: Mikhail Zaslonko Date: Tue, 4 Sep 2018 15:46:09 -0700 Subject: [PATCH 1071/1692] memory_hotplug: fix kernel_panic on offline page processing Within show_valid_zones() the function test_pages_in_a_zone() should be called for online memory blocks only. Otherwise it might lead to the VM_BUG_ON due to uninitialized struct pages (when CONFIG_DEBUG_VM_PGFLAGS kernel option is set): page dumped because: VM_BUG_ON_PAGE(PagePoisoned(p)) ------------[ cut here ]------------ Call Trace: ([<000000000038f91e>] test_pages_in_a_zone+0xe6/0x168) [<0000000000923472>] show_valid_zones+0x5a/0x1a8 [<0000000000900284>] dev_attr_show+0x3c/0x78 [<000000000046f6f0>] sysfs_kf_seq_show+0xd0/0x150 [<00000000003ef662>] seq_read+0x212/0x4b8 [<00000000003bf202>] __vfs_read+0x3a/0x178 [<00000000003bf3ca>] vfs_read+0x8a/0x148 [<00000000003bfa3a>] ksys_read+0x62/0xb8 [<0000000000bc2220>] system_call+0xdc/0x2d8 That VM_BUG_ON was triggered by the page poisoning introduced in mm/sparse.c with the git commit d0dc12e86b31 ("mm/memory_hotplug: optimize memory hotplug"). With the same commit the new 'nid' field has been added to the struct memory_block in order to store and later on derive the node id for offline pages (instead of accessing struct page which might be uninitialized). But one reference to nid in show_valid_zones() function has been overlooked. Fixed with current commit. 
Also, nr_pages will not be used any more after test_pages_in_a_zone() call, do not update it. Link: http://lkml.kernel.org/r/20180828090539.41491-1-zaslonko@linux.ibm.com Fixes: d0dc12e86b31 ("mm/memory_hotplug: optimize memory hotplug") Signed-off-by: Mikhail Zaslonko Acked-by: Michal Hocko Reviewed-by: Pavel Tatashin Cc: [4.17+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c8a1cb0b6136..817320c7c4c1 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -416,26 +416,24 @@ static ssize_t show_valid_zones(struct device *dev, struct zone *default_zone; int nid; - /* - * The block contains more than one zone can not be offlined. - * This can happen e.g. for ZONE_DMA and ZONE_DMA32 - */ - if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn)) - return sprintf(buf, "none\n"); - - start_pfn = valid_start_pfn; - nr_pages = valid_end_pfn - start_pfn; - /* * Check the existing zone. Make sure that we do that only on the * online nodes otherwise the page_zone is not reliable */ if (mem->state == MEM_ONLINE) { + /* + * The block contains more than one zone can not be offlined. + * This can happen e.g. for ZONE_DMA and ZONE_DMA32 + */ + if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, + &valid_start_pfn, &valid_end_pfn)) + return sprintf(buf, "none\n"); + start_pfn = valid_start_pfn; strcat(buf, page_zone(pfn_to_page(start_pfn))->name); goto out; } - nid = pfn_to_nid(start_pfn); + nid = mem->nid; default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); strcat(buf, default_zone->name); -- GitLab From 8a2336e549d385bb0b46880435b411df8d8200e8 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 4 Sep 2018 15:46:13 -0700 Subject: [PATCH 1072/1692] uapi/linux/keyctl.h: don't use C++ reserved keyword as a struct member name Since this header is in "include/uapi/linux/", apparently people want to use it in userspace programs -- even in C++ ones. However, the header uses a C++ reserved keyword ("private"), so change that to "dh_private" instead to allow the header file to be used in C++ userspace. Fixes https://bugzilla.kernel.org/show_bug.cgi?id=191051 Link: http://lkml.kernel.org/r/0db6c314-1ef4-9bfa-1baa-7214dd2ee061@infradead.org Fixes: ddbb41148724 ("KEYS: Add KEYCTL_DH_COMPUTE command") Signed-off-by: Randy Dunlap Reviewed-by: Andrew Morton Cc: David Howells Cc: James Morris Cc: "Serge E. 
Hallyn" Cc: Mat Martineau Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/uapi/linux/keyctl.h | 2 +- security/keys/dh.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index 7b8c9e19bad1..910cc4334b21 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -65,7 +65,7 @@ /* keyctl structures */ struct keyctl_dh_params { - __s32 private; + __s32 dh_private; __s32 prime; __s32 base; }; diff --git a/security/keys/dh.c b/security/keys/dh.c index 711e89d8c415..3b602a1e27fa 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params, } dh_inputs.g_size = dlen; - dlen = dh_data_from_key(pcopy.private, &dh_inputs.key); + dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key); if (dlen < 0) { ret = dlen; goto out2; -- GitLab From 62ec0d8c4f332dedf19d6fad15ddea639044d5fe Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 4 Sep 2018 15:46:16 -0700 Subject: [PATCH 1073/1692] mm: fix BUG_ON() in vmf_insert_pfn_pud() from VM_MIXEDMAP removal It looks like I missed the PUD path when doing VM_MIXEDMAP removal. This can be triggered by: 1. Boot with memmap=4G!8G 2. build ndctl with destructive flag on 3. make TESTS=device-dax check [ +0.000675] kernel BUG at mm/huge_memory.c:824! Applying the same change that was applied to vmf_insert_pfn_pmd() in the original patch. Link: http://lkml.kernel.org/r/153565957352.35524.1005746906902065126.stgit@djiang5-desk3.ch.intel.com Fixes: e1fb4a08649 ("dax: remove VM_MIXEDMAP for fsdax and device dax") Signed-off-by: Dave Jiang Reported-by: Vishal Verma Tested-by: Vishal Verma Acked-by: Jeff Moyer Reviewed-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c3bc7e9c9a2a..533f9b00147d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -821,11 +821,11 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, * but we need to be consistent with PTEs and architectures that * can't support a 'special' bit. */ - BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); + BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && + !pfn_t_devmap(pfn)); BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == (VM_PFNMAP|VM_MIXEDMAP)); BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); - BUG_ON(!pfn_t_devmap(pfn)); if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; -- GitLab From c5967e989f1fe702e75d7405b9251ec7e490d847 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 4 Sep 2018 15:46:20 -0700 Subject: [PATCH 1074/1692] checkpatch: add __ro_after_init to known $Attribute __ro_after_init is a specific __attribute__ that checkpatch does currently not understand. Add it to the known $Attribute types so that code that uses variables declared with __ro_after_init are not thought to be a modifier type. This appears as a defect in checkpatch output of code like: static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); [...] 
if (trust_cpu && arch_init) { where checkpatch reports: ERROR: space prohibited after that '&&' (ctx:WxW) if (trust_cpu && arch_init) { Link: http://lkml.kernel.org/r/0fa8a2cb83ade4c525e18261ecf6cfede3015983.camel@perches.com Signed-off-by: Joe Perches Reported-by: Kees Cook Tested-by: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index b4caee6e269c..161b0224d6ae 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -380,6 +380,7 @@ our $Attribute = qr{ __noclone| __deprecated| __read_mostly| + __ro_after_init| __kprobes| $InitAttribute| ____cacheline_aligned| -- GitLab From 4c5d114ea04d5b6c7009d46895ec26109aa654f3 Mon Sep 17 00:00:00 2001 From: Thibaut Sautereau Date: Tue, 4 Sep 2018 15:46:23 -0700 Subject: [PATCH 1075/1692] lib/Kconfig.debug: fix three typos in help text Fix three typos in CONFIG_WARN_ALL_UNSEEDED_RANDOM help text. Link: http://lkml.kernel.org/r/20180830194505.4778-1-thibaut@sautereau.fr Signed-off-by: Thibaut Sautereau Acked-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 613316724c6a..4966c4fbe7f7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1277,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM time. This is really bad from a security perspective, and so architecture maintainers really need to do what they can to get the CRNG seeded sooner after the system is booted. - However, since users can not do anything actionble to + However, since users cannot do anything actionable to address this, by default the kernel will issue only a single warning for the first use of unseeded randomness. Say Y here if you want to receive warnings for all uses of unseeded randomness. This will be of use primarily for - those developers interersted in improving the security of + those developers interested in improving the security of Linux kernels running on their architecture (or subarchitecture). -- GitLab From 36bdac1e674debd2714cb3e80eaa18266c2426e4 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Tue, 4 Sep 2018 15:46:26 -0700 Subject: [PATCH 1076/1692] drivers/dax/device.c: convert variable to vm_fault_t type As part of 226ab561075f ("device-dax: Convert to vmf_insert_mixed and vm_fault_t") in 4.19-rc1, 'rc' was not converted to vm_fault_t. Now converted. 
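For readers unfamiliar with the type: vm_fault_t is the dedicated typedef for
fault-handler status codes (VM_FAULT_SIGBUS, VM_FAULT_NOPAGE, ...), so the
conversion amounts to giving 'rc' the right type, as in the hunk below:

    vm_fault_t rc = VM_FAULT_SIGBUS;  /* fault status, not an errno-style int */
    int id;                           /* unrelated integer state keeps its type */

which keeps fault codes from being mixed up with plain integers.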
Link: http://lkml.kernel.org/r/20180830153813.GA26059@jordon-HP-15-Notebook-PC Signed-off-by: Souptick Joarder Cc: Dan Williams Cc: Dave Jiang Cc: Ross Zwisler Cc: Vishal Verma Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/dax/device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 6fd46083e629..bbe4d72ca105 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, { struct file *filp = vmf->vma->vm_file; unsigned long fault_size; - int rc, id; + vm_fault_t rc = VM_FAULT_SIGBUS; + int id; pfn_t pfn; struct dev_dax *dev_dax = filp->private_data; -- GitLab From ae98043f5f7fa45b65084f70e3ada3209873ebb4 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 4 Sep 2018 15:46:30 -0700 Subject: [PATCH 1077/1692] nilfs2: convert to SPDX license tags Remove the verbose license text from NILFS2 files and replace them with SPDX tags. This does not change the license of any of the code. Link: http://lkml.kernel.org/r/1535624528-5982-1-git-send-email-konishi.ryusuke@lab.ntt.co.jp Signed-off-by: Ryusuke Konishi Reviewed-by: Andrew Morton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nilfs2/alloc.c | 11 +---------- fs/nilfs2/alloc.h | 11 +---------- fs/nilfs2/bmap.c | 11 +---------- fs/nilfs2/bmap.h | 11 +---------- fs/nilfs2/btnode.c | 11 +---------- fs/nilfs2/btnode.h | 11 +---------- fs/nilfs2/btree.c | 11 +---------- fs/nilfs2/btree.h | 11 +---------- fs/nilfs2/cpfile.c | 11 +---------- fs/nilfs2/cpfile.h | 11 +---------- fs/nilfs2/dat.c | 11 +---------- fs/nilfs2/dat.h | 11 +---------- fs/nilfs2/dir.c | 11 +---------- fs/nilfs2/direct.c | 11 +---------- fs/nilfs2/direct.h | 11 +---------- fs/nilfs2/file.c | 11 +---------- fs/nilfs2/gcinode.c | 11 +---------- fs/nilfs2/ifile.c | 11 +---------- fs/nilfs2/ifile.h | 11 +---------- fs/nilfs2/inode.c | 11 +---------- fs/nilfs2/ioctl.c | 11 +---------- fs/nilfs2/mdt.c | 11 +---------- fs/nilfs2/mdt.h | 11 +---------- fs/nilfs2/namei.c | 11 +---------- fs/nilfs2/nilfs.h | 11 +---------- fs/nilfs2/page.c | 11 +---------- fs/nilfs2/page.h | 11 +---------- fs/nilfs2/recovery.c | 11 +---------- fs/nilfs2/segbuf.c | 11 +---------- fs/nilfs2/segbuf.h | 11 +---------- fs/nilfs2/segment.c | 11 +---------- fs/nilfs2/segment.h | 11 +---------- fs/nilfs2/sufile.c | 11 +---------- fs/nilfs2/sufile.h | 11 +---------- fs/nilfs2/super.c | 11 +---------- fs/nilfs2/sysfs.c | 11 +---------- fs/nilfs2/sysfs.h | 11 +---------- fs/nilfs2/the_nilfs.c | 11 +---------- fs/nilfs2/the_nilfs.h | 11 +---------- 39 files changed, 39 insertions(+), 390 deletions(-) diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 03b8ba933eb2..235b959fc2b3 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * alloc.c - NILFS dat/inode allocator * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * * Originally written by Koji Sato. * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. */ diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h index 05149e606a78..0303c3968cee 100644 --- a/fs/nilfs2/alloc.h +++ b/fs/nilfs2/alloc.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Originally written by Koji Sato. * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. */ diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index 01fb1831ca25..fb5a9a8a13cf 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * bmap.c - NILFS block mapping. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index 2b6ffbe5997a..2c63858e81c9 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * bmap.h - NILFS block mapping. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index dec98cab729d..ebb24a314f43 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * btnode.c - NILFS B-tree node cache * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * * Originally written by Seiji Kihara. * Fully revised by Ryusuke Konishi for stabilization and simplification. * diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 4e8aaa1aeb65..0f88dbc9bcb3 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * btnode.h - NILFS B-tree node cache * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Seiji Kihara. * Revised by Ryusuke Konishi. */ diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 16a7a67a11c9..23e043eca237 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * btree.c - NILFS B-tree. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index 2184e47fa4bf..d1421b646ce4 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * btree.h - NILFS B-tree. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index a15a1601e931..8d41311b5db4 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * cpfile.c - NILFS checkpoint file. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h index 6eca972f9673..6336222df24a 100644 --- a/fs/nilfs2/cpfile.h +++ b/fs/nilfs2/cpfile.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * cpfile.h - NILFS checkpoint file. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index dffedb2f8817..6f4066636be9 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * dat.c - NILFS disk address translation. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h index 57dc6cf466d0..b17ee34580ae 100644 --- a/fs/nilfs2/dat.h +++ b/fs/nilfs2/dat.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * dat.h - NILFS disk address translation. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 582831ab3eb9..81394e22d0a0 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * dir.c - NILFS directory entry operations * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Modified for NILFS by Amagai Yoshiji. 
*/ /* diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index 96e3ed0d9652..533e24ea3a88 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * direct.c - NILFS direct block pointer. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h index cfe85e848bba..ec9a23c77994 100644 --- a/fs/nilfs2/direct.h +++ b/fs/nilfs2/direct.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * direct.h - NILFS direct block pointer. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 7da0fac71dc2..64bc81363c6c 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * file.c - NILFS regular file handling primitives including fsync(). * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Amagai Yoshiji and Ryusuke Konishi. */ diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 853a831dcde0..aa3c328ee189 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * gcinode.c - dummy inodes to buffer blocks for garbage collection * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi. 
* Revised by Ryusuke Konishi. * diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index b8fa45c20c63..4140d232cadc 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * ifile.c - NILFS inode file * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Amagai Yoshiji. * Revised by Ryusuke Konishi. * diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h index 188b94fe0ec5..a1e1e5711a05 100644 --- a/fs/nilfs2/ifile.h +++ b/fs/nilfs2/ifile.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * ifile.h - NILFS inode file * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Amagai Yoshiji. * Revised by Ryusuke Konishi. * diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 6a612d832e7d..671085512e0f 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * inode.c - NILFS inode operations. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. * */ diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 1d2c3d7711fe..9b96d79eea6c 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * ioctl.c - NILFS ioctl operations. * * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. 
*/ diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index c6bc1033e7d2..700870a92bc4 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * mdt.c - meta data file for NILFS * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. */ diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h index 3f67f3932097..e77aea4bb921 100644 --- a/fs/nilfs2/mdt.h +++ b/fs/nilfs2/mdt.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * mdt.h - NILFS meta data file prototype and definitions * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. */ diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index dd52d3f82e8d..9fe6d4ab74f0 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * namei.c - NILFS pathname lookup operations. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi. */ /* diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 33f8c8fc96e8..a2f247b6a209 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * nilfs.h - NILFS local header file. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato and Ryusuke Konishi. 
*/ diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 4cb850a6f1c2..329a056b73b1 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * page.c - buffer/page management specific to NILFS * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi and Seiji Kihara. */ diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index f3687c958fa8..62b9bb469e92 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * page.h - buffer/page management specific to NILFS * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi and Seiji Kihara. */ diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 5139efed1888..140b663e91c7 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * recovery.c - NILFS recovery logic * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. */ diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 68cb9e4740b4..20c479b5e41b 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * segbuf.c - NILFS segment buffer * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. 
* */ diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h index 10e16935fff6..9bea1bd59041 100644 --- a/fs/nilfs2/segbuf.h +++ b/fs/nilfs2/segbuf.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * segbuf.h - NILFS Segment buffer prototypes and definitions * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. * */ diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 0953635e7d48..445eef41bfaf 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * segment.c - NILFS segment constructor. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. * */ diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 04634e3e3d58..f5cf5308f3fc 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * segment.h - NILFS Segment constructor prototypes and definitions * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. * */ diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index c7fa139d50e8..bf3f8f05c89b 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * sufile.c - NILFS segment usage file. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. * Revised by Ryusuke Konishi. 
*/ diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index 673a891350f4..c4e2c7a7add1 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * sufile.h - NILFS segment usage file. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Koji Sato. */ diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 1b9067cf4511..26290aa1023f 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * super.c - NILFS module and super block management. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. */ /* diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index 4b25837e7724..e60be7bb55b0 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c @@ -1,19 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * sysfs.c - sysfs support implementation. * * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. * Copyright (C) 2014 HGST, Inc., a Western Digital Company. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Vyacheslav Dubeyko */ diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h index 648cedf9c06e..d001eb862dae 100644 --- a/fs/nilfs2/sysfs.h +++ b/fs/nilfs2/sysfs.h @@ -1,19 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * sysfs.h - sysfs support declarations. * * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. * Copyright (C) 2014 HGST, Inc., a Western Digital Company. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * * Written by Vyacheslav Dubeyko */ diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 1a85317e83f0..484785cdf96e 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -1,18 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * the_nilfs.c - the_nilfs shared structure. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. * */ diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 36da1779f976..380a543c5b19 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -1,18 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * the_nilfs.h - the_nilfs shared structure. * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Written by Ryusuke Konishi. * */ -- GitLab From e866d3e84eb7c9588afb77604d417e8cc49fe216 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 28 Aug 2018 17:33:46 -0700 Subject: [PATCH 1078/1692] riscv: Do not overwrite initrd_start and initrd_end setup_initrd() overwrites initrd_start and initrd_end if __initramfs_size is larger than 0, which is always true even if there is no embedded initramfs. This prevents booting qemu with "-initrd" parameter. Overwriting initrd_start and initrd_end is not necessary since __initramfs_start and __initramfs_size are used directly in populate_rootfs() to load the built-in initramfs, so just drop that code. 
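With the overwrite gone, setup_initrd() starts directly with the sanity check
that was already there (mirroring the context of the hunk below):

    unsigned long size;

    if (initrd_start >= initrd_end) {
        printk(KERN_INFO "initrd not found or empty");
        goto disable;
    }

so initrd_start/initrd_end keep whatever the bootloader (e.g. qemu's -initrd)
handed over, while the built-in initramfs continues to be loaded by
populate_rootfs() via __initramfs_start/__initramfs_size.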
Signed-off-by: Guenter Roeck Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/setup.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index db20dc630e7e..aee603123030 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -85,15 +85,8 @@ atomic_t hart_lottery; #ifdef CONFIG_BLK_DEV_INITRD static void __init setup_initrd(void) { - extern char __initramfs_start[]; - extern unsigned long __initramfs_size; unsigned long size; - if (__initramfs_size > 0) { - initrd_start = (unsigned long)(&__initramfs_start); - initrd_end = initrd_start + __initramfs_size; - } - if (initrd_start >= initrd_end) { printk(KERN_INFO "initrd not found or empty"); goto disable; -- GitLab From 3350139c0ff3c95724b784f7109987d533cb3ecd Mon Sep 17 00:00:00 2001 From: Greentime Hu Date: Tue, 4 Sep 2018 14:25:57 +0800 Subject: [PATCH 1079/1692] nds32: linker script: GCOV kernel may refers data in __exit This patch is used to fix nds32 allmodconfig/allyesconfig build error because GCOV kernel embeds counters in the kernel for each line and a part of that embed in __exit text. So we need to keep the EXIT_TEXT and EXIT_DATA if CONFIG_GCOV_KERNEL=y. Link: https://lkml.org/lkml/2018/9/1/125 Signed-off-by: Greentime Hu Reviewed-by: Masami Hiramatsu --- arch/nds32/kernel/vmlinux.lds.S | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S index 288313b886ef..9e90f30a181d 100644 --- a/arch/nds32/kernel/vmlinux.lds.S +++ b/arch/nds32/kernel/vmlinux.lds.S @@ -13,14 +13,26 @@ OUTPUT_ARCH(nds32) ENTRY(_stext_lma) jiffies = jiffies_64; +#if defined(CONFIG_GCOV_KERNEL) +#define NDS32_EXIT_KEEP(x) x +#else +#define NDS32_EXIT_KEEP(x) +#endif + SECTIONS { _stext_lma = TEXTADDR - LOAD_OFFSET; . = TEXTADDR; __init_begin = .; HEAD_TEXT_SECTION + .exit.text : { + NDS32_EXIT_KEEP(EXIT_TEXT) + } INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) + .exit.data : { + NDS32_EXIT_KEEP(EXIT_DATA) + } PERCPU_SECTION(L1_CACHE_BYTES) __init_end = .; -- GitLab From c483a5cc9d09f4ceaa9abb106f863cc89cb643d9 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 27 Aug 2018 10:21:48 +0200 Subject: [PATCH 1080/1692] mmc: meson-mx-sdio: fix OF child-node lookup Use the new of_get_compatible_child() helper to lookup the slot child node instead of using of_find_compatible_node(), which searches the entire tree from a given start node and thus can return an unrelated (i.e. non-child) node. This also addresses a potential use-after-free (e.g. after probe deferral) as the tree-wide helper drops a reference to its first argument (i.e. the node of the device being probed). While at it, also fix up the related slot-node reference leak. 
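The same lookup-and-release pattern applies to any driver probing a child node
(illustrative sketch; "vendor,slot" and the surrounding code are placeholders):

    struct device_node *child;

    child = of_get_compatible_child(dev->of_node, "vendor,slot");
    if (!child)
        return -ENOENT;     /* only direct children are considered */
    /* ... create/attach whatever the child describes ... */
    of_node_put(child);     /* the helper returned the node with a reference held */

Unlike of_find_compatible_node(), the helper neither walks past the children of
dev->of_node nor drops a reference on it, which is what made the old code prone
to matching unrelated nodes and to the use-after-free mentioned above.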
Fixes: ed80a13bb4c4 ("mmc: meson-mx-sdio: Add a driver for the Amlogic Meson8 and Meson8b SoCs") Cc: stable # 4.15 Cc: Carlo Caione Cc: Martin Blumenstingl Cc: Ulf Hansson Acked-by: Martin Blumenstingl Signed-off-by: Johan Hovold Signed-off-by: Ulf Hansson --- drivers/mmc/host/meson-mx-sdio.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 09cb89645d06..2cfec33178c1 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = { static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) { struct device_node *slot_node; + struct platform_device *pdev; /* * TODO: the MMC core framework currently does not support * controllers with multiple slots properly. So we only register * the first slot for now */ - slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot"); + slot_node = of_get_compatible_child(parent->of_node, "mmc-slot"); if (!slot_node) { dev_warn(parent, "no 'mmc-slot' sub-node found\n"); return ERR_PTR(-ENOENT); } - return of_platform_device_create(slot_node, NULL, parent); + pdev = of_platform_device_create(slot_node, NULL, parent); + of_node_put(slot_node); + + return pdev; } static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) -- GitLab From b034ed50a2bb517c4b76e84f7723cb6bf60a4edd Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 29 Aug 2018 10:22:09 -0500 Subject: [PATCH 1081/1692] HID: core: fix NULL pointer dereference There is a NULL pointer dereference in case memory resources for *parse* are not successfully allocated. Fix this by adding a new goto label and make the execution path jump to it in case vzalloc() fails. Addresses-Coverity-ID: 1473081 ("Dereference after null check") Fixes: b2dd9f2e5a8a ("HID: core: fix memory leak on probe") Signed-off-by: Gustavo A. R. Silva Reviewed-by: Stefan Agner Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 44a465db3f96..44564f61e9cc 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device) parser = vzalloc(sizeof(struct hid_parser)); if (!parser) { ret = -ENOMEM; - goto err; + goto alloc_err; } parser->device = device; @@ -1049,6 +1049,7 @@ int hid_open_report(struct hid_device *device) hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); err: kfree(parser->collection_stack); +alloc_err: vfree(parser); hid_close_report(device); return ret; -- GitLab From ade573eb1e03d1ee5abcb3359b1259469ab6e8ed Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 18 Aug 2018 10:12:08 +0200 Subject: [PATCH 1082/1692] HID: sensor-hub: Restore fixup for Lenovo ThinkPad Helix 2 sensor hub report Commit b0f847e16c1e ("HID: hid-sensor-hub: Force logical minimum to 1 for power and report state") not only replaced the descriptor fixup done for devices with the HID_SENSOR_HUB_ENUM_QUIRK with a generic fix, but also accidentally removed the unrelated descriptor fixup for the Lenovo ThinkPad Helix 2 sensor hub. This commit restores this fixup. Restoring this fixup not only fixes the Lenovo ThinkPad Helix 2's sensors, but also the Lenovo ThinkPad 8's sensors. 
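For readers decoding the descriptor offsets matched in the fixup below (background, not part of the patch): in a HID report descriptor the item prefix 0x17 is a 4-byte "Logical Minimum" and 0x27 the matching 4-byte "Logical Maximum", each followed by a little-endian value. The fixup rewrites only the value bytes, for example:

/*
 * before: 17 40 81 08 00  ->  Logical Minimum = 0x00088140 (positive)
 * after:  17 c0 7e f7 ff  ->  Logical Minimum = 0xfff77ec0 = -0x00088140
 *
 * The minimum becomes the negation of its old value and is once again
 * below the Logical Maximum item (27 ...) that follows it in the
 * descriptor, which is what the sensor stack expects for signed axes.
 */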
Fixes: b0f847e16c1e ("HID: hid-sensor-hub: Force logical minimum ...") Cc: Srinivas Pandruvada Cc: Fernando D S Lima Acked-by: Srinivas Pandruvada Signed-off-by: Hans de Goede Signed-off-by: Jiri Kosina --- drivers/hid/hid-sensor-hub.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 50af72baa5ca..2b63487057c2 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev) } EXPORT_SYMBOL_GPL(sensor_hub_device_close); +static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + /* + * Checks if the report descriptor of Thinkpad Helix 2 has a logical + * minimum for magnetic flux axis greater than the maximum. + */ + if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA && + *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 && + rdesc[915] == 0x81 && rdesc[916] == 0x08 && + rdesc[917] == 0x00 && rdesc[918] == 0x27 && + rdesc[921] == 0x07 && rdesc[922] == 0x00) { + /* Sets negative logical minimum for mag x, y and z */ + rdesc[914] = rdesc[935] = rdesc[956] = 0xc0; + rdesc[915] = rdesc[936] = rdesc[957] = 0x7e; + rdesc[916] = rdesc[937] = rdesc[958] = 0xf7; + rdesc[917] = rdesc[938] = rdesc[959] = 0xff; + } + + return rdesc; +} + static int sensor_hub_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = { .probe = sensor_hub_probe, .remove = sensor_hub_remove, .raw_event = sensor_hub_raw_event, + .report_fixup = sensor_hub_report_fixup, #ifdef CONFIG_PM .suspend = sensor_hub_suspend, .resume = sensor_hub_resume, -- GitLab From d9707490077bee0c7060ef5665a90656e1078b66 Mon Sep 17 00:00:00 2001 From: Bruno Meirelles Herrera Date: Mon, 27 Aug 2018 18:36:38 -0300 Subject: [PATCH 1083/1692] usb: dwc2: Fix call location of dwc2_check_core_endianness Some SoC/IP as STM32F469, the snpsid can only be read after clock is enabled, otherwise it will read as 0, and the dwc2_check_core_endianness will assume the core and AHB have opposite endianness, leading to the following error: [ 1.976339] dwc2 50000000.usb: 50000000.usb supply vusb_d not found, using dummy regulator [ 1.986124] dwc2 50000000.usb: Linked as a consumer to regulator.0 [ 1.992711] dwc2 50000000.usb: 50000000.usb supply vusb_a not found, using dummy regulator [ 2.003672] dwc2 50000000.usb: dwc2_core_reset: HANG! AHB Idle timeout GRSTCTL GRSTCTL_AHBIDLE [ 2.015176] dwc2: probe of 50000000.usb failed with error -16 The proposed patch changes the location where dwc2_check_core_endianness is called, allowing the clock peripheral to be enabled first. 
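The underlying rule is generic: identification and configuration registers of an IP block read back reliably only once its interface clock is running. A minimal sketch of the ordering (hypothetical code, the foo_* names and FOO_ID_REG offset are invented; this is not the dwc2 probe itself):

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/types.h>

#define FOO_ID_REG 0x40  /* hypothetical register offset */

struct foo_priv {
    struct clk *clk;
    void __iomem *base;
};

/* Hypothetical helper: sample an ID register only after its clock runs. */
static int foo_read_hw_id(struct foo_priv *priv, u32 *id)
{
    int ret;

    ret = clk_prepare_enable(priv->clk);  /* ungate the register interface */
    if (ret)
        return ret;

    *id = readl(priv->base + FOO_ID_REG); /* may read 0 while the clock is gated */
    return 0;
}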
Acked-by: Minas Harutyunyan Tested-by: Martin Blumenstingl Signed-off-by: Bruno Meirelles Herrera Signed-off-by: Felipe Balbi --- drivers/usb/dwc2/platform.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 9a53a58e676e..577642895b57 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev) dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n", (unsigned long)res->start, hsotg->regs); - hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg); - retval = dwc2_lowlevel_hw_init(hsotg); if (retval) return retval; @@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) return retval; + hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg); + retval = dwc2_get_dr_mode(hsotg); if (retval) goto error; -- GitLab From 288f1ced5e24abe3e768224f701a205c3a7e16f9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Sep 2018 16:31:17 +0100 Subject: [PATCH 1084/1692] drm/i915: Reduce context HW ID lifetime Future gen reduce the number of bits we will have available to differentiate between contexts, so reduce the lifetime of the ID assignment from that of the context to its current active cycle (i.e. only while it is pinned for use by the HW, will it have a constant ID). This means that instead of a max of 2k allocated contexts (worst case before fun with bit twiddling), we instead have a limit of 2k in flight contexts (minus a few that have been pinned by the kernel or by perf). To reduce the number of contexts id we require, we allocate a context id on first and mark it as pinned for as long as the GEM context itself is, that is we keep it pinned it while active on each engine. If we exhaust our context id space, then we try to reclaim an id from an idle context. In the extreme case where all context ids are pinned by active contexts, we force the system to idle in order to recover ids. We cannot reduce the scope of an HW-ID to an engine (allowing the same gem_context to have different ids on each engine) as in the future we will need to preassign an id before we know which engine the context is being executed on. 
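In short, ctx->hw_id is now only guaranteed to be stable between a pin and the matching unpin. A minimal usage sketch against the helpers introduced below (the foo_use_hw_id() wrapper is invented for illustration):

/* Illustrative only: hold the id pinned for as long as the HW may use it. */
static int foo_use_hw_id(struct i915_gem_context *ctx)
{
    int err;

    err = i915_gem_context_pin_hw_id(ctx); /* assigns an id on first pin */
    if (err)
        return err;

    pr_debug("context using hw_id %x\n", ctx->hw_id);

    i915_gem_context_unpin_hw_id(ctx);     /* id may now be stolen when idle */
    return 0;
}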
v2: Improved commentary (Tvrtko) [I tried at least] References: https://bugs.freedesktop.org/show_bug.cgi?id=107788 Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Cc: Tvrtko Ursulin Cc: Mika Kuoppala Cc: Michel Thierry Cc: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20180904153117.3907-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 5 +- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/i915_gem_context.c | 222 +++++++++++++----- drivers/gpu/drm/i915/i915_gem_context.h | 23 ++ drivers/gpu/drm/i915/intel_lrc.c | 8 + drivers/gpu/drm/i915/selftests/mock_context.c | 11 +- 6 files changed, 201 insertions(+), 70 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4ad0e2ed8610..1f7051e97afb 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1953,7 +1953,10 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { - seq_printf(m, "HW context %u ", ctx->hw_id); + seq_puts(m, "HW context "); + if (!list_empty(&ctx->hw_id_link)) + seq_printf(m, "%x [pin %u]", ctx->hw_id, + atomic_read(&ctx->hw_id_pin_count)); if (ctx->pid) { struct task_struct *task; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5a4da5b723fd..767615ecdea5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1861,6 +1861,7 @@ struct drm_i915_private { struct mutex av_mutex; struct { + struct mutex mutex; struct list_head list; struct llist_head free_list; struct work_struct free_work; @@ -1873,6 +1874,7 @@ struct drm_i915_private { #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ #define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */ #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */ + struct list_head hw_id_list; } contexts; u32 fdi_rx_config; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index f15a039772db..747b8170a15a 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -115,6 +115,95 @@ static void lut_close(struct i915_gem_context *ctx) rcu_read_unlock(); } +static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp) +{ + unsigned int max; + + lockdep_assert_held(&i915->contexts.mutex); + + if (INTEL_GEN(i915) >= 11) + max = GEN11_MAX_CONTEXT_HW_ID; + else if (USES_GUC_SUBMISSION(i915)) + /* + * When using GuC in proxy submission, GuC consumes the + * highest bit in the context id to indicate proxy submission. + */ + max = MAX_GUC_CONTEXT_HW_ID; + else + max = MAX_CONTEXT_HW_ID; + + return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp); +} + +static int steal_hw_id(struct drm_i915_private *i915) +{ + struct i915_gem_context *ctx, *cn; + LIST_HEAD(pinned); + int id = -ENOSPC; + + lockdep_assert_held(&i915->contexts.mutex); + + list_for_each_entry_safe(ctx, cn, + &i915->contexts.hw_id_list, hw_id_link) { + if (atomic_read(&ctx->hw_id_pin_count)) { + list_move_tail(&ctx->hw_id_link, &pinned); + continue; + } + + GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */ + list_del_init(&ctx->hw_id_link); + id = ctx->hw_id; + break; + } + + /* + * Remember how far we got up on the last repossesion scan, so the + * list is kept in a "least recently scanned" order. 
+ */ + list_splice_tail(&pinned, &i915->contexts.hw_id_list); + return id; +} + +static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out) +{ + int ret; + + lockdep_assert_held(&i915->contexts.mutex); + + /* + * We prefer to steal/stall ourselves and our users over that of the + * entire system. That may be a little unfair to our users, and + * even hurt high priority clients. The choice is whether to oomkill + * something else, or steal a context id. + */ + ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + if (unlikely(ret < 0)) { + ret = steal_hw_id(i915); + if (ret < 0) /* once again for the correct errno code */ + ret = new_hw_id(i915, GFP_KERNEL); + if (ret < 0) + return ret; + } + + *out = ret; + return 0; +} + +static void release_hw_id(struct i915_gem_context *ctx) +{ + struct drm_i915_private *i915 = ctx->i915; + + if (list_empty(&ctx->hw_id_link)) + return; + + mutex_lock(&i915->contexts.mutex); + if (!list_empty(&ctx->hw_id_link)) { + ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id); + list_del_init(&ctx->hw_id_link); + } + mutex_unlock(&i915->contexts.mutex); +} + static void i915_gem_context_free(struct i915_gem_context *ctx) { unsigned int n; @@ -122,6 +211,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) lockdep_assert_held(&ctx->i915->drm.struct_mutex); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + release_hw_id(ctx); i915_ppgtt_put(ctx->ppgtt); for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { @@ -136,7 +226,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) list_del(&ctx->link); - ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); kfree_rcu(ctx, rcu); } @@ -190,6 +279,12 @@ static void context_close(struct i915_gem_context *ctx) { i915_gem_context_set_closed(ctx); + /* + * This context will never again be assinged to HW, so we can + * reuse its ID for the next context. + */ + release_hw_id(ctx); + /* * The LUT uses the VMA as a backpointer to unref the object, * so we need to clear the LUT before we close all the VMA (inside @@ -203,43 +298,6 @@ static void context_close(struct i915_gem_context *ctx) i915_gem_context_put(ctx); } -static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) -{ - int ret; - unsigned int max; - - if (INTEL_GEN(dev_priv) >= 11) { - max = GEN11_MAX_CONTEXT_HW_ID; - } else { - /* - * When using GuC in proxy submission, GuC consumes the - * highest bit in the context id to indicate proxy submission. - */ - if (USES_GUC_SUBMISSION(dev_priv)) - max = MAX_GUC_CONTEXT_HW_ID; - else - max = MAX_CONTEXT_HW_ID; - } - - - ret = ida_simple_get(&dev_priv->contexts.hw_ida, - 0, max, GFP_KERNEL); - if (ret < 0) { - /* Contexts are only released when no longer active. - * Flush any pending retires to hopefully release some - * stale contexts and try again. 
- */ - i915_retire_requests(dev_priv); - ret = ida_simple_get(&dev_priv->contexts.hw_ida, - 0, max, GFP_KERNEL); - if (ret < 0) - return ret; - } - - *out = ret; - return 0; -} - static u32 default_desc_template(const struct drm_i915_private *i915, const struct i915_hw_ppgtt *ppgtt) { @@ -276,12 +334,6 @@ __create_hw_context(struct drm_i915_private *dev_priv, if (ctx == NULL) return ERR_PTR(-ENOMEM); - ret = assign_hw_id(dev_priv, &ctx->hw_id); - if (ret) { - kfree(ctx); - return ERR_PTR(ret); - } - kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->contexts.list); ctx->i915 = dev_priv; @@ -295,6 +347,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); + INIT_LIST_HEAD(&ctx->hw_id_link); /* Default context will never have a file_priv */ ret = DEFAULT_CONTEXT_HANDLE; @@ -421,15 +474,35 @@ i915_gem_context_create_gvt(struct drm_device *dev) return ctx; } +static void +destroy_kernel_context(struct i915_gem_context **ctxp) +{ + struct i915_gem_context *ctx; + + /* Keep the context ref so that we can free it immediately ourselves */ + ctx = i915_gem_context_get(fetch_and_zero(ctxp)); + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + + context_close(ctx); + i915_gem_context_free(ctx); +} + struct i915_gem_context * i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) { struct i915_gem_context *ctx; + int err; ctx = i915_gem_create_context(i915, NULL); if (IS_ERR(ctx)) return ctx; + err = i915_gem_context_pin_hw_id(ctx); + if (err) { + destroy_kernel_context(&ctx); + return ERR_PTR(err); + } + i915_gem_context_clear_bannable(ctx); ctx->sched.priority = prio; ctx->ring_size = PAGE_SIZE; @@ -439,17 +512,19 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) return ctx; } -static void -destroy_kernel_context(struct i915_gem_context **ctxp) +static void init_contexts(struct drm_i915_private *i915) { - struct i915_gem_context *ctx; + mutex_init(&i915->contexts.mutex); + INIT_LIST_HEAD(&i915->contexts.list); - /* Keep the context ref so that we can free it immediately ourselves */ - ctx = i915_gem_context_get(fetch_and_zero(ctxp)); - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + /* Using the simple ida interface, the max is limited by sizeof(int) */ + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); + BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX); + ida_init(&i915->contexts.hw_ida); + INIT_LIST_HEAD(&i915->contexts.hw_id_list); - context_close(ctx); - i915_gem_context_free(ctx); + INIT_WORK(&i915->contexts.free_work, contexts_free_worker); + init_llist_head(&i915->contexts.free_list); } static bool needs_preempt_context(struct drm_i915_private *i915) @@ -470,14 +545,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) if (ret) return ret; - INIT_LIST_HEAD(&dev_priv->contexts.list); - INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker); - init_llist_head(&dev_priv->contexts.free_list); - - /* Using the simple ida interface, the max is limited by sizeof(int) */ - BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); - BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX); - ida_init(&dev_priv->contexts.hw_ida); + init_contexts(dev_priv); /* lowest priority; idle task */ ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN); @@ -487,9 +555,13 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) } /* * For easy recognisablity, we want the kernel context to be 0 and then - * all user contexts will have non-zero hw_id. 
+ * all user contexts will have non-zero hw_id. Kernel contexts are + * permanently pinned, so that we never suffer a stall and can + * use them from any allocation context (e.g. for evicting other + * contexts and from inside the shrinker). */ GEM_BUG_ON(ctx->hw_id); + GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count)); dev_priv->kernel_context = ctx; /* highest priority; preempting task */ @@ -527,6 +599,7 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915) destroy_kernel_context(&i915->kernel_context); /* Must free all deferred contexts (via flush_workqueue) first */ + GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list)); ida_destroy(&i915->contexts.hw_ida); } @@ -932,6 +1005,33 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, return ret; } +int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) +{ + struct drm_i915_private *i915 = ctx->i915; + int err = 0; + + mutex_lock(&i915->contexts.mutex); + + GEM_BUG_ON(i915_gem_context_is_closed(ctx)); + + if (list_empty(&ctx->hw_id_link)) { + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count)); + + err = assign_hw_id(i915, &ctx->hw_id); + if (err) + goto out_unlock; + + list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list); + } + + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u); + atomic_inc(&ctx->hw_id_pin_count); + +out_unlock: + mutex_unlock(&i915->contexts.mutex); + return err; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_context.c" #include "selftests/i915_gem_context.c" diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 851dad6decd7..e09673ca731d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -134,8 +134,16 @@ struct i915_gem_context { * functions like fault reporting, PASID, scheduling. The * &drm_i915_private.context_hw_ida is used to assign a unqiue * id for the lifetime of the context. + * + * @hw_id_pin_count: - number of times this context had been pinned + * for use (should be, at most, once per engine). + * + * @hw_id_link: - all contexts with an assigned id are tracked + * for possible repossession. 
*/ unsigned int hw_id; + atomic_t hw_id_pin_count; + struct list_head hw_id_link; /** * @user_handle: userspace identifier @@ -254,6 +262,21 @@ static inline void i915_gem_context_set_force_single_submission(struct i915_gem_ __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); } +int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx); +static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) +{ + if (atomic_inc_not_zero(&ctx->hw_id_pin_count)) + return 0; + + return __i915_gem_context_pin_hw_id(ctx); +} + +static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx) +{ + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u); + atomic_dec(&ctx->hw_id_pin_count); +} + static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) { return c->user_handle == DEFAULT_CONTEXT_HANDLE; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index def467c2451b..9b1f0e5211a0 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1272,6 +1272,8 @@ static void execlists_context_destroy(struct intel_context *ce) static void execlists_context_unpin(struct intel_context *ce) { + i915_gem_context_unpin_hw_id(ce->gem_context); + intel_ring_unpin(ce->ring); ce->state->obj->pin_global--; @@ -1330,6 +1332,10 @@ __execlists_context_pin(struct intel_engine_cs *engine, if (ret) goto unpin_map; + ret = i915_gem_context_pin_hw_id(ctx); + if (ret) + goto unpin_ring; + intel_lr_context_descriptor_update(ctx, engine, ce); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; @@ -1342,6 +1348,8 @@ __execlists_context_pin(struct intel_engine_cs *engine, i915_gem_context_get(ctx); return ce; +unpin_ring: + intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); unpin_vma: diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index 8904f1ce64e3..d937bdff26f9 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -43,6 +43,7 @@ mock_context(struct drm_i915_private *i915, INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); + INIT_LIST_HEAD(&ctx->hw_id_link); for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { struct intel_context *ce = &ctx->__engine[n]; @@ -50,11 +51,9 @@ mock_context(struct drm_i915_private *i915, ce->gem_context = ctx; } - ret = ida_simple_get(&i915->contexts.hw_ida, - 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); + ret = i915_gem_context_pin_hw_id(ctx); if (ret < 0) goto err_handles; - ctx->hw_id = ret; if (name) { ctx->name = kstrdup(name, GFP_KERNEL); @@ -85,11 +84,7 @@ void mock_context_close(struct i915_gem_context *ctx) void mock_init_contexts(struct drm_i915_private *i915) { - INIT_LIST_HEAD(&i915->contexts.list); - ida_init(&i915->contexts.hw_ida); - - INIT_WORK(&i915->contexts.free_work, contexts_free_worker); - init_llist_head(&i915->contexts.free_list); + init_contexts(i915); } struct i915_gem_context * -- GitLab From 9b83a1c301ad6d24988a128c69b42cbaaf537d82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maxence=20Dupr=C3=A8s?= Date: Wed, 8 Aug 2018 23:56:33 +0000 Subject: [PATCH 1085/1692] USB: add quirk for WORLDE Controller KS49 or Prodipe MIDI 49C USB controller WORLDE Controller KS49 or Prodipe MIDI 49C USB controller cause a -EPROTO error, a communication restart and loop again. This issue has already been fixed for KS25. 
https://lore.kernel.org/patchwork/patch/753077/ I just add device 201 for KS49 in quirks.c to get it works. Signed-off-by: Laurent Roux Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 097057d2eacf..689a6c65bc5c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -178,6 +178,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* CBM - Flash disk */ { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, + /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */ + { USB_DEVICE(0x0218, 0x0201), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* WORLDE easy key (easykey.25) MIDI controller */ { USB_DEVICE(0x0218, 0x0401), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, -- GitLab From 4937213ba7fafa13f30496b3965ffe93970d8b53 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Fri, 31 Aug 2018 17:24:43 +0300 Subject: [PATCH 1086/1692] xhci: Fix use after free for URB cancellation on a reallocated endpoint Make sure the cancelled URB is on the current endpoint ring. If the endpoint ring has been reallocated since the URB was enqueued then the URB may contain TD and TRB pointers to a already freed ring. In this the case return the URB without touching any of the freed ring structure data. Don't try to stop the ring. It would be useless. This can occur if endpoint is not flushed before it is dropped and re-added, which is the case in usb_set_interface() as xhci does things in an odd order. Cc: Tested-by: Sudip Mukherjee Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 61f48b17e57b..0420eefa647a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -37,6 +37,21 @@ static unsigned long long quirks; module_param(quirks, ullong, S_IRUGO); MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); +static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) +{ + struct xhci_segment *seg = ring->first_seg; + + if (!td || !td->start_seg) + return false; + do { + if (seg == td->start_seg) + return true; + seg = seg->next; + } while (seg && seg != ring->first_seg); + + return false; +} + /* TODO: copied from ehci-hcd.c - can this be refactored? */ /* * xhci_handshake - spin reading hc until handshake completes or fails @@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) goto done; } + /* + * check ring is not re-allocated since URB was enqueued. 
If it is, then + * make sure none of the ring related pointers in this URB private data + * are touched, such as td_list, otherwise we overwrite freed data + */ + if (!td_on_ring(&urb_priv->td[0], ep_ring)) { + xhci_err(xhci, "Canceled URB td not found on endpoint ring"); + for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) { + td = &urb_priv->td[i]; + if (!list_empty(&td->cancelled_td_list)) + list_del_init(&td->cancelled_td_list); + } + goto err_giveback; + } + if (xhci->xhc_state & XHCI_STATE_HALTED) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "HC halted, freeing TD manually."); -- GitLab From 222471f7640d9771a993218d825d84825adc805d Mon Sep 17 00:00:00 2001 From: Anurag Kumar Vulisha Date: Fri, 31 Aug 2018 17:24:42 +0300 Subject: [PATCH 1087/1692] usb: host: xhci-plat: Iterate over parent nodes for finding quirks In xhci_plat_probe() both sysdev and pdev->dev are being used for finding quirks. There are some drivers(like dwc3 host.c) which adds quirks(like usb3-lpm-capable) into pdev and the logic present in xhci_plat_probe() checks for quirks in either sysdev or pdev for finding the quirks. Because of this logic, some of the quirks are getting missed(usb3-lpm-capable quirk added by dwc3 host.c driver is getting missed).This patch fixes this by iterating over all the available parents for finding the quirks. In this way all the quirks which are present in child or parent are correctly updated. Signed-off-by: Anurag Kumar Vulisha Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-plat.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 8dc77e34a859..94e939249b2b 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev) { const struct xhci_plat_priv *priv_match; const struct hc_driver *driver; - struct device *sysdev; + struct device *sysdev, *tmpdev; struct xhci_hcd *xhci; struct resource *res; struct usb_hcd *hcd; @@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev) goto disable_clk; } - if (device_property_read_bool(sysdev, "usb2-lpm-disable")) - xhci->quirks |= XHCI_HW_LPM_DISABLE; + /* imod_interval is the interrupt moderation value in nanoseconds. */ + xhci->imod_interval = 40000; - if (device_property_read_bool(sysdev, "usb3-lpm-capable")) - xhci->quirks |= XHCI_LPM_SUPPORT; + /* Iterate over all parent nodes for finding quirks */ + for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) { - if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped")) - xhci->quirks |= XHCI_BROKEN_PORT_PED; + if (device_property_read_bool(tmpdev, "usb2-lpm-disable")) + xhci->quirks |= XHCI_HW_LPM_DISABLE; - /* imod_interval is the interrupt moderation value in nanoseconds. 
*/ - xhci->imod_interval = 40000; - device_property_read_u32(sysdev, "imod-interval-ns", - &xhci->imod_interval); + if (device_property_read_bool(tmpdev, "usb3-lpm-capable")) + xhci->quirks |= XHCI_LPM_SUPPORT; + + if (device_property_read_bool(tmpdev, "quirk-broken-port-ped")) + xhci->quirks |= XHCI_BROKEN_PORT_PED; + + device_property_read_u32(tmpdev, "imod-interval-ns", + &xhci->imod_interval); + } hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); if (IS_ERR(hcd->usb_phy)) { -- GitLab From 7e10f14ebface44a48275c8d6dc1caae3668d5a9 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 15 Aug 2018 21:44:25 +0100 Subject: [PATCH 1088/1692] USB: yurex: Fix buffer over-read in yurex_write() If the written data starts with a digit, yurex_write() tries to parse it as an integer using simple_strtoull(). This requires a null- terminator, and currently there's no guarantee that there is one. (The sample program at https://github.com/NeoCat/YUREX-driver-for-Linux/blob/master/sample/yurex_clock.pl writes an integer without a null terminator. It seems like it must have worked by chance!) Always add a null byte after the written data. Enlarge the buffer to allow for this. Cc: stable@vger.kernel.org Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/yurex.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 3be40eaa1ac9..1232dd49556d 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -421,13 +421,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, { struct usb_yurex *dev; int i, set = 0, retval = 0; - char buffer[16]; + char buffer[16 + 1]; char *data = buffer; unsigned long long c, c2 = 0; signed long timeout = 0; DEFINE_WAIT(wait); - count = min(sizeof(buffer), count); + count = min(sizeof(buffer) - 1, count); dev = file->private_data; /* verify that we actually have some data to write */ @@ -446,6 +446,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, retval = -EFAULT; goto error; } + buffer[count] = 0; memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); switch (buffer[0]) { -- GitLab From 14427b86837a4baf1c121934c6599bdb67dfa9fc Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 15 Aug 2018 21:45:37 +0100 Subject: [PATCH 1089/1692] USB: yurex: Check for truncation in yurex_read() snprintf() always returns the full length of the string it could have printed, even if it was truncated because the buffer was too small. So in case the counter value is truncated, we will over-read from in_buffer and over-write to the caller's buffer. I don't think it's actually possible for this to happen, but in case truncation occurs, WARN and return -EIO. 
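As background, snprintf() returns the length the formatted string would have had without truncation, so a return value of at least the buffer size is exactly the condition the check below tests for. A minimal sketch of the pattern (hypothetical helper, not the yurex code):

/* Hypothetical helper: format a counter, refusing to use a truncated result. */
static ssize_t format_count(char *buf, size_t bufsize, unsigned long long count)
{
    int len = snprintf(buf, bufsize, "count = %llu\n", count);

    if (len < 0 || (size_t)len >= bufsize)
        return -EIO;  /* using len as a length would over-read buf */

    return len;
}

Where only the number of bytes actually written matters, scnprintf() is the usual alternative, since it returns the truncated length.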
Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/yurex.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 1232dd49556d..6d9fd5f64903 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, spin_unlock_irqrestore(&dev->lock, flags); mutex_unlock(&dev->io_mutex); + if (WARN_ON_ONCE(len >= sizeof(in_buffer))) + return -EIO; + return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); } -- GitLab From 4e3121abcf536f26fd08a4b395c6a6711a961641 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 2 Sep 2018 19:39:55 -0700 Subject: [PATCH 1090/1692] usb/dwc3/gadget: fix kernel-doc parameter warning Fix kernel-doc warning: ../drivers/usb/dwc3/gadget.c:510: warning: Excess function parameter 'dwc' description in 'dwc3_gadget_start_config' Signed-off-by: Randy Dunlap Cc: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/gadget.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 032ea7d709ba..2b53194081ba 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep) /** * dwc3_gadget_start_config - configure ep resources - * @dwc: pointer to our controller context structure * @dep: endpoint that is being enabled * * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's -- GitLab From 49aa5afda2ed9cf6a8819707014385ede895ff87 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 2 Sep 2018 19:30:48 -0700 Subject: [PATCH 1091/1692] usb: typec: fix kernel-doc parameter warning Fix kernel-doc warning (13 times): ../drivers/usb/typec/class.c:1497: warning: Excess function parameter 'drvdata' description in 'typec_port_register_altmode' Signed-off-by: Randy Dunlap Acked-by: Heikki Krogerus Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/class.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index c202975f8097..e61dffb27a0c 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode); * typec_port_register_altmode - Register USB Type-C Port Alternate Mode * @port: USB Type-C Port that supports the alternate mode * @desc: Description of the alternate mode - * @drvdata: Private pointer to driver specific info * * This routine is used to register an alternate mode that @port is capable of * supporting. -- GitLab From f45681f9becaa65111ed0a691ccf080a0cd5feb8 Mon Sep 17 00:00:00 2001 From: Tim Anderson Date: Thu, 9 Aug 2018 14:55:34 -0700 Subject: [PATCH 1092/1692] USB: Add quirk to support DJI CineSSD This device does not correctly handle the LPM operations. Also, the device cannot handle ATA pass-through commands and locks up when attempted while running in super speed. This patch adds the equivalent quirk logic as found in uas. 
Signed-off-by: Tim Anderson Acked-by: Alan Stern Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 3 +++ drivers/usb/storage/scsiglue.c | 9 +++++++++ drivers/usb/storage/unusual_devs.h | 7 +++++++ 3 files changed, 19 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 689a6c65bc5c..e77dfe5ed5ec 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -410,6 +410,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2040, 0x7200), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* DJI CineSSD */ + { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index c267f2812a04..e227bb5b794f 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb, return 0; } + if ((us->fflags & US_FL_NO_ATA_1X) && + (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) { + memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB, + sizeof(usb_stor_sense_invalidCDB)); + srb->result = SAM_STAT_CHECK_CONDITION; + done(srb); + return 0; + } + /* enqueue the command and wake up the control thread */ srb->scsi_done = done; us->srb = srb; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 22fcfccf453a..f7f83b21dc74 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2288,6 +2288,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_GO_SLOW ), +/* Reported-by: Tim Anderson */ +UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999, + "DJI", + "CineSSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* * Reported by Frederic Marchal * Mio Moov 330 -- GitLab From 42d1c6d4a06a77b3ab206a919b9050c3080f3a71 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Thu, 9 Aug 2018 16:03:37 +0200 Subject: [PATCH 1093/1692] usb: uas: add support for more quirk flags The hope that UAS devices would be less broken than old style storage devices has turned out to be unfounded. Make UAS support more of the quirk flags of the old driver. Signed-off-by: Oliver Neukum Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/uas.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 9e9de5452860..1f7b401c4d04 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev) sdev->skip_ms_page_8 = 1; sdev->wce_default_on = 1; } + + /* + * Some disks return the total number of blocks in response + * to READ CAPACITY rather than the highest block number. + * If this device makes that mistake, tell the sd driver. + */ + if (devinfo->flags & US_FL_FIX_CAPACITY) + sdev->fix_capacity = 1; + + /* + * Some devices don't like MODE SENSE with page=0x3f, + * which is the command used for checking if a device + * is write-protected. Now that we tell the sd driver + * to do a 192-byte transfer with this command the + * majority of devices work fine, but a few still can't + * handle it. The sd driver will simply assume those + * devices are write-enabled. 
+ */ + if (devinfo->flags & US_FL_NO_WP_DETECT) + sdev->skip_ms_page_3f = 1; + scsi_change_queue_depth(sdev, devinfo->qdepth - 2); return 0; } -- GitLab From 78af87b8bbbbcaa613f1a7d8f14472fe9a7dc622 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Wed, 29 Aug 2018 10:36:49 +0800 Subject: [PATCH 1094/1692] usb: mtu3: fix error of xhci port id when enable U3 dual role If dual role mode is enabled, when switch u3port0 to device mode, it will affect port id calculation of host(xHCI), specially when host supports multi U2 ports or U3 ports, so need enable its dual role mode, and fix it here. Signed-off-by: Chunfeng Yun Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/mtu3/mtu3_core.c | 6 +++++- drivers/usb/mtu3/mtu3_hw_regs.h | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index eecfd0671362..d045d8458f81 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu) (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_HOST_SEL)); - if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) + if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) { mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); + if (mtu->is_u3_ip) + mtu3_setbits(ibase, SSUSB_U3_CTRL(0), + SSUSB_U3_PORT_DUAL_MODE); + } return ssusb_check_clocks(mtu->ssusb, check_clk); } diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h index 6ee371478d89..a45bb253939f 100644 --- a/drivers/usb/mtu3/mtu3_hw_regs.h +++ b/drivers/usb/mtu3/mtu3_hw_regs.h @@ -459,6 +459,7 @@ /* U3D_SSUSB_U3_CTRL_0P */ #define SSUSB_U3_PORT_SSP_SPEED BIT(9) +#define SSUSB_U3_PORT_DUAL_MODE BIT(7) #define SSUSB_U3_PORT_HOST_SEL BIT(2) #define SSUSB_U3_PORT_PDN BIT(1) #define SSUSB_U3_PORT_DIS BIT(0) -- GitLab From ae45893f74c72e632cbad882509f12558db2e4f3 Mon Sep 17 00:00:00 2001 From: Katsuhiro Suzuki Date: Wed, 5 Sep 2018 17:31:37 +0900 Subject: [PATCH 1095/1692] ASoC: uniphier: change status to orphan Since I'm leaving from Socionext, I'll unable to access specification documents of this hardware (these are not public). So change the state to orphan until someone will maintain this driver. Signed-off-by: Katsuhiro Suzuki Signed-off-by: Mark Brown --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index fc711d23dc83..5ee6b45248e5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13159,9 +13159,8 @@ F: drivers/i2c/busses/i2c-synquacer.c F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt SOCIONEXT UNIPHIER SOUND DRIVER -M: Katsuhiro Suzuki L: alsa-devel@alsa-project.org (moderated for non-subscribers) -S: Maintained +S: Orphan F: sound/soc/uniphier/ SOEKRIS NET48XX LED SUPPORT -- GitLab From f3dc41c5d22b2ca14a0802a65d8cdc33a3882d4e Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Tue, 4 Sep 2018 17:35:16 +0300 Subject: [PATCH 1096/1692] usb: Don't die twice if PCI xhci host is not responding in resume usb_hc_died() should only be called once, and with the primary HCD as parameter. It will mark both primary and secondary hcd's dead. Remove the extra call to usb_cd_died with the shared hcd as parameter. 
Fixes: ff9d78b36f76 ("USB: Set usb_hcd->state and flags for shared roothubs") Signed-off-by: Mathias Nyman Cc: stable Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hcd-pci.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 66fe1b78d952..03432467b05f 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event) event == PM_EVENT_RESTORE); if (retval) { dev_err(dev, "PCI post-resume error %d!\n", retval); - if (hcd->shared_hcd) - usb_hc_died(hcd->shared_hcd); usb_hc_died(hcd); } } -- GitLab From 40de5fe4f45c5b804ea085d7e3f1a72fc6705929 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Sep 2018 12:58:35 -0700 Subject: [PATCH 1097/1692] usb/typec: fix kernel-doc notation warning for typec_match_altmode Fix kernel-doc warning for missing function parameter 'mode' description: ../drivers/usb/typec/bus.c:268: warning: Function parameter or member 'mode' not described in 'typec_match_altmode' Also fix typos for same function documentation. Fixes: 8a37d87d72f0 ("usb: typec: Bus type for alternate modes") Signed-off-by: Randy Dunlap Acked-by: Heikki Krogerus Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/bus.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index 95a2b10127db..76299b6ff06d 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver); /* API for the port drivers */ /** - * typec_match_altmode - Match SVID to an array of alternate modes + * typec_match_altmode - Match SVID and mode to an array of alternate modes * @altmodes: Array of alternate modes - * @n: Number of elements in the array, or -1 for NULL termiated arrays + * @n: Number of elements in the array, or -1 for NULL terminated arrays * @svid: Standard or Vendor ID to match with + * @mode: Mode to match with * - * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no + * Return pointer to an alternate mode with SVID matching @svid, or NULL when no * match is found. 
*/ struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, -- GitLab From d23df2dc56325c72b51670b1fb400ddd23dc17cd Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Sep 2018 12:51:59 -0700 Subject: [PATCH 1098/1692] linux/mod_devicetable.h: fix kernel-doc missing notation for typec_device_id Fix kernel-doc warning for missing struct member description: ../include/linux/mod_devicetable.h:763: warning: Function parameter or member 'driver_data' not described in 'typec_device_id' Fixes: 8a37d87d72f0c ("usb: typec: Bus type for alternate modes") Signed-off-by: Randy Dunlap Cc: Heikki Krogerus Reviewed-by: Heikki Krogerus Signed-off-by: Greg Kroah-Hartman --- include/linux/mod_devicetable.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 1298a7daa57d..01797cb4587e 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -754,6 +754,7 @@ struct tb_service_id { * struct typec_device_id - USB Type-C alternate mode identifiers * @svid: Standard or Vendor ID * @mode: Mode index + * @driver_data: Driver specific data */ struct typec_device_id { __u16 svid; -- GitLab From f9a5b4f58b280c1d26255376713c132f93837621 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Mon, 3 Sep 2018 15:44:16 +0300 Subject: [PATCH 1099/1692] usb: Avoid use-after-free by flushing endpoints early in usb_set_interface() The steps taken by usb core to set a new interface is very different from what is done on the xHC host side. xHC hardware will do everything in one go. One command is used to set up new endpoints, free old endpoints, check bandwidth, and run the new endpoints. All this is done by xHC when usb core asks the hcd to check for available bandwidth. At this point usb core has not yet flushed the old endpoints, which will cause use-after-free issues in xhci driver as queued URBs are cancelled on a re-allocated endpoint. To resolve this add a call to usb_disable_interface() which will flush the endpoints before calling usb_hcd_alloc_bandwidth() Additional checks in xhci driver will also be implemented to gracefully handle stale URB cancel on freed and re-allocated endpoints Cc: Reported-by: Sudip Mukherjee Signed-off-by: Mathias Nyman Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/message.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 228672f2c4a1..bfa5eda0cc26 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev, * is submitted that needs that bandwidth. Some other operating systems * allocate bandwidth early, when a configuration is chosen. * + * xHCI reserves bandwidth and configures the alternate setting in + * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting + * may be disabled. Drivers cannot rely on any particular alternate + * setting being in effect after a failure. + * * This call is synchronous, and may not be used in an interrupt context. * Also, drivers must not change altsettings while urbs are scheduled for * endpoints in that interface; all such urbs must first be completed @@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate) alternate); return -EINVAL; } + /* + * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth, + * including freeing dropped endpoint ring buffers. 
+	 * Make sure the interface endpoints are flushed before that
+	 */
+	usb_disable_interface(dev, iface, false);
 	/* Make sure we have enough bandwidth for this alternate interface.
 	 * Remove the current alt setting and add the new alt setting.
--
GitLab

From 6d4f268fa132742fe96dad22307c68d237356d88 Mon Sep 17 00:00:00 2001
From: Jia-Ju Bai
Date: Sat, 1 Sep 2018 17:23:47 +0800
Subject: [PATCH 1100/1692] usb: host: u132-hcd: Fix a sleep-in-atomic-context bug in u132_get_frame()

i_usX2Y_subs_startup in usbusx2yaudio.c is a completion handler function for the USB driver. So it should not sleep, but it can sleep according to the function call paths (from bottom to top) in Linux-4.16.

[FUNC] msleep
drivers/usb/host/u132-hcd.c, 2558: msleep in u132_get_frame
drivers/usb/core/hcd.c, 2231: [FUNC_PTR]u132_get_frame in usb_hcd_get_frame_number
drivers/usb/core/usb.c, 822: usb_hcd_get_frame_number in usb_get_current_frame_number
sound/usb/usx2y/usbusx2yaudio.c, 303: usb_get_current_frame_number in i_usX2Y_urb_complete
sound/usb/usx2y/usbusx2yaudio.c, 366: i_usX2Y_urb_complete in i_usX2Y_subs_startup

Note that [FUNC_PTR] means a function pointer call is used.

To fix this bug, msleep() is replaced with mdelay().

This bug is found by my static analysis tool DSAC.
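The distinction being relied on: msleep() goes through the scheduler and must not be used where the caller may hold a spinlock or run in interrupt context, whereas mdelay() busy-waits and is therefore safe, if wasteful, in atomic context. A trivial sketch (hypothetical helpers):

#include <linux/delay.h>

/* May sleep: only valid in process context with no spinlocks held. */
static void foo_settle_sleeping(void)
{
    msleep(100);
}

/* Busy-waits: the only option when the caller might be atomic. */
static void foo_settle_atomic(void)
{
    mdelay(100);
}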
Signed-off-by: Jia-Ju Bai
Cc: stable
Signed-off-by: Greg Kroah-Hartman
---
 drivers/usb/host/u132-hcd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 072bd5d5738e..5b8a3d9530c4 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
 	} else {
 		int frame = 0;
 		dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
-		msleep(100);
+		mdelay(100);
 		return frame;
 	}
 }
--
GitLab

From bc8acc214d3f1cafebcbcd101a695bbac716595d Mon Sep 17 00:00:00 2001
From: Jia-Ju Bai
Date: Sat, 1 Sep 2018 16:25:08 +0800
Subject: [PATCH 1101/1692] usb: misc: uss720: Fix two sleep-in-atomic-context bugs

async_complete() in uss720.c is a completion handler function for the USB driver. So it should not sleep, but it can sleep according to the function call paths (from bottom to top) in Linux-4.16.

[FUNC] set_1284_register(GFP_KERNEL)
drivers/usb/misc/uss720.c, 372: set_1284_register in parport_uss720_frob_control
drivers/parport/ieee1284.c, 560: [FUNC_PTR]parport_uss720_frob_control in parport_ieee1284_ack_data_avail
drivers/parport/ieee1284.c, 577: parport_ieee1284_ack_data_avail in parport_ieee1284_interrupt
./include/linux/parport.h, 474: parport_ieee1284_interrupt in parport_generic_irq
drivers/usb/misc/uss720.c, 116: parport_generic_irq in async_complete

[FUNC] get_1284_register(GFP_KERNEL)
drivers/usb/misc/uss720.c, 382: get_1284_register in parport_uss720_read_status
drivers/parport/ieee1284.c, 555: [FUNC_PTR]parport_uss720_read_status in parport_ieee1284_ack_data_avail
drivers/parport/ieee1284.c, 577: parport_ieee1284_ack_data_avail in parport_ieee1284_interrupt
./include/linux/parport.h, 474: parport_ieee1284_interrupt in parport_generic_irq
drivers/usb/misc/uss720.c, 116: parport_generic_irq in async_complete

Note that [FUNC_PTR] means a function pointer call is used.

To fix these bugs, GFP_KERNEL is replaced with GFP_ATOMIC. These bugs are found by my static analysis tool DSAC.
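The same constraint motivates the GFP_ATOMIC change below: URB completion handlers run in interrupt context, so any allocation or URB resubmission they trigger must not sleep. A minimal sketch of a well-behaved completion handler (hypothetical driver, the foo_* names are invented):

#include <linux/spinlock.h>
#include <linux/usb.h>

struct foo_dev {
    spinlock_t lock;
    int frame;
};

/* Hypothetical URB completion handler: runs in interrupt context, so only
 * non-sleeping primitives (spinlocks, GFP_ATOMIC, *delay()) are allowed. */
static void foo_irq_complete(struct urb *urb)
{
    struct foo_dev *foo = urb->context;
    unsigned long flags;

    spin_lock_irqsave(&foo->lock, flags);
    foo->frame = urb->start_frame;
    spin_unlock_irqrestore(&foo->lock, flags);

    /* Resubmit without sleeping; GFP_KERNEL here could sleep and warn. */
    if (usb_submit_urb(urb, GFP_ATOMIC))
        dev_err(&urb->dev->dev, "urb resubmit failed\n");
}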
Signed-off-by: Anders Roxell Reviewed-by: Fathi Boudra Signed-off-by: Shuah Khan (Samsung OSG) --- Makefile | 14 +------------- scripts/subarch.include | 13 +++++++++++++ tools/testing/selftests/android/Makefile | 2 +- tools/testing/selftests/android/ion/Makefile | 2 ++ tools/testing/selftests/futex/functional/Makefile | 1 + tools/testing/selftests/gpio/Makefile | 7 ++----- tools/testing/selftests/kvm/Makefile | 7 ++----- tools/testing/selftests/lib.mk | 12 ++++++++++++ tools/testing/selftests/net/Makefile | 1 + .../selftests/networking/timestamping/Makefile | 1 + tools/testing/selftests/vm/Makefile | 4 ---- 11 files changed, 36 insertions(+), 28 deletions(-) create mode 100644 scripts/subarch.include diff --git a/Makefile b/Makefile index 2b458801ba74..8b3fbdb2759b 100644 --- a/Makefile +++ b/Makefile @@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION -# SUBARCH tells the usermode build what the underlying arch is. That is set -# first, and if a usermode build is happening, the "ARCH=um" on the command -# line overrides the setting of ARCH below. If a native build is happening, -# then ARCH is assigned, getting whatever value it gets normally, and -# SUBARCH is subsequently ignored. - -SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ - -e s/sun4u/sparc64/ \ - -e s/arm.*/arm/ -e s/sa110/arm/ \ - -e s/s390x/s390/ -e s/parisc64/parisc/ \ - -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ - -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ - -e s/riscv.*/riscv/) +include scripts/subarch.include # Cross compiling and selecting different set of gcc/bin-utils # --------------------------------------------------------------------------- diff --git a/scripts/subarch.include b/scripts/subarch.include new file mode 100644 index 000000000000..650682821126 --- /dev/null +++ b/scripts/subarch.include @@ -0,0 +1,13 @@ +# SUBARCH tells the usermode build what the underlying arch is. That is set +# first, and if a usermode build is happening, the "ARCH=um" on the command +# line overrides the setting of ARCH below. If a native build is happening, +# then ARCH is assigned, getting whatever value it gets normally, and +# SUBARCH is subsequently ignored. + +SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ + -e s/sun4u/sparc64/ \ + -e s/arm.*/arm/ -e s/sa110/arm/ \ + -e s/s390x/s390/ -e s/parisc64/parisc/ \ + -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ + -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ + -e s/riscv.*/riscv/) diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile index 72c25a3cb658..d9a725478375 100644 --- a/tools/testing/selftests/android/Makefile +++ b/tools/testing/selftests/android/Makefile @@ -6,7 +6,7 @@ TEST_PROGS := run.sh include ../lib.mk -all: +all: khdr @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile index e03695287f76..88cfe88e466f 100644 --- a/tools/testing/selftests/android/ion/Makefile +++ b/tools/testing/selftests/android/ion/Makefile @@ -10,6 +10,8 @@ $(TEST_GEN_FILES): ipcsocket.c ionutils.c TEST_PROGS := ion_test.sh +KSFT_KHDR_INSTALL := 1 +top_srcdir = ../../../../.. 
include ../../lib.mk $(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index ff8feca49746..ad1eeb14fda7 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -18,6 +18,7 @@ TEST_GEN_FILES := \ TEST_PROGS := run.sh +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_FILES): $(HEADERS) diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile index 1bbb47565c55..4665cdbf1a8d 100644 --- a/tools/testing/selftests/gpio/Makefile +++ b/tools/testing/selftests/gpio/Makefile @@ -21,11 +21,8 @@ endef CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ LDLIBS += -lmount -I/usr/include/libmount -$(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h +$(BINARIES):| khdr +$(BINARIES): ../../../gpio/gpio-utils.o ../../../gpio/gpio-utils.o: make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio - -../../../../usr/include/linux/gpio.h: - make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/ - diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 03b0f551bedf..87d1a8488af8 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -37,9 +37,6 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ) $(AR) crs $@ $^ -$(LINUX_HDR_PATH): - make -C $(top_srcdir) headers_install - -all: $(STATIC_LIBS) $(LINUX_HDR_PATH) +all: $(STATIC_LIBS) $(TEST_GEN_PROGS): $(STATIC_LIBS) -$(TEST_GEN_PROGS) $(LIBKVM_OBJ): | $(LINUX_HDR_PATH) +$(STATIC_LIBS):| khdr diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 17ab36605a8e..0a8e75886224 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -16,8 +16,20 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED)) TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) +top_srcdir ?= ../../../.. +include $(top_srcdir)/scripts/subarch.include +ARCH ?= $(SUBARCH) + all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) +.PHONY: khdr +khdr: + make ARCH=$(ARCH) -C $(top_srcdir) headers_install + +ifdef KSFT_KHDR_INSTALL +$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr +endif + .ONESHELL: define RUN_TEST_PRINT_RESULT TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \ diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 9cca68e440a0..919aa2ac00af 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -15,6 +15,7 @@ TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls +KSFT_KHDR_INSTALL := 1 include ../lib.mk $(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile index a728040edbe1..14cfcf006936 100644 --- a/tools/testing/selftests/networking/timestamping/Makefile +++ b/tools/testing/selftests/networking/timestamping/Makefile @@ -5,6 +5,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp all: $(TEST_PROGS) +top_srcdir = ../../../../.. 
include ../../lib.mk clean: diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 9881876d2aa0..e94b7b14bcb2 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -26,10 +26,6 @@ TEST_PROGS := run_vmtests include ../lib.mk -$(OUTPUT)/userfaultfd: ../../../../usr/include/linux/kernel.h $(OUTPUT)/userfaultfd: LDLIBS += -lpthread $(OUTPUT)/mlock-random-test: LDLIBS += -lcap - -../../../../usr/include/linux/kernel.h: - make -C ../../../.. headers_install -- GitLab From 3a3539cd36327c6f9e0ffd9f3fd3dea7ff8b3567 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 5 Sep 2018 12:16:00 +0200 Subject: [PATCH 1104/1692] mlxsw: spectrum_buffers: Set up a dedicated pool for BUM traffic MC-aware mode was recently enabled by mlxsw on Spectrum switches in commit 7b8195306694 ("mlxsw: spectrum: Configure MC-aware mode on mlxsw ports"). Unfortunately, testing has shown that the fix is incomplete and in the presented form actually makes the problem even worse, because any amount of MC traffic causes UC disruption. The reason for this is that currently, mlxsw configures the MC-specific TCs (8..15) to map to pool 0. It also configures a maximum buffer size of 0, but for MC traffic that maximum is disregarded and not part of the quota. Therefore MC traffic is always admitted to the egress buffer. Fix the configuration by directing the MC TCs into pool 15, which is dedicated to MC traffic and recognized as such by the silicon. Fixes: 7b8195306694 ("mlxsw: spectrum: Configure MC-aware mode on mlxsw ports") Signed-off-by: Petr Machata Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_buffers.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 4327487553c5..3589432d1643 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { MLXSW_SP_SB_CM(1500, 9, 0), MLXSW_SP_SB_CM(1500, 9, 0), MLXSW_SP_SB_CM(1500, 9, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), - MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 140000, 15), + MLXSW_SP_SB_CM(0, 140000, 15), + MLXSW_SP_SB_CM(0, 140000, 15), + MLXSW_SP_SB_CM(0, 140000, 15), + MLXSW_SP_SB_CM(0, 140000, 15), + MLXSW_SP_SB_CM(0, 140000, 15), + MLXSW_SP_SB_CM(0, 140000, 15), + MLXSW_SP_SB_CM(0, 140000, 15), MLXSW_SP_SB_CM(1, 0xff, 0), }; -- GitLab From 9d7f19dc4673fbafebfcbf30eb90e09fa7d1c037 Mon Sep 17 00:00:00 2001 From: Petr Oros Date: Wed, 5 Sep 2018 14:37:45 +0200 Subject: [PATCH 1105/1692] be2net: Fix memory leak in be_cmd_get_profile_config() DMA allocated memory is lost in be_cmd_get_profile_config() when we call it with non-NULL port_res parameter. Signed-off-by: Petr Oros Reviewed-by: Ivan Vecera Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_cmds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index ff92ab1daeb8..1e9d882c04ef 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, port_res->max_vfs += le16_to_cpu(pcie->num_vfs); } } - return status; + goto err; } pcie = be_get_pcie_desc(resp->func_param, desc_count, -- GitLab From e65a9e480e91ddf9e15155454d370cead64689c8 Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Wed, 5 Sep 2018 15:23:18 +0200 Subject: [PATCH 1106/1692] net: qca_spi: Fix race condition in spi transfers With performance optimization the spi transfer and messages of basic register operations like qcaspi_read_register moved into the private driver structure. But they weren't protected against mutual access (e.g. between driver kthread and ethtool). So dumping the QCA7000 registers via ethtool during network traffic could make spi_sync hang forever, because the completion in spi_message is overwritten. So revert the optimization completely. Fixes: 291ab06ecf676 ("net: qualcomm: new Ethernet over SPI driver for QCA700") Signed-off-by: Stefan Wahren Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/qca_7k.c | 76 ++++++++-------- drivers/net/ethernet/qualcomm/qca_spi.c | 110 ++++++++++++------------ drivers/net/ethernet/qualcomm/qca_spi.h | 5 -- 3 files changed, 93 insertions(+), 98 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c index ffe7a16bdfc8..6c8543fb90c0 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.c +++ b/drivers/net/ethernet/qualcomm/qca_7k.c @@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result) { __be16 rx_data; __be16 tx_data; - struct spi_transfer *transfer; - struct spi_message *msg; + struct spi_transfer transfer[2]; + struct spi_message msg; int ret; + memset(transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); + *result = 0; + + transfer[0].tx_buf = &tx_data; + transfer[0].len = QCASPI_CMD_LEN; + transfer[1].rx_buf = &rx_data; + transfer[1].len = QCASPI_CMD_LEN; + + spi_message_add_tail(&transfer[0], &msg); if (qca->legacy_mode) { - msg = &qca->spi_msg1; - transfer = &qca->spi_xfer1; - transfer->tx_buf = &tx_data; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - spi_sync(qca->spi_dev, msg); - } else { - msg = &qca->spi_msg2; - transfer = &qca->spi_xfer2[0]; - transfer->tx_buf = &tx_data; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - transfer = &qca->spi_xfer2[1]; + spi_sync(qca->spi_dev, &msg); + spi_message_init(&msg); } - transfer->tx_buf = NULL; - transfer->rx_buf = &rx_data; - transfer->len = QCASPI_CMD_LEN; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer[1], &msg); + ret = spi_sync(qca->spi_dev, &msg); if (!ret) - ret = msg->status; + ret = msg.status; if (ret) qcaspi_spi_error(qca); @@ -86,35 +85,32 @@ int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) { __be16 tx_data[2]; - struct spi_transfer *transfer; - struct spi_message *msg; + struct spi_transfer transfer[2]; + struct spi_message msg; int ret; + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); tx_data[1] = 
cpu_to_be16(value); + transfer[0].tx_buf = &tx_data[0]; + transfer[0].len = QCASPI_CMD_LEN; + transfer[1].tx_buf = &tx_data[1]; + transfer[1].len = QCASPI_CMD_LEN; + + spi_message_add_tail(&transfer[0], &msg); if (qca->legacy_mode) { - msg = &qca->spi_msg1; - transfer = &qca->spi_xfer1; - transfer->tx_buf = &tx_data[0]; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - spi_sync(qca->spi_dev, msg); - } else { - msg = &qca->spi_msg2; - transfer = &qca->spi_xfer2[0]; - transfer->tx_buf = &tx_data[0]; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - transfer = &qca->spi_xfer2[1]; + spi_sync(qca->spi_dev, &msg); + spi_message_init(&msg); } - transfer->tx_buf = &tx_data[1]; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer[1], &msg); + ret = spi_sync(qca->spi_dev, &msg); if (!ret) - ret = msg->status; + ret = msg.status; if (ret) qcaspi_spi_error(qca); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 206f0266463e..66b775d462fd 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -99,22 +99,24 @@ static u32 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) { __be16 cmd; - struct spi_message *msg = &qca->spi_msg2; - struct spi_transfer *transfer = &qca->spi_xfer2[0]; + struct spi_message msg; + struct spi_transfer transfer[2]; int ret; + memset(&transfer, 0, sizeof(transfer)); + spi_message_init(&msg); + cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); - transfer->tx_buf = &cmd; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - transfer = &qca->spi_xfer2[1]; - transfer->tx_buf = src; - transfer->rx_buf = NULL; - transfer->len = len; + transfer[0].tx_buf = &cmd; + transfer[0].len = QCASPI_CMD_LEN; + transfer[1].tx_buf = src; + transfer[1].len = len; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer[0], &msg); + spi_message_add_tail(&transfer[1], &msg); + ret = spi_sync(qca->spi_dev, &msg); - if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { + if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { qcaspi_spi_error(qca); return 0; } @@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) static u32 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) { - struct spi_message *msg = &qca->spi_msg1; - struct spi_transfer *transfer = &qca->spi_xfer1; + struct spi_message msg; + struct spi_transfer transfer; int ret; - transfer->tx_buf = src; - transfer->rx_buf = NULL; - transfer->len = len; + memset(&transfer, 0, sizeof(transfer)); + spi_message_init(&msg); + + transfer.tx_buf = src; + transfer.len = len; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer, &msg); + ret = spi_sync(qca->spi_dev, &msg); - if (ret || (msg->actual_length != len)) { + if (ret || (msg.actual_length != len)) { qcaspi_spi_error(qca); return 0; } @@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) static u32 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) { - struct spi_message *msg = &qca->spi_msg2; + struct spi_message msg; __be16 cmd; - struct spi_transfer *transfer = &qca->spi_xfer2[0]; + struct spi_transfer transfer[2]; int ret; + memset(&transfer, 0, sizeof(transfer)); + spi_message_init(&msg); + cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); - transfer->tx_buf = &cmd; - transfer->rx_buf = NULL; - transfer->len = QCASPI_CMD_LEN; - transfer = &qca->spi_xfer2[1]; - 
transfer->tx_buf = NULL; - transfer->rx_buf = dst; - transfer->len = len; + transfer[0].tx_buf = &cmd; + transfer[0].len = QCASPI_CMD_LEN; + transfer[1].rx_buf = dst; + transfer[1].len = len; - ret = spi_sync(qca->spi_dev, msg); + spi_message_add_tail(&transfer[0], &msg); + spi_message_add_tail(&transfer[1], &msg); + ret = spi_sync(qca->spi_dev, &msg); - if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { + if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) { qcaspi_spi_error(qca); return 0; } @@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) static u32 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) { - struct spi_message *msg = &qca->spi_msg1; - struct spi_transfer *transfer = &qca->spi_xfer1; + struct spi_message msg; + struct spi_transfer transfer; int ret; - transfer->tx_buf = NULL; - transfer->rx_buf = dst; - transfer->len = len; + memset(&transfer, 0, sizeof(transfer)); + spi_message_init(&msg); - ret = spi_sync(qca->spi_dev, msg); + transfer.rx_buf = dst; + transfer.len = len; - if (ret || (msg->actual_length != len)) { + spi_message_add_tail(&transfer, &msg); + ret = spi_sync(qca->spi_dev, &msg); + + if (ret || (msg.actual_length != len)) { qcaspi_spi_error(qca); return 0; } @@ -195,19 +205,23 @@ static int qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) { __be16 tx_data; - struct spi_message *msg = &qca->spi_msg1; - struct spi_transfer *transfer = &qca->spi_xfer1; + struct spi_message msg; + struct spi_transfer transfer; int ret; + memset(&transfer, 0, sizeof(transfer)); + + spi_message_init(&msg); + tx_data = cpu_to_be16(cmd); - transfer->len = sizeof(tx_data); - transfer->tx_buf = &tx_data; - transfer->rx_buf = NULL; + transfer.len = sizeof(cmd); + transfer.tx_buf = &tx_data; + spi_message_add_tail(&transfer, &msg); - ret = spi_sync(qca->spi_dev, msg); + ret = spi_sync(qca->spi_dev, &msg); if (!ret) - ret = msg->status; + ret = msg.status; if (ret) qcaspi_spi_error(qca); @@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev) qca = netdev_priv(dev); memset(qca, 0, sizeof(struct qcaspi)); - memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer)); - memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2); - - spi_message_init(&qca->spi_msg1); - spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1); - - spi_message_init(&qca->spi_msg2); - spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2); - spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2); - memset(&qca->txr, 0, sizeof(qca->txr)); qca->txr.count = TX_RING_MAX_LEN; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index fc4beb1b32d1..fc0e98726b36 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -83,11 +83,6 @@ struct qcaspi { struct tx_ring txr; struct qcaspi_stats stats; - struct spi_message spi_msg1; - struct spi_message spi_msg2; - struct spi_transfer spi_xfer1; - struct spi_transfer spi_xfer2[2]; - u8 *rx_buffer; u32 buffer_size; u8 sync; -- GitLab From 865e63b04e9b2a658d7f26bd13a71dcd964a9118 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 4 Sep 2018 16:26:11 -0400 Subject: [PATCH 1107/1692] tracing: Add back in rcu_irq_enter/exit_irqson() for rcuidle tracepoints Borislav reported the following splat: ============================= WARNING: suspicious RCU usage 4.19.0-rc1+ #1 Not tainted ----------------------------- ./include/linux/rcupdate.h:631 rcu_read_lock() used illegally while idle! 
other info that might help us debug this: RCU used illegally from idle CPU! rcu_scheduler_active = 2, debug_locks = 1 RCU used illegally from extended quiescent state! 1 lock held by swapper/0/0: #0: 000000004557ee0e (rcu_read_lock){....}, at: perf_event_output_forward+0x0/0x130 stack backtrace: CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.19.0-rc1+ #1 Hardware name: LENOVO 2320CTO/2320CTO, BIOS G2ET86WW (2.06 ) 11/13/2012 Call Trace: dump_stack+0x85/0xcb perf_event_output_forward+0xf6/0x130 __perf_event_overflow+0x52/0xe0 perf_swevent_overflow+0x91/0xb0 perf_tp_event+0x11a/0x350 ? find_held_lock+0x2d/0x90 ? __lock_acquire+0x2ce/0x1350 ? __lock_acquire+0x2ce/0x1350 ? retint_kernel+0x2d/0x2d ? find_held_lock+0x2d/0x90 ? tick_nohz_get_sleep_length+0x83/0xb0 ? perf_trace_cpu+0xbb/0xd0 ? perf_trace_buf_alloc+0x5a/0xa0 perf_trace_cpu+0xbb/0xd0 cpuidle_enter_state+0x185/0x340 do_idle+0x1eb/0x260 cpu_startup_entry+0x5f/0x70 start_kernel+0x49b/0x4a6 secondary_startup_64+0xa4/0xb0 This is due to the tracepoints moving to SRCU usage which does not require RCU to be "watching". But perf uses these tracepoints with RCU and expects it to be. Hence, we still need to add in the rcu_irq_enter/exit_irqson() calls for "rcuidle" tracepoints. This is a temporary fix until we have SRCU working in NMI context, and then perf can be converted to use that instead of normal RCU. Link: http://lkml.kernel.org/r/20180904162611.6a120068@gandalf.local.home Cc: x86-ml Cc: Peter Zijlstra Reported-by: Borislav Petkov Tested-by: Borislav Petkov Reviewed-by: "Paul E. McKenney" Fixes: e6753f23d961d ("tracepoint: Make rcuidle tracepoint callers use SRCU") Signed-off-by: Steven Rostedt (VMware) --- include/linux/tracepoint.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 7f2e16e76ac4..041f7e56a289 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -158,8 +158,10 @@ extern void syscall_unregfunc(void); * For rcuidle callers, use srcu since sched-rcu \ * doesn't work from the idle path. 
\ */ \ - if (rcuidle) \ + if (rcuidle) { \ idx = srcu_read_lock_notrace(&tracepoint_srcu); \ + rcu_irq_enter_irqson(); \ + } \ \ it_func_ptr = rcu_dereference_raw((tp)->funcs); \ \ @@ -171,8 +173,10 @@ extern void syscall_unregfunc(void); } while ((++it_func_ptr)->func); \ } \ \ - if (rcuidle) \ + if (rcuidle) { \ + rcu_irq_exit_irqson(); \ srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ + } \ \ preempt_enable_notrace(); \ } while (0) -- GitLab From 53cf59d6c0ad3edc4f4449098706a8f8986258b6 Mon Sep 17 00:00:00 2001 From: Lei Yang Date: Wed, 5 Sep 2018 11:14:49 +0800 Subject: [PATCH 1108/1692] selftests/efivarfs: add required kernel configs add config file Signed-off-by: Lei Yang Signed-off-by: Shuah Khan (Samsung OSG) --- tools/testing/selftests/efivarfs/config | 1 + 1 file changed, 1 insertion(+) create mode 100644 tools/testing/selftests/efivarfs/config diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config new file mode 100644 index 000000000000..4e151f1005b2 --- /dev/null +++ b/tools/testing/selftests/efivarfs/config @@ -0,0 +1 @@ +CONFIG_EFIVAR_FS=y -- GitLab From 4d85af102a66ee6aeefa596f273169e77fb2b48e Mon Sep 17 00:00:00 2001 From: Lei Yang Date: Wed, 5 Sep 2018 17:57:15 +0800 Subject: [PATCH 1109/1692] selftests: memory-hotplug: add required configs add CONFIG_MEMORY_HOTREMOVE=y in config without this config, /sys/devices/system/memory/memory*/removable always return 0, I endup getting an early skip during test Signed-off-by: Lei Yang Signed-off-by: Shuah Khan (Samsung OSG) --- tools/testing/selftests/memory-hotplug/config | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config index 2fde30191a47..a7e8cd5bb265 100644 --- a/tools/testing/selftests/memory-hotplug/config +++ b/tools/testing/selftests/memory-hotplug/config @@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_SPARSE=y CONFIG_NOTIFIER_ERROR_INJECTION=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m +CONFIG_MEMORY_HOTREMOVE=y -- GitLab From acb3ef0ee40ea657280a4a11d9f60eb2937c0dca Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 5 Sep 2018 13:00:05 +0300 Subject: [PATCH 1110/1692] drm/i915/bdw: Increase IPS disable timeout to 100ms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During IPS disabling the current 42ms timeout value leads to occasional timeouts, increase it to 100ms which seems to get rid of the problem. References: https://bugs.freedesktop.org/show_bug.cgi?id=107494 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107562 Reported-by: Diego Viola Tested-by: Diego Viola Cc: Diego Viola Cc: Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180905100005.7663-1-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_display.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b79ad9c57d35..1c7321dadd84 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5082,10 +5082,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) mutex_lock(&dev_priv->pcu_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); mutex_unlock(&dev_priv->pcu_lock); - /* wait for pcode to finish disabling IPS, which may take up to 42ms */ + /* + * Wait for PCODE to finish disabling IPS. 
The BSpec specified + * 42ms timeout value leads to occasional timeouts so use 100ms + * instead. + */ if (intel_wait_for_register(dev_priv, IPS_CTL, IPS_ENABLE, 0, - 42)) + 100)) DRM_ERROR("Timed out waiting for IPS disable\n"); } else { I915_WRITE(IPS_CTL, 0); -- GitLab From d07f05fb86439c41dd6967c94be3ba3837b21567 Mon Sep 17 00:00:00 2001 From: Peter Robinson Date: Sat, 21 Jul 2018 00:02:12 +0100 Subject: [PATCH 1111/1692] hwmon: rpi: add module alias to raspberrypi-hwmon The raspberrypi-hwmon driver doesn't automatically load, although it does work when loaded, by adding the alias it auto loads as expected when built as a module. Tested on RPi2/RPi3 on 32 bit kernel and RPi3B+ on aarch64 with Fedora 28 and a patched 4.18 RC kernel. Fixes: 3c493c885cf ("hwmon: Add support for RPi voltage sensor") Signed-off-by: Peter Robinson CC: Stefan Wahren CC: Eric Anholt Acked-by: Guenter Roeck Tested-by: Stefan Wahren Reviewed-by: Eric Anholt Signed-off-by: Florian Fainelli --- drivers/hwmon/raspberrypi-hwmon.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c index fb4e4a6bb1f6..be5ba4690895 100644 --- a/drivers/hwmon/raspberrypi-hwmon.c +++ b/drivers/hwmon/raspberrypi-hwmon.c @@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver); MODULE_AUTHOR("Stefan Wahren "); MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:raspberrypi-hwmon"); -- GitLab From 8407879c4e0d7731f6e7e905893cecf61a7762c7 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 3 Sep 2018 03:47:07 -0700 Subject: [PATCH 1112/1692] nvmet-rdma: fix possible bogus dereference under heavy load Currently we always repost the recv buffer before we send a response capsule back to the host. Since ordering is not guaranteed for send and recv completions, it is posible that we will receive a new request from the host before we got a send completion for the response capsule. Today, we pre-allocate 2x rsps the length of the queue, but in reality, under heavy load there is nothing that is really preventing the gap to expand until we exhaust all our rsps. To fix this, if we don't have any pre-allocated rsps left, we dynamically allocate a rsp and make sure to free it when we are done. If under memory pressure we fail to allocate a rsp, we silently drop the command and wait for the host to retry. 
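For illustration, the get/put pair described above boils down to the following sketch (simplified stand-in types, not the driver's exact structures; pool entries are assumed to be zero-initialised so their allocated flag starts out false):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Simplified stand-ins for nvmet_rdma_rsp / nvmet_rdma_queue. */
struct rsp {
        struct list_head free_list;
        bool allocated;                 /* not from the pre-allocated pool */
};

struct rsp_pool {
        struct list_head free_rsps;
        spinlock_t rsps_lock;
};

static struct rsp *get_rsp(struct rsp_pool *pool)
{
        struct rsp *rsp;
        unsigned long flags;

        spin_lock_irqsave(&pool->rsps_lock, flags);
        rsp = list_first_entry_or_null(&pool->free_rsps, struct rsp, free_list);
        if (rsp)
                list_del(&rsp->free_list);
        spin_unlock_irqrestore(&pool->rsps_lock, flags);

        if (!rsp) {
                /* Pool exhausted under load: fall back to the allocator. */
                rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
                if (!rsp)
                        return NULL;    /* caller drops the command */
                rsp->allocated = true;
        }
        return rsp;
}

static void put_rsp(struct rsp_pool *pool, struct rsp *rsp)
{
        unsigned long flags;

        if (rsp->allocated) {           /* dynamic fallback: just free it */
                kfree(rsp);
                return;
        }
        spin_lock_irqsave(&pool->rsps_lock, flags);
        list_add_tail(&rsp->free_list, &pool->free_rsps);
        spin_unlock_irqrestore(&pool->rsps_lock, flags);
}
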
Reported-by: Steve Wise Tested-by: Steve Wise Signed-off-by: Sagi Grimberg [hch: dropped a superflous assignment] Signed-off-by: Christoph Hellwig --- drivers/nvme/target/rdma.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 3533e918ea37..bfc4da660bb4 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -66,6 +66,7 @@ struct nvmet_rdma_rsp { struct nvmet_req req; + bool allocated; u8 n_rdma; u32 flags; u32 invalidate_rkey; @@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) unsigned long flags; spin_lock_irqsave(&queue->rsps_lock, flags); - rsp = list_first_entry(&queue->free_rsps, + rsp = list_first_entry_or_null(&queue->free_rsps, struct nvmet_rdma_rsp, free_list); - list_del(&rsp->free_list); + if (likely(rsp)) + list_del(&rsp->free_list); spin_unlock_irqrestore(&queue->rsps_lock, flags); + if (unlikely(!rsp)) { + rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); + if (unlikely(!rsp)) + return NULL; + rsp->allocated = true; + } + return rsp; } @@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) { unsigned long flags; + if (rsp->allocated) { + kfree(rsp); + return; + } + spin_lock_irqsave(&rsp->queue->rsps_lock, flags); list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); @@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) cmd->queue = queue; rsp = nvmet_rdma_get_rsp(queue); + if (unlikely(!rsp)) { + /* + * we get here only under memory pressure, + * silently drop and have the host retry + * as we can't even fail it. + */ + nvmet_rdma_post_recv(queue->dev, cmd); + return; + } rsp->queue = queue; rsp->cmd = cmd; rsp->flags = 0; -- GitLab From 55ac5a1614f99816ed367a9ded5f5d65321b522f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 5 Sep 2018 15:09:20 +0100 Subject: [PATCH 1113/1692] drm/i915: Attach the pci match data to the device upon creation Attach our device_info to the our i915 private on creation so that it is always available for inspection. Signed-off-by: Chris Wilson Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20180905140921.17467-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 66 +++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 77a4a01ddc08..1dddd2f4f929 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -870,7 +870,6 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) /** * i915_driver_init_early - setup state not requiring device access * @dev_priv: device private - * @ent: the matching pci_device_id * * Initialize everything that is a "SW-only" state, that is state not * requiring accessing the device or exposing the driver via kernel internal @@ -878,25 +877,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) * system memory allocation, setting up device specific attributes and * function hooks not requiring accessing the device. 
*/ -static int i915_driver_init_early(struct drm_i915_private *dev_priv, - const struct pci_device_id *ent) +static int i915_driver_init_early(struct drm_i915_private *dev_priv) { - const struct intel_device_info *match_info = - (struct intel_device_info *)ent->driver_data; - struct intel_device_info *device_info; int ret = 0; if (i915_inject_load_failure()) return -ENODEV; - /* Setup the write-once "constant" device info */ - device_info = mkwrite_device_info(dev_priv); - memcpy(device_info, match_info, sizeof(*device_info)); - device_info->device_id = dev_priv->drm.pdev->device; - - BUILD_BUG_ON(INTEL_MAX_PLATFORMS > - sizeof(device_info->platform_mask) * BITS_PER_BYTE); - BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); @@ -1335,6 +1322,39 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n"); } +static struct drm_i915_private * +i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct intel_device_info *match_info = + (struct intel_device_info *)ent->driver_data; + struct intel_device_info *device_info; + struct drm_i915_private *i915; + + i915 = kzalloc(sizeof(*i915), GFP_KERNEL); + if (!i915) + return NULL; + + if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) { + kfree(i915); + return NULL; + } + + i915->drm.pdev = pdev; + i915->drm.dev_private = i915; + pci_set_drvdata(pdev, &i915->drm); + + /* Setup the write-once "constant" device info */ + device_info = mkwrite_device_info(i915); + memcpy(device_info, match_info, sizeof(*device_info)); + device_info->device_id = pdev->device; + + BUILD_BUG_ON(INTEL_MAX_PLATFORMS > + sizeof(device_info->platform_mask) * BITS_PER_BYTE); + BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); + + return i915; +} + /** * i915_driver_load - setup chip and create an initial config * @pdev: PCI device @@ -1357,24 +1377,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) driver.driver_features &= ~DRIVER_ATOMIC; - ret = -ENOMEM; - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (dev_priv) - ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); - if (ret) { - DRM_DEV_ERROR(&pdev->dev, "allocation failed\n"); - goto out_free; - } - - dev_priv->drm.pdev = pdev; - dev_priv->drm.dev_private = dev_priv; + dev_priv = i915_driver_create(pdev, ent); + if (!dev_priv) + return -ENOMEM; ret = pci_enable_device(pdev); if (ret) goto out_fini; - pci_set_drvdata(pdev, &dev_priv->drm); - ret = i915_driver_init_early(dev_priv, ent); + ret = i915_driver_init_early(dev_priv); if (ret < 0) goto out_pci_disable; @@ -1426,7 +1437,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) out_fini: i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); drm_dev_fini(&dev_priv->drm); -out_free: kfree(dev_priv); pci_set_drvdata(pdev, NULL); return ret; -- GitLab From 31962ca6a26087eea255c000ea9fa4ffbdad697b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 5 Sep 2018 15:09:21 +0100 Subject: [PATCH 1114/1692] drm/i915: Move final cleanup of drm_i915_private to i915_driver_destroy Introduce a complementary function to i915_driver_create() to undo all that is created. 
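Taken together, the two patches leave the probe path with a symmetric pair of helpers; roughly as below (reconstructed from the diffs, error messages and the write-once device-info copy trimmed, and the file-scope driver structure from i915_drv.c assumed):

/* Lives in i915_drv.c; sketch only. */
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct drm_i915_private *i915;

        i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
        if (!i915)
                return NULL;

        if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
                kfree(i915);
                return NULL;
        }

        i915->drm.pdev = pdev;
        i915->drm.dev_private = i915;
        pci_set_drvdata(pdev, &i915->drm);

        /* ... copy the write-once device info from ent->driver_data ... */
        return i915;
}

static void i915_driver_destroy(struct drm_i915_private *i915)
{
        struct pci_dev *pdev = i915->drm.pdev;

        drm_dev_fini(&i915->drm);
        kfree(i915);

        /* make sure we never chase a dangling pointer from pci_dev */
        pci_set_drvdata(pdev, NULL);
}

Every failure path in i915_driver_load() after the create then funnels into i915_driver_destroy(), and i915_driver_release() uses the same helper, so setup and teardown can no longer drift apart.
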
Suggested-by: Michal Wajdeczko Signed-off-by: Chris Wilson Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20180905140921.17467-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1dddd2f4f929..5dd7fc582e6f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1355,6 +1355,17 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) return i915; } +static void i915_driver_destroy(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + + drm_dev_fini(&i915->drm); + kfree(i915); + + /* And make sure we never chase our dangling pointer from pci_dev */ + pci_set_drvdata(pdev, NULL); +} + /** * i915_driver_load - setup chip and create an initial config * @pdev: PCI device @@ -1436,9 +1447,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) pci_disable_device(pdev); out_fini: i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); - drm_dev_fini(&dev_priv->drm); - kfree(dev_priv); - pci_set_drvdata(pdev, NULL); + i915_driver_destroy(dev_priv); return ret; } @@ -1489,9 +1498,7 @@ static void i915_driver_release(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); i915_driver_cleanup_early(dev_priv); - drm_dev_fini(&dev_priv->drm); - - kfree(dev_priv); + i915_driver_destroy(dev_priv); } static int i915_driver_open(struct drm_device *dev, struct drm_file *file) -- GitLab From 816e846c2eb9129a3e0afa5f920c8bbc71efecaa Mon Sep 17 00:00:00 2001 From: Aaron Knister Date: Fri, 24 Aug 2018 08:42:46 -0400 Subject: [PATCH 1115/1692] IB/ipoib: Avoid a race condition between start_xmit and cm_rep_handler Inside of start_xmit() the call to check if the connection is up and the queueing of the packets for later transmission is not atomic which leaves a window where cm_rep_handler can run, set the connection up, dequeue pending packets and leave the subsequently queued packets by start_xmit() sitting on neigh->queue until they're dropped when the connection is torn down. This only applies to connected mode. These dropped packets can really upset TCP, for example, and cause multi-minute delays in transmission for open connections. Here's the code in start_xmit where we check to see if the connection is up: if (ipoib_cm_get(neigh)) { if (ipoib_cm_up(neigh)) { ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); goto unref; } } The race occurs if cm_rep_handler execution occurs after the above connection check (specifically if it gets to the point where it acquires priv->lock to dequeue pending skb's) but before the below code snippet in start_xmit where packets are queued. if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { push_pseudo_header(skb, phdr->hwaddr); spin_lock_irqsave(&priv->lock, flags); __skb_queue_tail(&neigh->queue, skb); spin_unlock_irqrestore(&priv->lock, flags); } else { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } The patch acquires the netif tx lock in cm_rep_handler for the section where it sets the connection up and dequeues and retransmits deferred skb's. 
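Schematically, the fixed section looks like this (stand-in types, not the ipoib ones; the point is that marking the connection up and draining the deferred queue happen under the same tx lock that start_xmit() holds):

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct cm_conn {                        /* stand-in for the ipoib_cm state */
        spinlock_t priv_lock;
        unsigned long flags;
        struct sk_buff_head pending;    /* deferred skbs (neigh->queue) */
};
#define FLAG_OPER_UP 0

static void cm_mark_up_and_drain(struct net_device *dev, struct cm_conn *p)
{
        struct sk_buff_head skqueue;
        struct sk_buff *skb;

        skb_queue_head_init(&skqueue);

        netif_tx_lock_bh(dev);          /* excludes start_xmit() */
        spin_lock_irq(&p->priv_lock);
        set_bit(FLAG_OPER_UP, &p->flags);
        while ((skb = __skb_dequeue(&p->pending)))
                __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&p->priv_lock);
        netif_tx_unlock_bh(dev);

        /* retransmit the deferred skbs outside of both locks */
        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                dev_queue_xmit(skb);
        }
}

Without the outer netif_tx_lock_bh(), start_xmit() can test the "up" flag just before it is set and then queue its skb just after the drain, stranding it until the connection is torn down.
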
Fixes: 839fcaba355a ("IPoIB: Connected mode experimental support") Cc: stable@vger.kernel.org Signed-off-by: Aaron Knister Tested-by: Ira Weiny Reviewed-by: Ira Weiny Signed-off-by: Jason Gunthorpe --- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index ea01b8dd2be6..3d5424f335cb 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, skb_queue_head_init(&skqueue); + netif_tx_lock_bh(p->dev); spin_lock_irq(&priv->lock); set_bit(IPOIB_FLAG_OPER_UP, &p->flags); if (p->neigh) while ((skb = __skb_dequeue(&p->neigh->queue))) __skb_queue_tail(&skqueue, skb); spin_unlock_irq(&priv->lock); + netif_tx_unlock_bh(p->dev); while ((skb = __skb_dequeue(&skqueue))) { skb->dev = p->dev; -- GitLab From e3f3d7ab00cd459d0f7a839758a4542f4d4b8ac8 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 5 Sep 2018 10:46:27 +0300 Subject: [PATCH 1116/1692] hwmon: (nct6775) Set weight source to zero correctly This is dead code because j can never be 1 at this point. We had intended to just test if the bit was clear. Fixes: bbd8decd4123 ("hwmon: (nct6775) Add support for weighted fan control") Signed-off-by: Dan Carpenter Signed-off-by: Guenter Roeck --- drivers/hwmon/nct6775.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 944f5b63aecd..139781ae830b 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -1558,7 +1558,7 @@ static void nct6775_update_pwm(struct device *dev) reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); data->pwm_weight_temp_sel[i] = reg & 0x1f; /* If weight is disabled, report weight source as 0 */ - if (j == 1 && !(reg & 0x80)) + if (!(reg & 0x80)) data->pwm_weight_temp_sel[i] = 0; /* Weight temp data */ -- GitLab From f40f299bbe806a2e2c8b0d7cdda822fa3bdd171b Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Wed, 5 Sep 2018 13:20:34 +0530 Subject: [PATCH 1117/1692] bnxt_re: Fix couple of memory leaks that could lead to IOMMU call traces 1. DMA-able memory allocated for Shadow QP was not being freed. 2. bnxt_qplib_alloc_qp_hdr_buf() had a bug wherein the SQ pointer was erroneously pointing to the RQ. But since the corresponding free_qp_hdr_buf() was correct, memory being free was less than what was allocated. 
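The second leak is the classic asymmetric-size pattern: the allocation was sized from the wrong queue while the free used the right one, so part of the DMA buffer was never returned. A reduced sketch of the corrected pairing (simplified stand-in structures, not the qplib ones):

#include <linux/dma-mapping.h>

struct q_attr {                         /* stand-in for bnxt_qplib_q */
        u32 max_elements;
};

struct qp_hdr_bufs {                    /* stand-in for bnxt_qplib_qp */
        struct q_attr sq, rq;
        u32 sq_hdr_buf_size;
        void *sq_hdr_buf;
        dma_addr_t sq_hdr_buf_map;
};

static int alloc_sq_hdr_buf(struct device *dev, struct qp_hdr_bufs *qp)
{
        struct q_attr *sq = &qp->sq;    /* the bug: this read "&qp->rq" */
        size_t sz = (size_t)sq->max_elements * qp->sq_hdr_buf_size;

        if (!sz)
                return 0;
        qp->sq_hdr_buf = dma_alloc_coherent(dev, sz, &qp->sq_hdr_buf_map,
                                            GFP_KERNEL);
        return qp->sq_hdr_buf ? 0 : -ENOMEM;
}

static void free_sq_hdr_buf(struct device *dev, struct qp_hdr_bufs *qp)
{
        size_t sz = (size_t)qp->sq.max_elements * qp->sq_hdr_buf_size;

        if (qp->sq_hdr_buf)
                dma_free_coherent(dev, sz, qp->sq_hdr_buf,
                                  qp->sq_hdr_buf_map);
}

With the old "&qp->rq" line, the allocation was sized from the receive queue while the free used the send queue's size; whenever the receive queue was the larger of the two, the difference was leaked and eventually surfaced as IOMMU call traces.
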
Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver") Signed-off-by: Somnath Kotur Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2 ++ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index bbfb86eb2d24..bc2b9e038439 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) "Failed to destroy Shadow QP"); return rc; } + bnxt_qplib_free_qp_res(&rdev->qplib_res, + &rdev->qp1_sqp->qplib_qp); mutex_lock(&rdev->qp_lock); list_del(&rdev->qp1_sqp->list); atomic_dec(&rdev->qp_count); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index e426b990c1dd..6ad0d46ab879 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_q *rq = &qp->rq; - struct bnxt_qplib_q *sq = &qp->rq; + struct bnxt_qplib_q *sq = &qp->sq; int rc = 0; if (qp->sq_hdr_buf_size && sq->hwq.max_elements) { -- GitLab From 8b2ded1c94c06f841f8c1612bcfa33c85012a36b Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 5 Sep 2018 16:14:36 -0600 Subject: [PATCH 1118/1692] block: don't warn when doing fsync on read-only devices It is possible to call fsync on a read-only handle (for example, fsck.ext2 does it when doing read-only check), and this call results in kernel warning. The patch b089cfd95d32 ("block: don't warn for flush on read-only device") attempted to disable the warning, but it is buggy and it doesn't (op_is_flush tests flags, but bio_op strips off the flags). Signed-off-by: Mikulas Patocka Fixes: 721c7fc701c7 ("block: fail op_is_write() requests to read-only partitions") Cc: stable@vger.kernel.org # 4.18 Signed-off-by: Jens Axboe --- block/blk-core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c index dee56c282efb..4dbc93f43b38 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2163,9 +2163,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) { const int op = bio_op(bio); - if (part->policy && (op_is_write(op) && !op_is_flush(op))) { + if (part->policy && op_is_write(op)) { char b[BDEVNAME_SIZE]; + if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) + return false; + WARN_ONCE(1, "generic_make_request: Trying to write " "to read-only block-device %s (partno %d)\n", -- GitLab From 08e74be103051861eb2c1ee52a2dcf119cde264f Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Wed, 5 Sep 2018 09:47:57 +0300 Subject: [PATCH 1119/1692] RDMA/uverbs: Fix error cleanup path of ib_uverbs_add_one() If ib_uverbs_create_uapi() fails, dev_num should be freed from the bitmap. 
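The distinction, using the helpers from include/linux/blk_types.h: REQ_PREFLUSH and REQ_FUA are flag bits in bio->bi_opf, and bio_op() masks them off, so the earlier check op_is_flush(bio_op(bio)) could never be true. The corrected test looks at the unmasked field and additionally requires an empty payload:

#include <linux/bio.h>

/* A flush that carries no data should be allowed through to a
 * read-only device without triggering the warning. */
static bool bio_is_pure_flush(struct bio *bio)
{
        return op_is_flush(bio->bi_opf) && !bio_sectors(bio);
}
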
Fixes: 7d96c9b17636 ("IB/uverbs: Have the core code create the uverbs_root_spec") Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 823beca448e1..6d974e2363df 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -1050,7 +1050,7 @@ static void ib_uverbs_add_one(struct ib_device *device) uverbs_dev->num_comp_vectors = device->num_comp_vectors; if (ib_uverbs_create_uapi(device, uverbs_dev)) - goto err; + goto err_uapi; cdev_init(&uverbs_dev->cdev, NULL); uverbs_dev->cdev.owner = THIS_MODULE; @@ -1077,11 +1077,10 @@ static void ib_uverbs_add_one(struct ib_device *device) err_class: device_destroy(uverbs_class, uverbs_dev->cdev.dev); - err_cdev: cdev_del(&uverbs_dev->cdev); +err_uapi: clear_bit(devnum, dev_map); - err: if (atomic_dec_and_test(&uverbs_dev->refcount)) ib_uverbs_comp_dev(uverbs_dev); -- GitLab From 76d5581c870454be5f1f1a106c57985902e7ea20 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Sun, 5 Aug 2018 09:19:33 +0300 Subject: [PATCH 1120/1692] net/mlx5: Fix use-after-free in self-healing flow When the mlx5 health mechanism detects a problem while the driver is in the middle of init_one or remove_one, the driver needs to prevent the health mechanism from scheduling future work; if future work is scheduled, there is a problem with use-after-free: the system WQ tries to run the work item (which has been freed) at the scheduled future time. Prevent this by disabling work item scheduling in the health mechanism when the driver is in the middle of init_one() or remove_one(). 
Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Jack Morgenstein Reviewed-by: Feras Daoud Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 10 +++++++++- drivers/net/ethernet/mellanox/mlx5/core/main.c | 6 +++--- include/linux/mlx5/driver.h | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index d39b0b7011b2..9f39aeca863f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) add_timer(&health->timer); } -void mlx5_stop_health_poll(struct mlx5_core_dev *dev) +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) { struct mlx5_core_health *health = &dev->priv.health; + unsigned long flags; + + if (disable_health) { + spin_lock_irqsave(&health->wq_lock, flags); + set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); + set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); + spin_unlock_irqrestore(&health->wq_lock, flags); + } del_timer_sync(&health->timer); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index cf3e4a659052..739aad0a0b35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1286,7 +1286,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_cleanup_once(dev); err_stop_poll: - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, boot); if (mlx5_cmd_teardown_hca(dev)) { dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); goto out_err; @@ -1346,7 +1346,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_free_irq_vectors(dev); if (cleanup) mlx5_cleanup_once(dev); - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, cleanup); err = mlx5_cmd_teardown_hca(dev); if (err) { dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); @@ -1608,7 +1608,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) * with the HCA, so the health polll is no longer needed. */ mlx5_drain_health_wq(dev); - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, false); ret = mlx5_cmd_force_teardown_hca(dev); if (ret) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 7a452716de4b..aa65f58c6610 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1052,7 +1052,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); -void mlx5_stop_health_poll(struct mlx5_core_dev *dev); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); -- GitLab From 5df816e7f43f1297c40021ef17ec6e722b45c82f Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 7 Aug 2018 09:59:03 +0300 Subject: [PATCH 1121/1692] net/mlx5: Fix debugfs cleanup in the device init/remove flow When initializing the device (procedure init_one), the driver calls mlx5_pci_init to perform pci initialization. 
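The ordering the fix enforces can be sketched as follows (simplified stand-in structure; the point is that the "drop new work" flag is set under the same lock the poll path takes before queueing, and only then is the timer stopped synchronously):

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define DROP_NEW_WORK 0

struct health {                         /* stand-in for mlx5_core_health */
        struct timer_list timer;
        struct work_struct work;
        spinlock_t wq_lock;
        unsigned long flags;
};

static void health_poll(struct health *h)       /* timer callback path */
{
        unsigned long flags;

        spin_lock_irqsave(&h->wq_lock, flags);
        if (!test_bit(DROP_NEW_WORK, &h->flags))
                schedule_work(&h->work);        /* else: teardown in progress */
        spin_unlock_irqrestore(&h->wq_lock, flags);
}

static void stop_health_poll(struct health *h, bool disable_health)
{
        unsigned long flags;

        if (disable_health) {           /* called from init_one/remove_one */
                spin_lock_irqsave(&h->wq_lock, flags);
                set_bit(DROP_NEW_WORK, &h->flags);
                spin_unlock_irqrestore(&h->wq_lock, flags);
        }
        del_timer_sync(&h->timer);
}

Once the flag is visible, nothing can queue the work item again, so freeing the health state afterwards cannot race with a deferred run on the system workqueue.
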
As part of this initialization, mlx5_pci_init creates a debugfs directory. If this creation fails, init_one aborts, returning failure to the caller (which is the probe method caller). The main reason for such a failure to occur is if the debugfs directory already exists. This can happen if the last time mlx5_pci_close was called, debugfs_remove (silently) failed due to the debugfs directory not being empty. Guarantee that such a debugfs_remove failure will not occur by instead calling debugfs_remove_recursive in procedure mlx5_pci_close. Fixes: 59211bd3b632 ("net/mlx5: Split the load/unload flow into hardware and software flows") Signed-off-by: Jack Morgenstein Reviewed-by: Daniel Jurgens Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 739aad0a0b35..b5e9f664fc66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) priv->numa_node = dev_to_node(&dev->pdev->dev); priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); - if (!priv->dbg_root) + if (!priv->dbg_root) { + dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n"); return -ENOMEM; + } err = mlx5_pci_enable_device(dev); if (err) { @@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) pci_clear_master(dev->pdev); release_bar(dev->pdev); mlx5_pci_disable_device(dev); - debugfs_remove(priv->dbg_root); + debugfs_remove_recursive(priv->dbg_root); } static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) -- GitLab From 8d71e818506718e8d7032ce824b5c74a17d4f7a5 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 21 Aug 2018 16:04:41 +0300 Subject: [PATCH 1122/1692] net/mlx5: Use u16 for Work Queue buffer fragment size Minimal stride size is 16. Hence, the number of strides in a fragment (of PAGE_SIZE) is <= PAGE_SIZE / 16 <= 4K. u16 is sufficient to represent this. 
Fixes: 388ca8be0037 ("IB/mlx5: Implement fragmented completion queue (CQ)") Signed-off-by: Tariq Toukan Reviewed-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 2 +- include/linux/mlx5/driver.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index c8c315eb5128..d838af9539b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) return (u32)wq->fbc.sz_m1 + 1; } -u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) +u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) { - return (u32)wq->fbc.frag_sz_m1 + 1; + return wq->fbc.frag_sz_m1 + 1; } u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 2bd4c3184eba..3a1a170bb2d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); -u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); +u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index aa65f58c6610..3a1258fd8ac3 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -362,7 +362,7 @@ struct mlx5_frag_buf { struct mlx5_frag_buf_ctrl { struct mlx5_frag_buf frag_buf; u32 sz_m1; - u32 frag_sz_m1; + u16 frag_sz_m1; u32 strides_offset; u8 log_sz; u8 log_stride; -- GitLab From a09036221092989b88c55d24d1f12ceb1d7d361f Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 21 Aug 2018 16:07:58 +0300 Subject: [PATCH 1123/1692] net/mlx5: Use u16 for Work Queue buffer strides offset Minimal stride size is 16. Hence, the number of strides in a fragment (of PAGE_SIZE) is <= PAGE_SIZE / 16 <= 4K. u16 is sufficient to represent this. 
Fixes: d7037ad73daa ("net/mlx5: Fix QP fragmented buffer allocation") Signed-off-by: Tariq Toukan Reviewed-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 2 +- include/linux/mlx5/driver.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index d838af9539b1..68e7f8df2a6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -138,7 +138,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) { - u32 sq_strides_offset; + u16 sq_strides_offset; u32 rq_pg_remainder; int err; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 3a1258fd8ac3..66d94b4557cf 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -363,7 +363,7 @@ struct mlx5_frag_buf_ctrl { struct mlx5_frag_buf frag_buf; u32 sz_m1; u16 frag_sz_m1; - u32 strides_offset; + u16 strides_offset; u8 log_sz; u8 log_stride; u8 log_frag_strides; @@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key) } static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, - u32 strides_offset, + u16 strides_offset, struct mlx5_frag_buf_ctrl *fbc) { fbc->log_stride = log_stride; -- GitLab From c88a026e01219488e745f4f0267fd76c2bb68421 Mon Sep 17 00:00:00 2001 From: Raed Salem Date: Tue, 21 Aug 2018 15:22:42 +0300 Subject: [PATCH 1124/1692] net/mlx5: E-Switch, Fix memory leak when creating switchdev mode FDB tables The memory allocated for the slow path table flow group input structure was not freed upon successful return, fix that. Fixes: 1967ce6ea5c8 ("net/mlx5: E-Switch, Refactor fast path FDB table creation in switchdev mode") Signed-off-by: Raed Salem Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f72b5c9dcfe9..3028e8d90920 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) if (err) goto miss_rule_err; + kvfree(flow_group_in); return 0; miss_rule_err: -- GitLab From 071304772fc747d5df13c51f1cf48a4b922a5e0d Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Sun, 19 Aug 2018 08:56:09 +0300 Subject: [PATCH 1125/1692] net/mlx5: Fix not releasing read lock when adding flow rules If building match list fg fails and we never jumped to search_again_locked label then the function returned without unlocking the read lock. 
Fixes: bd71b08ec2ee ("net/mlx5: Support multiple updates of steering rules in parallel") Signed-off-by: Roi Dayan Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f418541af7cf..384b560f2a93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1726,6 +1726,8 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, if (err) { if (take_write) up_write_ref_node(&ft->node); + else + up_read_ref_node(&ft->node); return ERR_PTR(err); } -- GitLab From df7ddb2396cd162e64aaff9401be05e31e438961 Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Mon, 27 Aug 2018 09:09:46 -0500 Subject: [PATCH 1126/1692] net/mlx5: Consider PCI domain in search for next dev The PCI BDF is not unique. PCI domain must also be considered when searching for the next physical device during lag setup. Example below: mlx5_core 0000:01:00.0: MLX5E: StrdRq(1) RqSz(8) StrdSz(128) RxCqeCmprss(0) mlx5_core 0000:01:00.1: MLX5E: StrdRq(1) RqSz(8) StrdSz(128) RxCqeCmprss(0) mlx5_core 0001:01:00.0: MLX5E: StrdRq(1) RqSz(8) StrdSz(128) RxCqeCmprss(0) mlx5_core 0001:01:00.1: MLX5E: StrdRq(1) RqSz(8) StrdSz(128) RxCqeCmprss(0) Signed-off-by: Daniel Jurgens Reviewed-by: Aviv Heller Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index b994b80d5714..ada723bd91b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -391,16 +391,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) } } -static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev) +static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev) { - return (u16)((dev->pdev->bus->number << 8) | + return (u32)((pci_domain_nr(dev->pdev->bus) << 16) | + (dev->pdev->bus->number << 8) | PCI_SLOT(dev->pdev->devfn)); } /* Must be called with intf_mutex held */ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) { - u16 pci_id = mlx5_gen_pci_id(dev); + u32 pci_id = mlx5_gen_pci_id(dev); struct mlx5_core_dev *res = NULL; struct mlx5_core_dev *tmp_dev; struct mlx5_priv *priv; -- GitLab From 47bc94b82291e007da61ee1b3d18c77871f3e158 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Wed, 15 Aug 2018 11:08:48 -0500 Subject: [PATCH 1127/1692] net/mlx5: Check for error in mlx5_attach_interface Currently, mlx5_attach_interface does not check for error after calling intf->attach or intf->add. 
When these two calls fails, the client is not initialized and will cause issues such as kernel panic on invalid address in the teardown path (mlx5_detach_interface) Fixes: 737a234bb638 ("net/mlx5: Introduce attach/detach to interface API") Signed-off-by: Huy Nguyen Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index ada723bd91b6..37ba7c78859d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) delayed_event_start(priv); dev_ctx->context = intf->add(dev); - set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); - if (intf->attach) - set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); - if (dev_ctx->context) { + set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); + if (intf->attach) + set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); + spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); @@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv if (intf->attach) { if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) goto out; - intf->attach(dev, dev_ctx->context); + if (intf->attach(dev, dev_ctx->context)) + goto out; + set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); } else { if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) goto out; dev_ctx->context = intf->add(dev); + if (!dev_ctx->context) + goto out; + set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); } -- GitLab From fc433829f9a29530d492f0eb20804ac5e6967204 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 24 Aug 2018 12:24:10 -0700 Subject: [PATCH 1128/1692] net/mlx5e: Ethtool steering, fix udp source port value Copy and paste bug was introduced in the offending patch. We need to write udp source port value into the headers value and not headers criteria "mask". Fixes: 142644f8a1f8 ("net/mlx5e: Ethtool steering flow parsing refactoring") Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 75bb981e00b7..41cde926cdab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v, { if (psrc_m) { MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); - MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v)); + MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v)); } if (pdst_m) { -- GitLab From ad9421e36a77056a4f095d49b9605e80b4d216ed Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 20 Aug 2018 11:43:03 +0300 Subject: [PATCH 1129/1692] net/mlx5: Fix possible deadlock from lockdep when adding fte to fg This is a false positive report due to incorrect nested lock annotations as we lock multiple fgs with the same subclass. Instead of locking all fgs only lock the one being used as was done before. 
Fixes: bd71b08ec2ee ("net/mlx5: Support multiple updates of steering rules in parallel") Signed-off-by: Roi Dayan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 384b560f2a93..37d114c668b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head) return version; } +static struct fs_fte * +lookup_fte_locked(struct mlx5_flow_group *g, + u32 *match_value, + bool take_write) +{ + struct fs_fte *fte_tmp; + + if (take_write) + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); + else + nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); + fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, + rhash_fte); + if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { + fte_tmp = NULL; + goto out; + } + + nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); +out: + if (take_write) + up_write_ref_node(&g->node); + else + up_read_ref_node(&g->node); + return fte_tmp; +} + static struct mlx5_flow_handle * try_add_to_existing_fg(struct mlx5_flow_table *ft, struct list_head *match_head, @@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, if (IS_ERR(fte)) return ERR_PTR(-ENOMEM); - list_for_each_entry(iter, match_head, list) { - nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT); - } - search_again_locked: version = matched_fgs_get_version(match_head); /* Try to find a fg that already contains a matching fte */ @@ -1611,20 +1634,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, struct fs_fte *fte_tmp; g = iter->g; - fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, - rhash_fte); - if (!fte_tmp || !tree_get_node(&fte_tmp->node)) + fte_tmp = lookup_fte_locked(g, spec->match_value, take_write); + if (!fte_tmp) continue; - - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); - if (!take_write) { - list_for_each_entry(iter, match_head, list) - up_read_ref_node(&iter->g->node); - } else { - list_for_each_entry(iter, match_head, list) - up_write_ref_node(&iter->g->node); - } - rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num, fte_tmp); up_write_ref_node(&fte_tmp->node); @@ -1633,19 +1645,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, return rule; } - /* No group with matching fte found. Try to add a new fte to any - * matching fg. 
- */ - - if (!take_write) { - list_for_each_entry(iter, match_head, list) - up_read_ref_node(&iter->g->node); - list_for_each_entry(iter, match_head, list) - nested_down_write_ref_node(&iter->g->node, - FS_LOCK_PARENT); - take_write = true; - } - /* Check the ft version, for case that new flow group * was added while the fgs weren't locked */ @@ -1657,27 +1656,30 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, /* Check the fgs version, for case the new FTE with the * same values was added while the fgs weren't locked */ - if (version != matched_fgs_get_version(match_head)) + if (version != matched_fgs_get_version(match_head)) { + take_write = true; goto search_again_locked; + } list_for_each_entry(iter, match_head, list) { g = iter->g; if (!g->node.active) continue; + + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); + err = insert_fte(g, fte); if (err) { + up_write_ref_node(&g->node); if (err == -ENOSPC) continue; - list_for_each_entry(iter, match_head, list) - up_write_ref_node(&iter->g->node); kmem_cache_free(steering->ftes_cache, fte); return ERR_PTR(err); } nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); - list_for_each_entry(iter, match_head, list) - up_write_ref_node(&iter->g->node); + up_write_ref_node(&g->node); rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num, fte); up_write_ref_node(&fte->node); @@ -1686,8 +1688,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, } rule = ERR_PTR(-ENOENT); out: - list_for_each_entry(iter, match_head, list) - up_write_ref_node(&iter->g->node); kmem_cache_free(steering->ftes_cache, fte); return rule; } -- GitLab From 792fab2c0d45758ad3d187bd252570d2bb627fa9 Mon Sep 17 00:00:00 2001 From: Weinan Li Date: Tue, 4 Sep 2018 14:13:43 +0800 Subject: [PATCH 1130/1692] drm/i915/gvt: Fix the incorrect length of child_device_config issue GVT-g emualte the opregion for guest with bdb version as '186' which child_device_config length should be '33'. v2: split into 2 patch. 1st for issue fix, 2nd for code clean up.(Zhenyu) v3: add fixes tag.(Zhenyu) Fixes: 4023f301d28f ("drm/i915/gvt: opregion virtualization for win") CC: Xiaolin Zhang Reviewed-by: Xiaolin Zhang Signed-off-by: Weinan Li Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/opregion.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index fa75a2eead90..b0d3a43ccd03 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -42,8 +42,6 @@ #define DEVICE_TYPE_EFP3 0x20 #define DEVICE_TYPE_EFP4 0x10 -#define DEV_SIZE 38 - struct opregion_header { u8 signature[16]; u32 size; @@ -63,6 +61,10 @@ struct bdb_data_header { u16 size; /* data size */ } __packed; +/* For supporting windows guest with opregion, here hardcode the emulated + * bdb header version as '186', and the corresponding child_device_config + * length should be '33' but not '38'. 
+ */ struct efp_child_device_config { u16 handle; u16 device_type; @@ -109,12 +111,6 @@ struct efp_child_device_config { u8 mipi_bridge_type; /* 171 */ u16 device_class_ext; u8 dvo_function; - u8 dp_usb_type_c:1; /* 195 */ - u8 skip6:7; - u8 dp_usb_type_c_2x_gpio_index; /* 195 */ - u16 dp_usb_type_c_2x_gpio_pin; /* 195 */ - u8 iboost_dp:4; /* 196 */ - u8 iboost_hdmi:4; /* 196 */ } __packed; struct vbt { @@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v) v->header.bdb_offset = offsetof(struct vbt, bdb_header); strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); - v->bdb_header.version = 186; /* child_dev_size = 38 */ + v->bdb_header.version = 186; /* child_dev_size = 33 */ v->bdb_header.header_size = sizeof(v->bdb_header); v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) @@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v) /* child device */ num_child = 4; /* each port has one child */ + v->general_definitions.child_dev_size = + sizeof(struct efp_child_device_config); v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS; /* size will include child devices */ v->general_definitions_header.size = - sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE; - v->general_definitions.child_dev_size = DEV_SIZE; + sizeof(struct bdb_general_definitions) + + num_child * v->general_definitions.child_dev_size; /* portA */ v->child0.handle = DEVICE_TYPE_EFP1; -- GitLab From 0a3b8b2b215f9e84b82ae97df71292ccfd92b1e7 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 3 Sep 2018 19:12:41 -0700 Subject: [PATCH 1131/1692] tipc: orphan sock in tipc_release() Before we unlock the sock in tipc_release(), we have to detach sk->sk_socket from sk, otherwise a parallel tipc_sk_fill_sock_diag() could stil read it after we free this socket. Fixes: c30b70deb5f4 ("tipc: implement socket diagnostics for AF_TIPC") Reported-and-tested-by: syzbot+48804b87c16588ad491d@syzkaller.appspotmail.com Cc: Jon Maloy Cc: Ying Xue Signed-off-by: Cong Wang Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/socket.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ab7a2a7178f7..a0ff8bffc96b 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock) sk_stop_timer(sk, &sk->sk_timer); tipc_sk_remove(tsk); + sock_orphan(sk); /* Reject any messages that accumulated in backlog queue */ release_sock(sk); tipc_dest_list_purge(&tsk->cong_links); -- GitLab From ee28bb56ac5b4c0c08ef10d33cc7adb749bbf4c6 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Tue, 4 Sep 2018 19:00:19 +0200 Subject: [PATCH 1132/1692] net/sched: fix memory leak in act_tunnel_key_init() If users try to install act_tunnel_key 'set' rules with duplicate values of 'index', the tunnel metadata are allocated, but never released. Then, kmemleak complains as follows: # tc a a a tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 index 111 # echo clear > /sys/kernel/debug/kmemleak # tc a a a tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 index 111 Error: TC IDR already exists. We have an error talking to the kernel # echo scan > /sys/kernel/debug/kmemleak # cat /sys/kernel/debug/kmemleak unreferenced object 0xffff8800574e6c80 (size 256): comm "tc", pid 5617, jiffies 4298118009 (age 57.990s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 00 1c e8 b0 ff ff ff ff ................ 81 24 c2 ad ff ff ff ff 00 00 00 00 00 00 00 00 .$.............. 
backtrace: [<00000000b7afbf4e>] tunnel_key_init+0x8a5/0x1800 [act_tunnel_key] [<000000007d98fccd>] tcf_action_init_1+0x698/0xac0 [<0000000099b8f7cc>] tcf_action_init+0x15c/0x590 [<00000000dc60eebe>] tc_ctl_action+0x336/0x5c2 [<000000002f5a2f7d>] rtnetlink_rcv_msg+0x357/0x8e0 [<000000000bfe7575>] netlink_rcv_skb+0x124/0x350 [<00000000edab656f>] netlink_unicast+0x40f/0x5d0 [<00000000b322cdcb>] netlink_sendmsg+0x6e8/0xba0 [<0000000063d9d490>] sock_sendmsg+0xb3/0xf0 [<00000000f0d3315a>] ___sys_sendmsg+0x654/0x960 [<00000000c06cbd42>] __sys_sendmsg+0xd3/0x170 [<00000000ce72e4b0>] do_syscall_64+0xa5/0x470 [<000000005caa2d97>] entry_SYSCALL_64_after_hwframe+0x49/0xbe [<00000000fac1b476>] 0xffffffffffffffff This problem theoretically happens also in case users attempt to setup a geneve rule having wrong configuration data, or when the kernel fails to allocate 'params_new'. Ensure that tunnel_key_init() releases the tunnel metadata also in the above conditions. Addresses-Coverity-ID: 1373974 ("Resource leak") Fixes: d0f6dd8a914f4 ("net/sched: Introduce act_tunnel_key") Fixes: 0ed5269f9e41f ("net/sched: add tunnel option support to act_tunnel_key") Signed-off-by: Davide Caratti Acked-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/act_tunnel_key.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 420759153d5f..28d58bbc953e 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, &metadata->u.tun_info, opts_len, extack); if (ret < 0) - goto err_out; + goto release_tun_meta; } metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; @@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, &act_tunnel_key_ops, bind, true); if (ret) { NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); - goto err_out; + goto release_tun_meta; } ret = ACT_P_CREATED; } else if (!ovr) { - tcf_idr_release(*a, bind); NL_SET_ERR_MSG(extack, "TC IDR already exists"); - return -EEXIST; + ret = -EEXIST; + goto release_tun_meta; } t = to_tunnel_key(*a); params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); if (unlikely(!params_new)) { - tcf_idr_release(*a, bind); NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); - return -ENOMEM; + ret = -ENOMEM; + exists = true; + goto release_tun_meta; } params_new->tcft_action = parm->t_action; params_new->tcft_enc_metadata = metadata; @@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, return ret; +release_tun_meta: + dst_release(&metadata->dst); + err_out: if (exists) tcf_idr_release(*a, bind); -- GitLab From 222440996d6daf635bed6cb35041be22ede3e8a0 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 5 Sep 2018 16:55:10 +0200 Subject: [PATCH 1133/1692] net/af_iucv: drop inbound packets with invalid flags Inbound packets may have any combination of flag bits set in their iucv header. If we don't know how to handle a specific combination, drop the skb instead of leaking it. To clarify what error is returned in this case, replace the hard-coded 0 with the corresponding macro. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- net/iucv/af_iucv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a21d8ed0a325..01000c14417f 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -2155,8 +2155,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, struct sock *sk; struct iucv_sock *iucv; struct af_iucv_trans_hdr *trans_hdr; + int err = NET_RX_SUCCESS; char nullstring[8]; - int err = 0; if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) { WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d", @@ -2254,7 +2254,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, err = afiucv_hs_callback_rx(sk, skb); break; default: - ; + kfree_skb(skb); } return err; -- GitLab From b2f543949acd1ba64313fdad9e672ef47550d773 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 5 Sep 2018 16:55:11 +0200 Subject: [PATCH 1134/1692] net/af_iucv: fix skb handling on HiperTransport xmit error When sending an skb, afiucv_hs_send() bails out on various error conditions. But currently the caller has no way of telling whether the skb was freed or not - resulting in potentially either a) leaked skbs from iucv_send_ctrl(), or b) double-free's from iucv_sock_sendmsg(). As dev_queue_xmit() will always consume the skb (even on error), be consistent and also free the skb from all other error paths. This way callers no longer need to care about managing the skb. Signed-off-by: Julian Wiedmann Reviewed-by: Ursula Braun Signed-off-by: David S. Miller --- net/iucv/af_iucv.c | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 01000c14417f..e2f16a0173a9 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); skb->dev = iucv->hs_dev; - if (!skb->dev) - return -ENODEV; - if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) - return -ENETDOWN; + if (!skb->dev) { + err = -ENODEV; + goto err_free; + } + if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) { + err = -ENETDOWN; + goto err_free; + } if (skb->len > skb->dev->mtu) { - if (sock->sk_type == SOCK_SEQPACKET) - return -EMSGSIZE; - else - skb_trim(skb, skb->dev->mtu); + if (sock->sk_type == SOCK_SEQPACKET) { + err = -EMSGSIZE; + goto err_free; + } + skb_trim(skb, skb->dev->mtu); } skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); nskb = skb_clone(skb, GFP_ATOMIC); - if (!nskb) - return -ENOMEM; + if (!nskb) { + err = -ENOMEM; + goto err_free; + } + skb_queue_tail(&iucv->send_skb_q, nskb); err = dev_queue_xmit(skb); if (net_xmit_eval(err)) { @@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, WARN_ON(atomic_read(&iucv->msg_recv) < 0); } return net_xmit_eval(err); + +err_free: + kfree_skb(skb); + return err; } static struct sock *__iucv_get_sock_by_name(char *nm) @@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, err = afiucv_hs_send(&txmsg, sk, skb, 0); if (err) { atomic_dec(&iucv->msg_sent); - goto fail; + goto out; } } else { /* Classic VM IUCV transport */ skb_queue_tail(&iucv->send_skb_q, skb); -- GitLab From b7f41565546d393747fd554f9526c1187c6bf652 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 5 Sep 2018 16:55:12 +0200 Subject: [PATCH 1135/1692] net/iucv: declare iucv_path_table_empty() as static Fixes a 
compile warning. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- net/iucv/iucv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 8f7ef167c45a..eb502c6290c2 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev) * Returns 0 if there are still iucv pathes defined * 1 if there are no iucv pathes defined */ -int iucv_path_table_empty(void) +static int iucv_path_table_empty(void) { int i; -- GitLab From 69235ccf491d2e26aefd465c0d3ccd1e3b2a9a9c Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 6 Sep 2018 03:21:33 +0000 Subject: [PATCH 1136/1692] ASoC: rsnd: adg: care clock-frequency size ADG has buffer over flow bug if DT has more than 3 clock-frequency. This patch fixup this issue, and uses first 2 values. clock-frequency = ; /* this is OK */ clock-frequency = ; /* this is NG */ Signed-off-by: Kuninori Morimoto Tested-by: Hiroyuki Yokoyama Signed-off-by: Mark Brown --- sound/soc/sh/rcar/adg.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index 3a3064dda57f..051f96405346 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c @@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, goto rsnd_adg_get_clkout_end; req_size = prop->length / sizeof(u32); + if (req_size > REQ_SIZE) { + dev_err(dev, + "too many clock-frequency, use top %d\n", REQ_SIZE); + req_size = REQ_SIZE; + } of_property_read_u32_array(np, "clock-frequency", req_rate, req_size); req_48kHz_rate = 0; -- GitLab From 6c92d5a2744e27619a8fcc9d74b91ee9f1cdebd1 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 6 Sep 2018 03:21:47 +0000 Subject: [PATCH 1137/1692] ASoC: rsnd: don't fallback to PIO mode when -EPROBE_DEFER Current rsnd driver will fallback to PIO mode if it can't get DMA handler. But, DMA might return -EPROBE_DEFER when probe timing. This driver always fallback to PIO mode especially from commit ac6bbf0cdf4206c ("iommu: Remove IOMMU_OF_DECLARE") because of this reason. The DMA driver will be probed later, but sound driver might be probed as PIO mode in such case. This patch fixup this issue. Then, -EPROBE_DEFER is not error. Thus, let's don't indicate error message in such case. And it needs to call rsnd_adg_remove() individually if probe failed, because it registers clk which should be unregister. Maybe PIO fallback feature itself is not needed, but let's keep it so far. Signed-off-by: Kuninori Morimoto Signed-off-by: Mark Brown --- sound/soc/sh/rcar/core.c | 10 +++++++++- sound/soc/sh/rcar/dma.c | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index b35f5509cfe2..d23c2bbff0cf 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -478,7 +478,7 @@ static int rsnd_status_update(u32 *status, (func_call && (mod)->ops->fn) ? #fn : ""); \ if (func_call && (mod)->ops->fn) \ tmp = (mod)->ops->fn(mod, io, param); \ - if (tmp) \ + if (tmp && (tmp != -EPROBE_DEFER)) \ dev_err(dev, "%s[%d] : %s error %d\n", \ rsnd_mod_name(mod), rsnd_mod_id(mod), \ #fn, tmp); \ @@ -1561,6 +1561,14 @@ static int rsnd_probe(struct platform_device *pdev) rsnd_dai_call(remove, &rdai->capture, priv); } + /* + * adg is very special mod which can't use rsnd_dai_call(remove), + * and it registers ADG clock on probe. + * It should be unregister if probe failed. 
+ * Mainly it is assuming -EPROBE_DEFER case + */ + rsnd_adg_remove(priv); + return ret; } diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index fe63ef8600d0..d65ea7bc4dac 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c @@ -241,6 +241,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io, /* try to get DMAEngine channel */ chan = rsnd_dmaen_request_channel(io, mod_from, mod_to); if (IS_ERR_OR_NULL(chan)) { + /* Let's follow when -EPROBE_DEFER case */ + if (PTR_ERR(chan) == -EPROBE_DEFER) + return PTR_ERR(chan); + /* * DMA failed. try to PIO mode * see -- GitLab From 5d128fbd8b20f8a48cb13c3eced789d1f9573ecd Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 4 Sep 2018 14:55:26 +0200 Subject: [PATCH 1138/1692] ACPI / bus: Only call dmi_check_system() on X86 Calling dmi_check_system() early only works on X86. Other architectures initialize the DMI subsystem later so it's not ready yet when ACPI itself gets initialized. In the best case it results in a useless call to a function which will do nothing. But depending on the dmi implementation, it could also result in warnings. Best is to not call the function when it can't work and isn't needed. Additionally, if anyone ever needs to add non-x86 quirks, it would surprisingly not work, so document the limitation to avoid confusion. Signed-off-by: Jean Delvare Fixes: cce4f632db20 (ACPI: fix early DSDT dmi check warnings on ia64) Signed-off-by: Rafael J. Wysocki --- drivers/acpi/bus.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 292088fcc624..d2e29a19890d 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -35,11 +35,11 @@ #include #ifdef CONFIG_X86 #include +#include #endif #include #include #include -#include #include #include "internal.h" @@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = { }, {} }; -#else -static const struct dmi_system_id dsdt_dmi_table[] __initconst = { - {} -}; #endif /* -------------------------------------------------------------------------- @@ -1033,11 +1029,16 @@ void __init acpi_early_init(void) acpi_permanent_mmap = true; +#ifdef CONFIG_X86 /* * If the machine falls into the DMI check table, - * DSDT will be copied to memory + * DSDT will be copied to memory. + * Note that calling dmi_check_system() here on other architectures + * would not be OK because only x86 initializes dmi early enough. + * Thankfully only x86 systems need such quirks for now. */ dmi_check_system(dsdt_dmi_table); +#endif status = acpi_reallocate_root_table(); if (ACPI_FAILURE(status)) { -- GitLab From f11fc4bc669b8622510c1039499f5a9d24248fec Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 3 Sep 2018 10:00:07 +0800 Subject: [PATCH 1139/1692] ACPI / LPSS: Force LPSS quirks on boot Commit 12864ff8545f (ACPI / LPSS: Avoid PM quirks on suspend and resume from hibernation) bypasses lpss quirks for S3 and S4, by setting a flag for S3/S4 in acpi_lpss_suspend(), and check that flag in acpi_lpss_resume(). But this overlooks the boot case where acpi_lpss_resume() may get called without a corresponding acpi_lpss_suspend() having been called. Thus force setting the flag during boot. Fixes: 12864ff8545f (ACPI / LPSS: Avoid PM quirks on suspend and resume from hibernation) Link: https://bugzilla.kernel.org/show_bug.cgi?id=200989 Reported-and-tested-by: William Lieurance Signed-off-by: Zhang Rui Cc: 4.15+ # 4.15+: 12864ff8545f (ACPI / LPSS: Avoid ...) Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/acpi_lpss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 9706613eecf9..bf64cfa30feb 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev) #define LPSS_GPIODEF0_DMA_LLP BIT(13) static DEFINE_MUTEX(lpss_iosf_mutex); -static bool lpss_iosf_d3_entered; +static bool lpss_iosf_d3_entered = true; static void lpss_iosf_enter_d3_state(void) { -- GitLab From 01a84c11a5e6a4f78834c66c7eb84bcdfea6eafc Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Thu, 6 Sep 2018 15:14:18 +0300 Subject: [PATCH 1140/1692] drm/i915: Update DRIVER_DATE to 20180906 Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 767615ecdea5..f63dca7f396a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -86,8 +86,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20180903" -#define DRIVER_TIMESTAMP 1535975875 +#define DRIVER_DATE "20180906" +#define DRIVER_TIMESTAMP 1536236058 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions -- GitLab From 17f6bac2249356c795339e03a0742cd79be3cab8 Mon Sep 17 00:00:00 2001 From: Chuanhua Lei Date: Thu, 6 Sep 2018 18:03:23 +0800 Subject: [PATCH 1141/1692] x86/tsc: Prevent result truncation on 32bit Loops per jiffy is calculated by multiplying tsc_khz with 1e3 and then dividing it by HZ. Both tsc_khz and the temporary variable holding the multiplication result are of type unsigned long, so on 32bit the result is truncated to the lower 32bit. Use u64 as type for the temporary variable and cast tsc_khz to it before multiplying. [ tglx: Massaged changelog and removed pointless braces ] Fixes: cf7a63ef4e02 ("x86/tsc: Calibrate tsc only once") Signed-off-by: Chuanhua Lei Signed-off-by: Thomas Gleixner Cc: yixin.zhu@linux.intel.com Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Len Brown Cc: Pavel Tatashin Cc: Rajvi Jingar Cc: Dou Liyang Link: https://lkml.kernel.org/r/1536228203-18701-1-git-send-email-chuanhua.lei@linux.intel.com --- arch/x86/kernel/tsc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 1463468ba9a0..6490f618e096 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1415,7 +1415,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early) static unsigned long __init get_loops_per_jiffy(void) { - unsigned long lpj = tsc_khz * KHZ; + u64 lpj = (u64)tsc_khz * KHZ; do_div(lpj, HZ); return lpj; -- GitLab From 9fe6299dde587788f245e9f7a5a1b296fad4e8c7 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Fri, 31 Aug 2018 21:41:51 +0200 Subject: [PATCH 1142/1692] x86/process: Don't mix user/kernel regs in 64bit __show_regs() When the kernel.print-fatal-signals sysctl has been enabled, a simple userspace crash will cause the kernel to write a crash dump that contains, among other things, the kernel gsbase into dmesg. As suggested by Andy, limit output to pt_regs, FS_BASE and KERNEL_GS_BASE in this case. This also moves the bitness-specific logic from show_regs() into process_{32,64}.c. Fixes: 45807a1df9f5 ("vdso: print fatal signals") Signed-off-by: Jann Horn Signed-off-by: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Greg Kroah-Hartman Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180831194151.123586-1-jannh@google.com --- arch/x86/include/asm/kdebug.h | 12 +++++++++++- arch/x86/kernel/dumpstack.c | 11 +++-------- arch/x86/kernel/process_32.c | 4 ++-- arch/x86/kernel/process_64.c | 12 ++++++++++-- 4 files changed, 26 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index 395c9631e000..75f1e35e7c15 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h @@ -22,10 +22,20 @@ enum die_val { DIE_NMIUNKNOWN, }; +enum show_regs_mode { + SHOW_REGS_SHORT, + /* + * For when userspace crashed, but we don't think it's our fault, and + * therefore don't print kernel registers. + */ + SHOW_REGS_USER, + SHOW_REGS_ALL +}; + extern void die(const char *, struct pt_regs *,long); extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_stack_regs(struct pt_regs *regs); -extern void __show_regs(struct pt_regs *regs, int all); +extern void __show_regs(struct pt_regs *regs, enum show_regs_mode); extern void show_iret_regs(struct pt_regs *regs); extern unsigned long oops_begin(void); extern void oops_end(unsigned long, struct pt_regs *, int signr); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index f56895106ccf..2b5886401e5f 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -146,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs, * they can be printed in the right context. */ if (!partial && on_stack(info, regs, sizeof(*regs))) { - __show_regs(regs, 0); + __show_regs(regs, SHOW_REGS_SHORT); } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, IRET_FRAME_SIZE)) { @@ -344,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) oops_exit(); /* Executive summary in case the oops scrolled away */ - __show_regs(&exec_summary_regs, true); + __show_regs(&exec_summary_regs, SHOW_REGS_ALL); if (!signr) return; @@ -407,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err) void show_regs(struct pt_regs *regs) { - bool all = true; - show_regs_print_info(KERN_DEFAULT); - if (IS_ENABLED(CONFIG_X86_32)) - all = !user_mode(regs); - - __show_regs(regs, all); + __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL); /* * When in-kernel, we also print out the stack at the time of the fault.. 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 2924fd447e61..5046a3c9dec2 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -59,7 +59,7 @@ #include #include -void __show_regs(struct pt_regs *regs, int all) +void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; unsigned long d0, d1, d2, d3, d6, d7; @@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all) printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n", (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags); - if (!all) + if (mode != SHOW_REGS_ALL) return; cr0 = read_cr0(); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index a451bc374b9b..ea5ea850348d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -62,7 +62,7 @@ __visible DEFINE_PER_CPU(unsigned long, rsp_scratch); /* Prints also some state that isn't saved in the pt_regs */ -void __show_regs(struct pt_regs *regs, int all) +void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; unsigned long d0, d1, d2, d3, d6, d7; @@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all) printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", regs->r13, regs->r14, regs->r15); - if (!all) + if (mode == SHOW_REGS_SHORT) return; + if (mode == SHOW_REGS_USER) { + rdmsrl(MSR_FS_BASE, fs); + rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); + printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n", + fs, shadowgs); + return; + } + asm("movl %%ds,%0" : "=r" (ds)); asm("movl %%cs,%0" : "=r" (cs)); asm("movl %%es,%0" : "=r" (es)); -- GitLab From f8b7530aa0a1def79c93101216b5b17cf408a70a Mon Sep 17 00:00:00 2001 From: Neeraj Upadhyay Date: Wed, 5 Sep 2018 11:22:07 +0530 Subject: [PATCH 1143/1692] cpu/hotplug: Adjust misplaced smb() in cpuhp_thread_fun() The smp_mb() in cpuhp_thread_fun() is misplaced. It needs to be after the load of st->should_run to prevent reordering of the later load/stores w.r.t. the load of st->should_run. Fixes: 4dddfb5faa61 ("smp/hotplug: Rewrite AP state machine core") Signed-off-by: Neeraj Upadhyay Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Cc: josh@joshtriplett.org Cc: peterz@infradead.org Cc: jiangshanlai@gmail.com Cc: dzickus@redhat.com Cc: brendan.jackman@arm.com Cc: malat@debian.org Cc: mojha@codeaurora.org Cc: sramana@codeaurora.org Cc: linux-arm-msm@vger.kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/1536126727-11629-1-git-send-email-neeraju@codeaurora.org --- kernel/cpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/cpu.c b/kernel/cpu.c index aa7fe85ad62e..eb4041f78073 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -607,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu) bool bringup = st->bringup; enum cpuhp_state state; + if (WARN_ON_ONCE(!st->should_run)) + return; + /* * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures * that if we see ->should_run we also see the rest of the state. 
*/ smp_mb(); - if (WARN_ON_ONCE(!st->should_run)) - return; - cpuhp_lock_acquire(bringup); if (st->single) { -- GitLab From 69fa6eb7d6a64801ea261025cce9723d9442d773 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 6 Sep 2018 15:21:38 +0200 Subject: [PATCH 1144/1692] cpu/hotplug: Prevent state corruption on error rollback When a teardown callback fails, the CPU hotplug code brings the CPU back to the previous state. The previous state becomes the new target state. The rollback happens in undo_cpu_down() which increments the state unconditionally even if the state is already the same as the target. As a consequence the next CPU hotplug operation will start at the wrong state. This is easily to observe when __cpu_disable() fails. Prevent the unconditional undo by checking the state vs. target before incrementing state and fix up the consequently wrong conditional in the unplug code which handles the failure of the final CPU take down on the control CPU side. Fixes: 4dddfb5faa61 ("smp/hotplug: Rewrite AP state machine core") Reported-by: Neeraj Upadhyay Signed-off-by: Thomas Gleixner Tested-by: Geert Uytterhoeven Tested-by: Sudeep Holla Tested-by: Neeraj Upadhyay Cc: josh@joshtriplett.org Cc: peterz@infradead.org Cc: jiangshanlai@gmail.com Cc: dzickus@redhat.com Cc: brendan.jackman@arm.com Cc: malat@debian.org Cc: sramana@codeaurora.org Cc: linux-arm-msm@vger.kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1809051419580.1416@nanos.tec.linutronix.de ---- --- kernel/cpu.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/cpu.c b/kernel/cpu.c index eb4041f78073..0097acec1c71 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -916,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); if (ret) { st->target = prev_state; - undo_cpu_down(cpu, st); + if (st->state < prev_state) + undo_cpu_down(cpu, st); break; } } @@ -969,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, * to do the further cleanups. 
*/ ret = cpuhp_down_callbacks(cpu, st, target); - if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { + if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) { cpuhp_reset_state(st, prev_state); __cpuhp_kick_ap(st); } -- GitLab From d4da8a4d4004e61bd23494e23e22ddbc98571546 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Thu, 6 Sep 2018 16:45:54 +0300 Subject: [PATCH 1145/1692] drm/i915: Update DRIVER_DATE to 20180906 Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f63dca7f396a..bf78b8f1eb17 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -87,7 +87,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" #define DRIVER_DATE "20180906" -#define DRIVER_TIMESTAMP 1536236058 +#define DRIVER_TIMESTAMP 1536241554 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions -- GitLab From a28957b8f10be714f076fb3981a3b1a0318c48c2 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Thu, 6 Sep 2018 16:54:43 +0300 Subject: [PATCH 1146/1692] drm/i915: Update DRIVER_DATE to 20180906 Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bf78b8f1eb17..2ccb982a5dba 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -87,7 +87,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" #define DRIVER_DATE "20180906" -#define DRIVER_TIMESTAMP 1536241554 +#define DRIVER_TIMESTAMP 1536242083 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions -- GitLab From 8aaff15168cfbc7c8980fdb0e8a585f1afe56ec0 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Fri, 24 Aug 2018 15:32:43 +0200 Subject: [PATCH 1147/1692] ceph: avoid a use-after-free in ceph_destroy_options() syzbot reported a use-after-free in ceph_destroy_options(), called from ceph_mount(). The problem was that create_fs_client() consumed the opt pointer on some errors, but not on all of them. Make sure it always consumes both libceph and ceph options. Reported-by: syzbot+8ab6f1042021b4eed062@syzkaller.appspotmail.com Signed-off-by: Ilya Dryomov Reviewed-by: "Yan, Zheng" --- fs/ceph/super.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 43ca3b763875..eab1359d0553 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -602,6 +602,8 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg) /* * create a new fs client + * + * Success or not, this function consumes @fsopt and @opt. 
*/ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, struct ceph_options *opt) @@ -609,17 +611,20 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, struct ceph_fs_client *fsc; int page_count; size_t size; - int err = -ENOMEM; + int err; fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); - if (!fsc) - return ERR_PTR(-ENOMEM); + if (!fsc) { + err = -ENOMEM; + goto fail; + } fsc->client = ceph_create_client(opt, fsc); if (IS_ERR(fsc->client)) { err = PTR_ERR(fsc->client); goto fail; } + opt = NULL; /* fsc->client now owns this */ fsc->client->extra_mon_dispatch = extra_mon_dispatch; fsc->client->osdc.abort_on_full = true; @@ -677,6 +682,9 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, ceph_destroy_client(fsc->client); fail: kfree(fsc); + if (opt) + ceph_destroy_options(opt); + destroy_mount_options(fsopt); return ERR_PTR(err); } @@ -1042,8 +1050,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type, fsc = create_fs_client(fsopt, opt); if (IS_ERR(fsc)) { res = ERR_CAST(fsc); - destroy_mount_options(fsopt); - ceph_destroy_options(opt); goto out_final; } -- GitLab From eb3b2d6be4b5e1612827b986cca241c5d104fc41 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Wed, 22 Aug 2018 17:11:27 +0200 Subject: [PATCH 1148/1692] rbd: factor out get_parent_info() In preparation for the new parent_get and parent_overlap_get class methods, factor out the fetching and decoding of parent data. As a side effect, we now decode all four fields in the "no parent" case. Signed-off-by: Ilya Dryomov Reviewed-by: Jason Dillaman --- drivers/block/rbd.c | 134 ++++++++++++++++++++++++++++---------------- 1 file changed, 86 insertions(+), 48 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 7915f3b03736..bec5a50c9890 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4584,47 +4584,95 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev) &rbd_dev->header.features); } +struct parent_image_info { + u64 pool_id; + const char *image_id; + u64 snap_id; + + u64 overlap; +}; + +/* + * The caller is responsible for @pii. 
+ */ +static int __get_parent_info_legacy(struct rbd_device *rbd_dev, + struct page *req_page, + struct page *reply_page, + struct parent_image_info *pii) +{ + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + size_t reply_len = PAGE_SIZE; + void *p, *end; + int ret; + + ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, + "rbd", "get_parent", CEPH_OSD_FLAG_READ, + req_page, sizeof(u64), reply_page, &reply_len); + if (ret) + return ret; + + p = page_address(reply_page); + end = p + reply_len; + ceph_decode_64_safe(&p, end, pii->pool_id, e_inval); + pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); + if (IS_ERR(pii->image_id)) { + ret = PTR_ERR(pii->image_id); + pii->image_id = NULL; + return ret; + } + ceph_decode_64_safe(&p, end, pii->snap_id, e_inval); + ceph_decode_64_safe(&p, end, pii->overlap, e_inval); + + return 0; + +e_inval: + return -EINVAL; +} + +static int get_parent_info(struct rbd_device *rbd_dev, + struct parent_image_info *pii) +{ + struct page *req_page, *reply_page; + void *p; + int ret; + + req_page = alloc_page(GFP_KERNEL); + if (!req_page) + return -ENOMEM; + + reply_page = alloc_page(GFP_KERNEL); + if (!reply_page) { + __free_page(req_page); + return -ENOMEM; + } + + p = page_address(req_page); + ceph_encode_64(&p, rbd_dev->spec->snap_id); + ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, pii); + + __free_page(req_page); + __free_page(reply_page); + return ret; +} + static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) { struct rbd_spec *parent_spec; - size_t size; - void *reply_buf = NULL; - __le64 snapid; - void *p; - void *end; - u64 pool_id; - char *image_id; - u64 snap_id; - u64 overlap; + struct parent_image_info pii = { 0 }; int ret; parent_spec = rbd_spec_alloc(); if (!parent_spec) return -ENOMEM; - size = sizeof (__le64) + /* pool_id */ - sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */ - sizeof (__le64) + /* snap_id */ - sizeof (__le64); /* overlap */ - reply_buf = kmalloc(size, GFP_KERNEL); - if (!reply_buf) { - ret = -ENOMEM; + ret = get_parent_info(rbd_dev, &pii); + if (ret) goto out_err; - } - snapid = cpu_to_le64(rbd_dev->spec->snap_id); - ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, - &rbd_dev->header_oloc, "get_parent", - &snapid, sizeof(snapid), reply_buf, size); - dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); - if (ret < 0) - goto out_err; + dout("%s pool_id %llu image_id %s snap_id %llu overlap %llu\n", + __func__, pii.pool_id, pii.image_id, pii.snap_id, pii.overlap); - p = reply_buf; - end = reply_buf + ret; - ret = -ERANGE; - ceph_decode_64_safe(&p, end, pool_id, out_err); - if (pool_id == CEPH_NOPOOL) { + if (pii.pool_id == CEPH_NOPOOL) { /* * Either the parent never existed, or we have * record of it but the image got flattened so it no @@ -4647,19 +4695,11 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) /* The ceph file layout needs to fit pool id in 32 bits */ ret = -EIO; - if (pool_id > (u64)U32_MAX) { + if (pii.pool_id > (u64)U32_MAX) { rbd_warn(NULL, "parent pool id too large (%llu > %u)", - (unsigned long long)pool_id, U32_MAX); - goto out_err; - } - - image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); - if (IS_ERR(image_id)) { - ret = PTR_ERR(image_id); + (unsigned long long)pii.pool_id, U32_MAX); goto out_err; } - ceph_decode_64_safe(&p, end, snap_id, out_err); - ceph_decode_64_safe(&p, end, overlap, out_err); /* * The parent won't change (except when the clone is @@ -4667,9 +4707,10 @@ 
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) * record the parent spec we have not already done so. */ if (!rbd_dev->parent_spec) { - parent_spec->pool_id = pool_id; - parent_spec->image_id = image_id; - parent_spec->snap_id = snap_id; + parent_spec->pool_id = pii.pool_id; + parent_spec->image_id = pii.image_id; + pii.image_id = NULL; + parent_spec->snap_id = pii.snap_id; /* TODO: support cloning across namespaces */ if (rbd_dev->spec->pool_ns) { @@ -4683,15 +4724,13 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) rbd_dev->parent_spec = parent_spec; parent_spec = NULL; /* rbd_dev now owns this */ - } else { - kfree(image_id); } /* * We always update the parent overlap. If it's zero we issue * a warning, as we will proceed as if there was no parent. */ - if (!overlap) { + if (!pii.overlap) { if (parent_spec) { /* refresh, careful to warn just once */ if (rbd_dev->parent_overlap) @@ -4702,14 +4741,13 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); } } - rbd_dev->parent_overlap = overlap; + rbd_dev->parent_overlap = pii.overlap; out: ret = 0; out_err: - kfree(reply_buf); + kfree(pii.image_id); rbd_spec_put(parent_spec); - return ret; } -- GitLab From e92c0eaf754310f9f31e9229a3f7274a67478f82 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Wed, 22 Aug 2018 17:26:10 +0200 Subject: [PATCH 1149/1692] rbd: support cloning across namespaces If parent_get class method is not supported by the OSDs, fall back to the legacy class method and assume that the parent is in the default (i.e. "") namespace. The "use the child's image namespace" workaround is no longer needed because creating images within namespaces will require parent_get aware OSDs. Signed-off-by: Ilya Dryomov Reviewed-by: Jason Dillaman --- drivers/block/rbd.c | 111 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 97 insertions(+), 14 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index bec5a50c9890..73ed5f3a862d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev, count += sprintf(&buf[count], "%s" "pool_id %llu\npool_name %s\n" + "pool_ns %s\n" "image_id %s\nimage_name %s\n" "snap_id %llu\nsnap_name %s\n" "overlap %llu\n", !count ? "" : "\n", /* first? */ spec->pool_id, spec->pool_name, + spec->pool_ns ?: "", spec->image_id, spec->image_name ?: "(unknown)", spec->snap_id, spec->snap_name, rbd_dev->parent_overlap); @@ -4586,12 +4588,89 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev) struct parent_image_info { u64 pool_id; + const char *pool_ns; const char *image_id; u64 snap_id; + bool has_overlap; u64 overlap; }; +/* + * The caller is responsible for @pii. 
+ */ +static int decode_parent_image_spec(void **p, void *end, + struct parent_image_info *pii) +{ + u8 struct_v; + u32 struct_len; + int ret; + + ret = ceph_start_decoding(p, end, 1, "ParentImageSpec", + &struct_v, &struct_len); + if (ret) + return ret; + + ceph_decode_64_safe(p, end, pii->pool_id, e_inval); + pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); + if (IS_ERR(pii->pool_ns)) { + ret = PTR_ERR(pii->pool_ns); + pii->pool_ns = NULL; + return ret; + } + pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL); + if (IS_ERR(pii->image_id)) { + ret = PTR_ERR(pii->image_id); + pii->image_id = NULL; + return ret; + } + ceph_decode_64_safe(p, end, pii->snap_id, e_inval); + return 0; + +e_inval: + return -EINVAL; +} + +static int __get_parent_info(struct rbd_device *rbd_dev, + struct page *req_page, + struct page *reply_page, + struct parent_image_info *pii) +{ + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + size_t reply_len = PAGE_SIZE; + void *p, *end; + int ret; + + ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, + "rbd", "parent_get", CEPH_OSD_FLAG_READ, + req_page, sizeof(u64), reply_page, &reply_len); + if (ret) + return ret == -EOPNOTSUPP ? 1 : ret; + + p = page_address(reply_page); + end = p + reply_len; + ret = decode_parent_image_spec(&p, end, pii); + if (ret) + return ret; + + ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, + "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ, + req_page, sizeof(u64), reply_page, &reply_len); + if (ret) + return ret; + + p = page_address(reply_page); + end = p + reply_len; + ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval); + if (pii->has_overlap) + ceph_decode_64_safe(&p, end, pii->overlap, e_inval); + + return 0; + +e_inval: + return -EINVAL; +} + /* * The caller is responsible for @pii. */ @@ -4621,6 +4700,7 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev, return ret; } ceph_decode_64_safe(&p, end, pii->snap_id, e_inval); + pii->has_overlap = true; ceph_decode_64_safe(&p, end, pii->overlap, e_inval); return 0; @@ -4648,7 +4728,10 @@ static int get_parent_info(struct rbd_device *rbd_dev, p = page_address(req_page); ceph_encode_64(&p, rbd_dev->spec->snap_id); - ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, pii); + ret = __get_parent_info(rbd_dev, req_page, reply_page, pii); + if (ret > 0) + ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page, + pii); __free_page(req_page); __free_page(reply_page); @@ -4669,10 +4752,11 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) if (ret) goto out_err; - dout("%s pool_id %llu image_id %s snap_id %llu overlap %llu\n", - __func__, pii.pool_id, pii.image_id, pii.snap_id, pii.overlap); + dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n", + __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id, + pii.has_overlap, pii.overlap); - if (pii.pool_id == CEPH_NOPOOL) { + if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) { /* * Either the parent never existed, or we have * record of it but the image got flattened so it no @@ -4681,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) * overlap to 0. The effect of this is that all new * requests will be treated as if the image had no * parent. + * + * If !pii.has_overlap, the parent image spec is not + * applicable. It's there to avoid duplication in each + * snapshot record. 
*/ if (rbd_dev->parent_overlap) { rbd_dev->parent_overlap = 0; @@ -4708,20 +4796,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) */ if (!rbd_dev->parent_spec) { parent_spec->pool_id = pii.pool_id; + if (pii.pool_ns && *pii.pool_ns) { + parent_spec->pool_ns = pii.pool_ns; + pii.pool_ns = NULL; + } parent_spec->image_id = pii.image_id; pii.image_id = NULL; parent_spec->snap_id = pii.snap_id; - /* TODO: support cloning across namespaces */ - if (rbd_dev->spec->pool_ns) { - parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns, - GFP_KERNEL); - if (!parent_spec->pool_ns) { - ret = -ENOMEM; - goto out_err; - } - } - rbd_dev->parent_spec = parent_spec; parent_spec = NULL; /* rbd_dev now owns this */ } @@ -4746,6 +4828,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) out: ret = 0; out_err: + kfree(pii.pool_ns); kfree(pii.image_id); rbd_spec_put(parent_spec); return ret; -- GitLab From 52cf93e63ee672a92f349edc6ddad86ec8808fd8 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Thu, 6 Sep 2018 10:55:18 +0800 Subject: [PATCH 1150/1692] HID: i2c-hid: Don't reset device upon system resume Raydium touchscreen triggers interrupt storm after system-wide suspend: [ 179.085033] i2c_hid i2c-CUST0000:00: i2c_hid_get_input: incomplete report (58/65535) According to Raydium, Windows driver does not reset the device after system resume. The HID over I2C spec does specify a reset should be used at intialization, but it doesn't specify if reset is required for system suspend. Tested this patch on other i2c-hid touchpanels I have and those touchpanels do work after S3 without doing reset. If any regression happens to other touchpanel vendors, we can use quirk for Raydium devices. There's still one device uses I2C_HID_QUIRK_RESEND_REPORT_DESCR so keep it there. Cc: Aaron Ma Cc: AceLan Kao Signed-off-by: Kai-Heng Feng Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-ids.h | 4 ---- drivers/hid/i2c-hid/i2c-hid.c | 13 +++++++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 19a66ceca217..5146ee029db4 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -530,10 +530,6 @@ #define I2C_VENDOR_ID_HANTICK 0x0911 #define I2C_PRODUCT_ID_HANTICK_5288 0x5288 -#define I2C_VENDOR_ID_RAYD 0x2386 -#define I2C_PRODUCT_ID_RAYD_3118 0x3118 -#define I2C_PRODUCT_ID_RAYD_4B33 0x4B33 - #define USB_VENDOR_ID_HANWANG 0x0b57 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 57126f6837bb..f3076659361a 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -170,12 +170,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, - { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, - I2C_HID_QUIRK_RESEND_REPORT_DESCR }, { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH, I2C_HID_QUIRK_RESEND_REPORT_DESCR }, - { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_4B33, - I2C_HID_QUIRK_RESEND_REPORT_DESCR }, { 0, 0 } }; @@ -1237,11 +1233,16 @@ static int i2c_hid_resume(struct device *dev) pm_runtime_enable(dev); enable_irq(client->irq); - ret = i2c_hid_hwreset(client); + + /* Instead of resetting device, simply powers the device on. 
This + * solves "incomplete reports" on Raydium devices 2386:3118 and + * 2386:4B33 + */ + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); if (ret) return ret; - /* RAYDIUM device (2386:3118) need to re-send report descr cmd + /* Some devices need to re-send report descr cmd * after resume, after this it will be back normal. * otherwise it issues too many incomplete reports. */ -- GitLab From d1c392c9e2a301f38998a353f467f76414e38725 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 5 Sep 2018 16:29:49 -0400 Subject: [PATCH 1151/1692] printk/tracing: Do not trace printk_nmi_enter() I hit the following splat in my tests: ------------[ cut here ]------------ IRQs not enabled as expected WARNING: CPU: 3 PID: 0 at kernel/time/tick-sched.c:982 tick_nohz_idle_enter+0x44/0x8c Modules linked in: ip6t_REJECT nf_reject_ipv6 ip6table_filter ip6_tables ipv6 CPU: 3 PID: 0 Comm: swapper/3 Not tainted 4.19.0-rc2-test+ #2 Hardware name: MSI MS-7823/CSM-H87M-G43 (MS-7823), BIOS V1.6 02/22/2014 EIP: tick_nohz_idle_enter+0x44/0x8c Code: ec 05 00 00 00 75 26 83 b8 c0 05 00 00 00 75 1d 80 3d d0 36 3e c1 00 75 14 68 94 63 12 c1 c6 05 d0 36 3e c1 01 e8 04 ee f8 ff <0f> 0b 58 fa bb a0 e5 66 c1 e8 25 0f 04 00 64 03 1d 28 31 52 c1 8b EAX: 0000001c EBX: f26e7f8c ECX: 00000006 EDX: 00000007 ESI: f26dd1c0 EDI: 00000000 EBP: f26e7f40 ESP: f26e7f38 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00010296 CR0: 80050033 CR2: 0813c6b0 CR3: 2f342000 CR4: 001406f0 Call Trace: do_idle+0x33/0x202 cpu_startup_entry+0x61/0x63 start_secondary+0x18e/0x1ed startup_32_smp+0x164/0x168 irq event stamp: 18773830 hardirqs last enabled at (18773829): [] trace_hardirqs_on_thunk+0xc/0x10 hardirqs last disabled at (18773830): [] trace_hardirqs_off_thunk+0xc/0x10 softirqs last enabled at (18773824): [] __do_softirq+0x25f/0x2bf softirqs last disabled at (18773767): [] call_on_stack+0x45/0x4b ---[ end trace b7c64aa79e17954a ]--- After a bit of debugging, I found what was happening. This would trigger when performing "perf" with a high NMI interrupt rate, while enabling and disabling function tracer. Ftrace uses breakpoints to convert the nops at the start of functions to calls to the function trampolines. The breakpoint traps disable interrupts and this makes calls into lockdep via the trace_hardirqs_off_thunk in the entry.S code. What happens is the following: do_idle { [interrupts enabled] [interrupts disabled] TRACE_IRQS_OFF [lockdep says irqs off] [...] TRACE_IRQS_IRET test if pt_regs say return to interrupts enabled [yes] TRACE_IRQS_ON [lockdep says irqs are on] nmi_enter() { printk_nmi_enter() [traced by ftrace] [ hit ftrace breakpoint ] TRACE_IRQS_OFF [lockdep says irqs off] [...] TRACE_IRQS_IRET [return from breakpoint] test if pt_regs say interrupts enabled [no] [iret back to interrupt] [iret back to code] tick_nohz_idle_enter() { lockdep_assert_irqs_enabled() [lockdep say no!] Although interrupts are indeed enabled, lockdep thinks it is not, and since we now do asserts via lockdep, it gives a false warning. The issue here is that printk_nmi_enter() is called before lockdep_off(), which disables lockdep (for this reason) in NMIs. By simply not allowing ftrace to see printk_nmi_enter() (via notrace annotation) we keep lockdep from getting confused. 
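For readers unfamiliar with the annotation, a minimal sketch of the rule this fix applies (illustration only; the helper name is invented):

    #include <linux/compiler.h>    /* notrace */

    /* Any helper that runs in NMI context before lockdep_off() should be
     * invisible to ftrace, so its entry site is never live-patched and the
     * breakpoint-driven conversion described above cannot trap (and update
     * lockdep's IRQ state) inside the NMI prologue. */
    static void notrace early_nmi_helper(void)
    {
            /* work that must not be function-traced */
    }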
Cc: stable@vger.kernel.org Fixes: 42a0bb3f71383 ("printk/nmi: generic solution for safe printk in NMI") Acked-by: Sergey Senozhatsky Acked-by: Petr Mladek Signed-off-by: Steven Rostedt (VMware) --- kernel/printk/printk_safe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index a0a74c533e4b..0913b4d385de 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args) return printk_safe_log_store(s, fmt, args); } -void printk_nmi_enter(void) +void notrace printk_nmi_enter(void) { this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); } -void printk_nmi_exit(void) +void notrace printk_nmi_exit(void) { this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); } -- GitLab From 96d529bac562574600eda85726fcfa3eef6dde8e Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 6 Sep 2018 16:10:39 +0100 Subject: [PATCH 1152/1692] firmware: arm_scmi: fix divide by zero when sustained_perf_level is zero Firmware can provide zero as values for sustained performance level and corresponding sustained frequency in kHz in order to hide the actual frequencies and provide only abstract values. It may endup with divide by zero scenario resulting in kernel panic. Let's set the multiplication factor to one if either one or both of them (sustained_perf_level and sustained_freq) are set to zero. Fixes: a9e3fbfaa0ff ("firmware: arm_scmi: add initial support for performance protocol") Reported-by: Ionela Voinescu Signed-off-by: Sudeep Holla Signed-off-by: Olof Johansson --- drivers/firmware/arm_scmi/perf.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 721e6c57beae..64342944d917 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain, le32_to_cpu(attr->sustained_freq_khz); dom_info->sustained_perf_level = le32_to_cpu(attr->sustained_perf_level); - dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / + if (!dom_info->sustained_freq_khz || + !dom_info->sustained_perf_level) + /* CPUFreq converts to kHz, hence default 1000 */ + dom_info->mult_factor = 1000; + else + dom_info->mult_factor = + (dom_info->sustained_freq_khz * 1000) / dom_info->sustained_perf_level; memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); } -- GitLab From fac880c7d074fdfca874114b5c47b36aa034e4ee Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 5 Sep 2018 17:38:57 +0100 Subject: [PATCH 1153/1692] arm64: fix erroneous warnings in page freeing functions In pmd_free_pte_page() and pud_free_pmd_page() we try to warn if they hit a present non-table entry. In both cases we'll warn for non-present entries, as the VM_WARN_ON() only checks the entry is not a table entry. This has been observed to result in warnings when booting a v4.19-rc2 kernel under qemu. Fix this by bailing out earlier for non-present entries. 
Fixes: ec28bb9c9b0826d7 ("arm64: Implement page table free interfaces") Signed-off-by: Mark Rutland Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/mm/mmu.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 65f86271f02b..8080c9f489c3 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -985,8 +985,9 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) pmd = READ_ONCE(*pmdp); - /* No-op for empty entry and WARN_ON for valid entry */ - if (!pmd_present(pmd) || !pmd_table(pmd)) { + if (!pmd_present(pmd)) + return 1; + if (!pmd_table(pmd)) { VM_WARN_ON(!pmd_table(pmd)); return 1; } @@ -1007,8 +1008,9 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) pud = READ_ONCE(*pudp); - /* No-op for empty entry and WARN_ON for valid entry */ - if (!pud_present(pud) || !pud_table(pud)) { + if (!pud_present(pud)) + return 1; + if (!pud_table(pud)) { VM_WARN_ON(!pud_table(pud)); return 1; } -- GitLab From 6b45a2b1c0bc2aec84d1c56a1976ca9c8a621ecb Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Thu, 6 Sep 2018 14:12:19 +0200 Subject: [PATCH 1154/1692] memory: ti-aemif: fix a potential NULL-pointer dereference Platform data pointer may be NULL. We check it everywhere but in one place. Fix it. Fixes: 8af70cd2ca50 ("memory: aemif: add support for board files") Reported-by: Dan Carpenter Signed-off-by: Bartosz Golaszewski Cc: stable@vger.kernel.org Signed-off-by: Olof Johansson --- drivers/memory/ti-aemif.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c index 31112f622b88..475e5b3790ed 100644 --- a/drivers/memory/ti-aemif.c +++ b/drivers/memory/ti-aemif.c @@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev) if (ret < 0) goto error; } - } else { + } else if (pdata) { for (i = 0; i < pdata->num_sub_devices; i++) { pdata->sub_devices[i].dev.parent = dev; ret = platform_device_register(&pdata->sub_devices[i]); -- GitLab From 432061b3da64e488be3403124a72a9250bbe96d4 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 5 Sep 2018 09:17:45 -0400 Subject: [PATCH 1155/1692] dm: disable CRYPTO_TFM_REQ_MAY_SLEEP to fix a GFP_KERNEL recursion deadlock There's a XFS on dm-crypt deadlock, recursing back to itself due to the crypto subsystems use of GFP_KERNEL, reported here: https://bugzilla.kernel.org/show_bug.cgi?id=200835 * dm-crypt calls crypt_convert in xts mode * init_crypt from xts.c calls kmalloc(GFP_KERNEL) * kmalloc(GFP_KERNEL) recurses into the XFS filesystem, the filesystem tries to submit some bios and wait for them, causing a deadlock Fix this by updating both the DM crypt and integrity targets to no longer use the CRYPTO_TFM_REQ_MAY_SLEEP flag, which will change the crypto allocations from GFP_KERNEL to GFP_ATOMIC, therefore they can't recurse into a filesystem. A GFP_ATOMIC allocation can fail, but init_crypt() in xts.c handles the allocation failure gracefully - it will fall back to preallocated buffer if the allocation fails. 
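The flag-to-GFP mapping this relies on can be sketched as follows (illustrative only; the variable names are made up, the real call site being init_crypt() in crypto/xts.c):

    gfp_t gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                    GFP_KERNEL :    /* may enter fs reclaim, submit bios and wait */
                    GFP_ATOMIC;     /* never recurses into the filesystem; may fail */

    buf = kmalloc(len, gfp);
    if (!buf)
            buf = prealloc_buf;     /* xts falls back to its preallocated buffer */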
The crypto API maintainer says that the crypto API only needs to allocate memory when dealing with unaligned buffers and therefore turning CRYPTO_TFM_REQ_MAY_SLEEP off is safe (see this discussion: https://www.redhat.com/archives/dm-devel/2018-August/msg00195.html ) Cc: stable@vger.kernel.org Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-crypt.c | 10 +++++----- drivers/md/dm-integrity.c | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f266c81f396f..0481223b1deb 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) int err; desc->tfm = essiv->hash_tfm; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc->flags = 0; err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); shash_desc_zero(desc); @@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, int i, r; desc->tfm = lmk->hash_tfm; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc->flags = 0; r = crypto_shash_init(desc); if (r) @@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc, /* calculate crc32 for every 32bit part and xor it */ desc->tfm = tcw->crc32_tfm; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc->flags = 0; for (i = 0; i < 4; i++) { r = crypto_shash_init(desc); if (r) @@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc, * requests if driver request queue is full. */ skcipher_request_set_callback(ctx->r.req, - CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + CRYPTO_TFM_REQ_MAY_BACKLOG, kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); } @@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc, * requests if driver request queue is full. */ aead_request_set_callback(ctx->r.req_aead, - CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + CRYPTO_TFM_REQ_MAY_BACKLOG, kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 378878599466..89ccb64342de 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result unsigned j, size; desc->tfm = ic->journal_mac; - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc->flags = 0; r = crypto_shash_init(desc); if (unlikely(r)) { @@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err) static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) { int r; - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, complete_journal_encrypt, comp); if (likely(encrypt)) r = crypto_skcipher_encrypt(req); -- GitLab From d5274b3cd6a814ccb2f56d81ee87cbbf51bd4cf7 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 6 Sep 2018 11:05:44 +0300 Subject: [PATCH 1156/1692] block: bfq: swap puts in bfqg_and_blkg_put Fix trivial use-after-free. This could be last reference to bfqg. 
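The ordering problem is easiest to see side by side (comments added for illustration; the actual two-line swap is in the hunk below):

    /* before: bfqg_put() may drop the last reference and free bfqg, after
     * which bfqg_to_blkg(bfqg) dereferences freed memory */
    bfqg_put(bfqg);
    blkg_put(bfqg_to_blkg(bfqg));

    /* after: resolve the blkg while bfqg is still guaranteed valid, then
     * drop the bfqg reference last */
    blkg_put(bfqg_to_blkg(bfqg));
    bfqg_put(bfqg);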
Fixes: 8f9bebc33dd7 ("block, bfq: access and cache blkg data only when safe") Acked-by: Paolo Valente Signed-off-by: Konstantin Khlebnikov Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 58c6efa9f9a9..9fe5952d117d 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg) void bfqg_and_blkg_put(struct bfq_group *bfqg) { - bfqg_put(bfqg); - blkg_put(bfqg_to_blkg(bfqg)); + + bfqg_put(bfqg); } /* @stats = 0 */ -- GitLab From 38b0bd0cda07d34ad6f145fce675ead74739c44e Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Thu, 6 Sep 2018 18:33:38 +0200 Subject: [PATCH 1157/1692] dm raid: fix reshape race on small devices Loading a new mapping table, the dm-raid target's constructor retrieves the volatile reshaping state from the raid superblocks. When the new table is activated in a following resume, the actual reshape position is retrieved. The reshape driven by the previous mapping can already have finished on small and/or fast devices thus updating raid superblocks about the new raid layout. This causes the actual array state (e.g. stripe size reshape finished) to be inconsistent with the one in the new mapping, causing hangs with left behind devices. This race does not occur with usual raid device sizes but with small ones (e.g. those created by the lvm2 test suite). Fix by no longer transferring stale/inconsistent raid_set state during preresume. Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- drivers/md/dm-raid.c | 48 +------------------------------------------- 1 file changed, 1 insertion(+), 47 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index cae689de75fd..d8406e0b4540 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -29,9 +29,6 @@ */ #define MIN_RAID456_JOURNAL_SPACE (4*2048) -/* Global list of all raid sets */ -static LIST_HEAD(raid_sets); - static bool devices_handle_discard_safely = false; /* @@ -227,7 +224,6 @@ struct rs_layout { struct raid_set { struct dm_target *ti; - struct list_head list; uint32_t stripe_cache_entries; unsigned long ctr_flags; @@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l) mddev->new_chunk_sectors = l->new_chunk_sectors; } -/* Find any raid_set in active slot for @rs on global list */ -static struct raid_set *rs_find_active(struct raid_set *rs) -{ - struct raid_set *r; - struct mapped_device *md = dm_table_get_md(rs->ti->table); - - list_for_each_entry(r, &raid_sets, list) - if (r != rs && dm_table_get_md(r->ti->table) == md) - return r; - - return NULL; -} - /* raid10 algorithms (i.e. formats) */ #define ALGORITHM_RAID10_DEFAULT 0 #define ALGORITHM_RAID10_NEAR 1 @@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r mddev_init(&rs->md); - INIT_LIST_HEAD(&rs->list); rs->raid_disks = raid_devs; rs->delta_disks = 0; @@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r for (i = 0; i < raid_devs; i++) md_rdev_init(&rs->dev[i].rdev); - /* Add @rs to global list. */ - list_add(&rs->list, &raid_sets); - /* * Remaining items to be initialized by further RAID params: * rs->md.persistent @@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r return rs; } -/* Free all @rs allocations and remove it from global list. 
*/ +/* Free all @rs allocations */ static void raid_set_free(struct raid_set *rs) { int i; @@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs) dm_put_device(rs->ti, rs->dev[i].data_dev); } - list_del(&rs->list); - kfree(rs); } @@ -3947,29 +3924,6 @@ static int raid_preresume(struct dm_target *ti) if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) return 0; - if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) { - struct raid_set *rs_active = rs_find_active(rs); - - if (rs_active) { - /* - * In case no rebuilds have been requested - * and an active table slot exists, copy - * current resynchonization completed and - * reshape position pointers across from - * suspended raid set in the active slot. - * - * This resumes the new mapping at current - * offsets to continue recover/reshape without - * necessarily redoing a raid set partially or - * causing data corruption in case of a reshape. - */ - if (rs_active->md.curr_resync_completed != MaxSector) - mddev->curr_resync_completed = rs_active->md.curr_resync_completed; - if (rs_active->md.reshape_position != MaxSector) - mddev->reshape_position = rs_active->md.reshape_position; - } - } - /* * The superblocks need to be updated on disk if the * array is new or new devices got added (thus zeroed -- GitLab From 7035c568999d6774ff35459a0280eb33e0207168 Mon Sep 17 00:00:00 2001 From: Lei Yang Date: Thu, 6 Sep 2018 13:47:23 +0800 Subject: [PATCH 1158/1692] cgroup: kselftests: add test_core to .gitignore Update .gitignore file. Signed-off-by: Lei Yang Signed-off-by: Shuah Khan (Samsung OSG) --- tools/testing/selftests/cgroup/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore index 95eb3a53c381..adacda50a4b2 100644 --- a/tools/testing/selftests/cgroup/.gitignore +++ b/tools/testing/selftests/cgroup/.gitignore @@ -1 +1,2 @@ test_memcontrol +test_core -- GitLab From ef439d49e0bfb26cd5f03c88b4cb7cc9073ed30c Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 6 Sep 2018 11:19:20 -0700 Subject: [PATCH 1159/1692] xtensa: ISS: don't allocate memory in platform_setup Memory allocator is not initialized at that point yet, use static array instead. 
Cc: stable@vger.kernel.org Signed-off-by: Max Filippov --- arch/xtensa/platforms/iss/setup.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c index f4bbb28026f8..58709e89a8ed 100644 --- a/arch/xtensa/platforms/iss/setup.c +++ b/arch/xtensa/platforms/iss/setup.c @@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = { void __init platform_setup(char **p_cmdline) { + static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata; + static char cmdline[COMMAND_LINE_SIZE] __initdata; int argc = simc_argc(); int argv_size = simc_argv_size(); if (argc > 1) { - void **argv = alloc_bootmem(argv_size); - char *cmdline = alloc_bootmem(argv_size); - int i; + if (argv_size > sizeof(argv)) { + pr_err("%s: command line too long: argv_size = %d\n", + __func__, argv_size); + } else { + int i; - cmdline[0] = 0; - simc_argv((void *)argv); + cmdline[0] = 0; + simc_argv((void *)argv); - for (i = 1; i < argc; ++i) { - if (i > 1) - strcat(cmdline, " "); - strcat(cmdline, argv[i]); + for (i = 1; i < argc; ++i) { + if (i > 1) + strcat(cmdline, " "); + strcat(cmdline, argv[i]); + } + *p_cmdline = cmdline; } - *p_cmdline = cmdline; } atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block); -- GitLab From 4cb205c0c50f613e2de91f0eb19d5247ed003e89 Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 28 Aug 2018 12:53:26 +0800 Subject: [PATCH 1160/1692] irqchip/gic-v3-its: Cap lpi_id_bits to reduce memory footprint Commit fe8e93504ce8 ("irqchip/gic-v3-its: Use full range of LPIs"), removes the cap for lpi_id_bits, which causes the following warning to trigger on a QDF2400 server: WARNING: CPU: 0 PID: 0 at mm/page_alloc.c:4066 __alloc_pages_nodemask ... Call trace: __alloc_pages_nodemask+0x2d8/0x1188 alloc_pages_current+0x8c/0xd8 its_allocate_prop_table+0x5c/0xb8 its_init+0x220/0x3c0 gic_init_bases+0x250/0x380 gic_acpi_init+0x16c/0x2a4 In its_alloc_lpi_tables(), lpi_id_bits is 24 in QDF2400. The allocation in allocate_prop_table() tries therefore to allocate 16M (order 12 if pagesize=4k), which triggers the warning. As said by MarcL Capping lpi_id_bits at 16 (which is what we had before) is plenty, will save a some memory, and gives some margin before we need to push it up again. Bring the upper limit of lpi_id_bits back to prevent Fixes: fe8e93504ce8 ("irqchip/gic-v3-its: Use full range of LPIs") Suggested-by: Marc Zyngier Signed-off-by: Jia He Signed-off-by: Thomas Gleixner Acked-by: Marc Zyngier Tested-by: Olof Johansson Cc: Jason Cooper Cc: linux-arm-kernel@lists.infradead.org Link: https://lkml.kernel.org/r/1535432006-2304-1-git-send-email-jia.he@hxt-semitech.com --- drivers/irqchip/irq-gic-v3-its.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 316a57530f6d..c2df341ff6fa 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = { * The consequence of the above is that allocation is cost is low, but * freeing is expensive. We assumes that freeing rarely occurs. 
*/ +#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ static DEFINE_MUTEX(lpi_range_lock); static LIST_HEAD(lpi_range_list); @@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void) { phys_addr_t paddr; - lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); + lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); if (!gic_rdists->prop_page) { pr_err("Failed to allocate PROPBASE\n"); -- GitLab From ae7304c3ea28a3ba47a7a8312c76c654ef24967e Mon Sep 17 00:00:00 2001 From: Shubhrajyoti Datta Date: Mon, 3 Sep 2018 15:11:11 +0530 Subject: [PATCH 1161/1692] i2c: xiic: Make the start and the byte count write atomic Disable interrupts while configuring the transfer and enable them back. We have below as the programming sequence 1. start and slave address 2. byte count and stop In some customer platform there was a lot of interrupts between 1 and 2 and after slave address (around 7 clock cyles) if 2 is not executed then the transaction is nacked. To fix this case make the 2 writes atomic. Signed-off-by: Shubhrajyoti Datta Signed-off-by: Michal Simek [wsa: added a newline for better readability] Signed-off-by: Wolfram Sang Cc: stable@kernel.org --- drivers/i2c/busses/i2c-xiic.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 9a71e50d21f1..0c51c0ffdda9 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) { u8 rx_watermark; struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; + unsigned long flags; /* Clear and enable Rx full interrupt. */ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); @@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) rx_watermark = IIC_RX_FIFO_DEPTH; xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); + local_irq_save(flags); if (!(msg->flags & I2C_M_NOSTART)) /* write the address */ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, @@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c) xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); + local_irq_restore(flags); + if (i2c->nmsgs == 1) /* very last, enable bus not busy as well */ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); -- GitLab From 954a8e3aea87e896e320cf648c1a5bbe47de443e Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 30 Aug 2018 08:35:19 +0300 Subject: [PATCH 1162/1692] RDMA/cma: Protect cma dev list with lock When AF_IB addresses are used during rdma_resolve_addr() a lock is not held. A cma device can get removed while list traversal is in progress which may lead to crash. ie CPU0 CPU1 ==== ==== rdma_resolve_addr() cma_resolve_ib_dev() list_for_each() cma_remove_one() cur_dev->device mutex_lock(&lock) list_del(); mutex_unlock(&lock); cma_process_remove(); Therefore, hold a lock while traversing the list which avoids such situation. 
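In other words, traversal and removal must serialize on the same mutex; a minimal sketch (function and member names mirror the code, the bodies are trimmed):

    /* cma_resolve_ib_dev(): walk the device list under &lock */
    mutex_lock(&lock);
    list_for_each_entry(cur_dev, &dev_list, list) {
            /* ... may select cur_dev and attach to it ... */
    }
    mutex_unlock(&lock);

    /* cma_remove_one(): removal already runs under the same lock */
    mutex_lock(&lock);
    list_del(&cma_dev->list);
    mutex_unlock(&lock);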
Cc: # 3.10 Fixes: f17df3b0dede ("RDMA/cma: Add support for AF_IB to rdma_resolve_addr()") Signed-off-by: Parav Pandit Reviewed-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Reviewed-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f72677291b69..a36c94930c31 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) dgid = (union ib_gid *) &addr->sib_addr; pkey = ntohs(addr->sib_pkey); + mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { if (!rdma_cap_af_ib(cur_dev->device, p)) @@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) cma_dev = cur_dev; sgid = gid; id_priv->id.port_num = p; + goto found; } } } } - - if (!cma_dev) - return -ENODEV; + mutex_unlock(&lock); + return -ENODEV; found: cma_attach_to_dev(id_priv, cma_dev); - addr = (struct sockaddr_ib *) cma_src_addr(id_priv); - memcpy(&addr->sib_addr, &sgid, sizeof sgid); + mutex_unlock(&lock); + addr = (struct sockaddr_ib *)cma_src_addr(id_priv); + memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); return 0; } -- GitLab From 8f28b178f71cc56eccf2a6e2c0ace17c82f900d7 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 3 Sep 2018 09:11:14 +0300 Subject: [PATCH 1163/1692] RDMA/mlx4: Ensure that maximal send/receive SGE less than supported by HW In calculating the global maximum number of the Scatter/Gather elements supported, the following four maximum parameters must be taken into consideration: max_sg_rq, max_sg_sq, max_desc_sz_rq and max_desc_sz_sq. However instead of bringing this complexity to query_device, which still won't be sufficient anyway (the calculations are dependent on QP type), the safer approach will be to restore old code, which will give us 32 SGEs. 
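The restored conservative cap amounts to one expression (same as in the hunk below, with the reasoning spelled out in a comment):

    /* a single device-wide limit has to hold for every QP type and for
     * both work-queue directions, so advertise the smaller of the two */
    props->max_send_sge =
            min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
    props->max_recv_sge =
            min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);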
Fixes: 33023fb85a42 ("IB/core: add max_send_sge and max_recv_sge attributes") Reported-by: Chuck Lever Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ca0f1ee26091..0bbeaaae47e0 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->page_size_cap = dev->dev->caps.page_size_cap; props->max_qp = dev->dev->quotas.qp; props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; - props->max_send_sge = dev->dev->caps.max_sq_sg; - props->max_recv_sge = dev->dev->caps.max_rq_sg; - props->max_sge_rd = MLX4_MAX_SGE_RD; + props->max_send_sge = + min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); + props->max_recv_sge = + min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); + props->max_sge_rd = MLX4_MAX_SGE_RD; props->max_cq = dev->dev->quotas.cq; props->max_cqe = dev->dev->caps.max_cqes; props->max_mr = dev->dev->quotas.mpt; -- GitLab From d77ef138ff572409ab93d492e5e6c826ee6fb21d Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 15 Aug 2018 15:00:11 -0400 Subject: [PATCH 1164/1692] drm/nouveau/drm/nouveau: Fix bogus drm_kms_helper_poll_enable() placement Turns out this part is my fault for not noticing when reviewing 9a2eba337cace ("drm/nouveau: Fix drm poll_helper handling"). Currently we call drm_kms_helper_poll_enable() from nouveau_display_hpd_work(). This makes basically no sense however, because that means we're calling drm_kms_helper_poll_enable() every time we schedule the hotplug detection work. This is also against the advice mentioned in drm_kms_helper_poll_enable()'s documentation: Note that calls to enable and disable polling must be strictly ordered, which is automatically the case when they're only call from suspend/resume callbacks. Of course, hotplugs can't really be ordered. They could even happen immediately after we called drm_kms_helper_poll_disable() in nouveau_display_fini(), which can lead to all sorts of issues. Additionally; enabling polling /after/ we call drm_helper_hpd_irq_event() could also mean that we'd miss a hotplug event anyway, since drm_helper_hpd_irq_event() wouldn't bother trying to probe connectors so long as polling is disabled. So; simply move this back into nouveau_display_init() again. 
The race condition that both of these patches attempted to work around has already been fixed properly in d61a5c106351 ("drm/nouveau: Fix deadlock on runtime suspend") Fixes: 9a2eba337cace ("drm/nouveau: Fix drm poll_helper handling") Signed-off-by: Lyude Paul Acked-by: Karol Herbst Acked-by: Daniel Vetter Cc: Lukas Wunner Cc: Peter Ujfalusi Cc: stable@vger.kernel.org Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_display.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 139368b31916..7db01ea7fd41 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work) pm_runtime_get_sync(drm->dev->dev); drm_helper_hpd_irq_event(drm->dev); - /* enable polling for external displays */ - drm_kms_helper_poll_enable(drm->dev); pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_sync(drm->dev->dev); @@ -411,6 +409,11 @@ nouveau_display_init(struct drm_device *dev) if (ret) return ret; + /* enable connector detection and polling for connectors without HPD + * support + */ + drm_kms_helper_poll_enable(dev); + /* enable hotplug interrupts */ drm_connector_list_iter_begin(dev, &conn_iter); nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { -- GitLab From 611ce855420a6e8b9ff47af5f47431d52c7709f8 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 15 Aug 2018 15:00:12 -0400 Subject: [PATCH 1165/1692] drm/nouveau: Remove duplicate poll_enable() in pmops_runtime_suspend() Since actual hotplug notifications don't get disabled until nouveau_display_fini() is called, all this will do is cause any hotplugs that happen between this drm_kms_helper_poll_disable() call and the actual hotplug disablement to potentially be dropped if ACPI isn't around to help us. Signed-off-by: Lyude Paul Acked-by: Karol Herbst Acked-by: Daniel Vetter Cc: stable@vger.kernel.org Cc: Lukas Wunner Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index c7ec86d6c3c9..5fdc1fbe2ee5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -835,7 +835,6 @@ nouveau_pmops_runtime_suspend(struct device *dev) return -EBUSY; } - drm_kms_helper_poll_disable(drm_dev); nouveau_switcheroo_optimus_dsm(); ret = nouveau_do_suspend(drm_dev, true); pci_save_state(pdev); -- GitLab From 7fec8f5379fb6eddabc0aaef6d2304c366808f97 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 15 Aug 2018 15:00:13 -0400 Subject: [PATCH 1166/1692] drm/nouveau/drm/nouveau: Fix deadlock with fb_helper with async RPM requests Currently, nouveau uses the generic drm_fb_helper_output_poll_changed() function provided by DRM as it's output_poll_changed callback. Unfortunately however, this function doesn't grab runtime PM references early enough and even if it did-we can't block waiting for the device to resume in output_poll_changed() since it's very likely that we'll need to grab the fb_helper lock at some point during the runtime resume process. This currently results in deadlocking like so: [ 246.669625] INFO: task kworker/4:0:37 blocked for more than 120 seconds. [ 246.673398] Not tainted 4.18.0-rc5Lyude-Test+ #2 [ 246.675271] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. 
[ 246.676527] kworker/4:0 D 0 37 2 0x80000000 [ 246.677580] Workqueue: events output_poll_execute [drm_kms_helper] [ 246.678704] Call Trace: [ 246.679753] __schedule+0x322/0xaf0 [ 246.680916] schedule+0x33/0x90 [ 246.681924] schedule_preempt_disabled+0x15/0x20 [ 246.683023] __mutex_lock+0x569/0x9a0 [ 246.684035] ? kobject_uevent_env+0x117/0x7b0 [ 246.685132] ? drm_fb_helper_hotplug_event.part.28+0x20/0xb0 [drm_kms_helper] [ 246.686179] mutex_lock_nested+0x1b/0x20 [ 246.687278] ? mutex_lock_nested+0x1b/0x20 [ 246.688307] drm_fb_helper_hotplug_event.part.28+0x20/0xb0 [drm_kms_helper] [ 246.689420] drm_fb_helper_output_poll_changed+0x23/0x30 [drm_kms_helper] [ 246.690462] drm_kms_helper_hotplug_event+0x2a/0x30 [drm_kms_helper] [ 246.691570] output_poll_execute+0x198/0x1c0 [drm_kms_helper] [ 246.692611] process_one_work+0x231/0x620 [ 246.693725] worker_thread+0x214/0x3a0 [ 246.694756] kthread+0x12b/0x150 [ 246.695856] ? wq_pool_ids_show+0x140/0x140 [ 246.696888] ? kthread_create_worker_on_cpu+0x70/0x70 [ 246.697998] ret_from_fork+0x3a/0x50 [ 246.699034] INFO: task kworker/0:1:60 blocked for more than 120 seconds. [ 246.700153] Not tainted 4.18.0-rc5Lyude-Test+ #2 [ 246.701182] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 246.702278] kworker/0:1 D 0 60 2 0x80000000 [ 246.703293] Workqueue: pm pm_runtime_work [ 246.704393] Call Trace: [ 246.705403] __schedule+0x322/0xaf0 [ 246.706439] ? wait_for_completion+0x104/0x190 [ 246.707393] schedule+0x33/0x90 [ 246.708375] schedule_timeout+0x3a5/0x590 [ 246.709289] ? mark_held_locks+0x58/0x80 [ 246.710208] ? _raw_spin_unlock_irq+0x2c/0x40 [ 246.711222] ? wait_for_completion+0x104/0x190 [ 246.712134] ? trace_hardirqs_on_caller+0xf4/0x190 [ 246.713094] ? wait_for_completion+0x104/0x190 [ 246.713964] wait_for_completion+0x12c/0x190 [ 246.714895] ? wake_up_q+0x80/0x80 [ 246.715727] ? get_work_pool+0x90/0x90 [ 246.716649] flush_work+0x1c9/0x280 [ 246.717483] ? flush_workqueue_prep_pwqs+0x1b0/0x1b0 [ 246.718442] __cancel_work_timer+0x146/0x1d0 [ 246.719247] cancel_delayed_work_sync+0x13/0x20 [ 246.720043] drm_kms_helper_poll_disable+0x1f/0x30 [drm_kms_helper] [ 246.721123] nouveau_pmops_runtime_suspend+0x3d/0xb0 [nouveau] [ 246.721897] pci_pm_runtime_suspend+0x6b/0x190 [ 246.722825] ? pci_has_legacy_pm_support+0x70/0x70 [ 246.723737] __rpm_callback+0x7a/0x1d0 [ 246.724721] ? pci_has_legacy_pm_support+0x70/0x70 [ 246.725607] rpm_callback+0x24/0x80 [ 246.726553] ? pci_has_legacy_pm_support+0x70/0x70 [ 246.727376] rpm_suspend+0x142/0x6b0 [ 246.728185] pm_runtime_work+0x97/0xc0 [ 246.728938] process_one_work+0x231/0x620 [ 246.729796] worker_thread+0x44/0x3a0 [ 246.730614] kthread+0x12b/0x150 [ 246.731395] ? wq_pool_ids_show+0x140/0x140 [ 246.732202] ? kthread_create_worker_on_cpu+0x70/0x70 [ 246.732878] ret_from_fork+0x3a/0x50 [ 246.733768] INFO: task kworker/4:2:422 blocked for more than 120 seconds. [ 246.734587] Not tainted 4.18.0-rc5Lyude-Test+ #2 [ 246.735393] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 246.736113] kworker/4:2 D 0 422 2 0x80000080 [ 246.736789] Workqueue: events_long drm_dp_mst_link_probe_work [drm_kms_helper] [ 246.737665] Call Trace: [ 246.738490] __schedule+0x322/0xaf0 [ 246.739250] schedule+0x33/0x90 [ 246.739908] rpm_resume+0x19c/0x850 [ 246.740750] ? 
finish_wait+0x90/0x90 [ 246.741541] __pm_runtime_resume+0x4e/0x90 [ 246.742370] nv50_disp_atomic_commit+0x31/0x210 [nouveau] [ 246.743124] drm_atomic_commit+0x4a/0x50 [drm] [ 246.743775] restore_fbdev_mode_atomic+0x1c8/0x240 [drm_kms_helper] [ 246.744603] restore_fbdev_mode+0x31/0x140 [drm_kms_helper] [ 246.745373] drm_fb_helper_restore_fbdev_mode_unlocked+0x54/0xb0 [drm_kms_helper] [ 246.746220] drm_fb_helper_set_par+0x2d/0x50 [drm_kms_helper] [ 246.746884] drm_fb_helper_hotplug_event.part.28+0x96/0xb0 [drm_kms_helper] [ 246.747675] drm_fb_helper_output_poll_changed+0x23/0x30 [drm_kms_helper] [ 246.748544] drm_kms_helper_hotplug_event+0x2a/0x30 [drm_kms_helper] [ 246.749439] nv50_mstm_hotplug+0x15/0x20 [nouveau] [ 246.750111] drm_dp_send_link_address+0x177/0x1c0 [drm_kms_helper] [ 246.750764] drm_dp_check_and_send_link_address+0xa8/0xd0 [drm_kms_helper] [ 246.751602] drm_dp_mst_link_probe_work+0x51/0x90 [drm_kms_helper] [ 246.752314] process_one_work+0x231/0x620 [ 246.752979] worker_thread+0x44/0x3a0 [ 246.753838] kthread+0x12b/0x150 [ 246.754619] ? wq_pool_ids_show+0x140/0x140 [ 246.755386] ? kthread_create_worker_on_cpu+0x70/0x70 [ 246.756162] ret_from_fork+0x3a/0x50 [ 246.756847] Showing all locks held in the system: [ 246.758261] 3 locks held by kworker/4:0/37: [ 246.759016] #0: 00000000f8df4d2d ((wq_completion)"events"){+.+.}, at: process_one_work+0x1b3/0x620 [ 246.759856] #1: 00000000e6065461 ((work_completion)(&(&dev->mode_config.output_poll_work)->work)){+.+.}, at: process_one_work+0x1b3/0x620 [ 246.760670] #2: 00000000cb66735f (&helper->lock){+.+.}, at: drm_fb_helper_hotplug_event.part.28+0x20/0xb0 [drm_kms_helper] [ 246.761516] 2 locks held by kworker/0:1/60: [ 246.762274] #0: 00000000fff6be0f ((wq_completion)"pm"){+.+.}, at: process_one_work+0x1b3/0x620 [ 246.762982] #1: 000000005ab44fb4 ((work_completion)(&dev->power.work)){+.+.}, at: process_one_work+0x1b3/0x620 [ 246.763890] 1 lock held by khungtaskd/64: [ 246.764664] #0: 000000008cb8b5c3 (rcu_read_lock){....}, at: debug_show_all_locks+0x23/0x185 [ 246.765588] 5 locks held by kworker/4:2/422: [ 246.766440] #0: 00000000232f0959 ((wq_completion)"events_long"){+.+.}, at: process_one_work+0x1b3/0x620 [ 246.767390] #1: 00000000bb59b134 ((work_completion)(&mgr->work)){+.+.}, at: process_one_work+0x1b3/0x620 [ 246.768154] #2: 00000000cb66735f (&helper->lock){+.+.}, at: drm_fb_helper_restore_fbdev_mode_unlocked+0x4c/0xb0 [drm_kms_helper] [ 246.768966] #3: 000000004c8f0b6b (crtc_ww_class_acquire){+.+.}, at: restore_fbdev_mode_atomic+0x4b/0x240 [drm_kms_helper] [ 246.769921] #4: 000000004c34a296 (crtc_ww_class_mutex){+.+.}, at: drm_modeset_backoff+0x8a/0x1b0 [drm] [ 246.770839] 1 lock held by dmesg/1038: [ 246.771739] 2 locks held by zsh/1172: [ 246.772650] #0: 00000000836d0438 (&tty->ldisc_sem){++++}, at: ldsem_down_read+0x37/0x40 [ 246.773680] #1: 000000001f4f4d48 (&ldata->atomic_read_lock){+.+.}, at: n_tty_read+0xc1/0x870 [ 246.775522] ============================================= After trying dozens of different solutions, I found one very simple one that should also have the benefit of preventing us from having to fight locking for the rest of our lives. So, we work around these deadlocks by deferring all fbcon hotplug events that happen after the runtime suspend process starts until after the device is resumed again. 
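The deferral hinges on the return value of pm_runtime_get(); a condensed sketch of the pattern the hunks below add to nouveau_fbcon_output_poll_changed() (return-value meanings as described in this series: 1 = already active, 0 = a suspend/resume is in flight, -EACCES = runtime PM disabled):

    ret = pm_runtime_get(dev->dev);
    if (ret == 1 || ret == -EACCES) {
            /* device awake (or RPM disabled): reprobe right away */
            drm_fb_helper_hotplug_event(&fbcon->helper);
            pm_runtime_mark_last_busy(dev->dev);
            pm_runtime_put_autosuspend(dev->dev);
    } else if (ret == 0) {
            /* suspend/resume in flight: note the event and let the
             * runtime-resume path replay it once the GPU is back up */
            fbcon->hotplug_waiting = true;
            pm_runtime_put_noidle(dev->dev);
    }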
Changes since v7: - Fixup commit message - Daniel Vetter Changes since v6: - Remove unused nouveau_fbcon_hotplugged_in_suspend() - Ilia Changes since v5: - Come up with the (hopefully final) solution for solving this dumb problem, one that is a lot less likely to cause issues with locking in the future. This should work around all deadlock conditions with fbcon brought up thus far. Changes since v4: - Add nouveau_fbcon_hotplugged_in_suspend() to workaround deadlock condition that Lukas described - Just move all of this out of drm_fb_helper. It seems that other DRM drivers have already figured out other workarounds for this. If other drivers do end up needing this in the future, we can just move this back into drm_fb_helper again. Changes since v3: - Actually check if fb_helper is NULL in both new helpers - Actually check drm_fbdev_emulation in both new helpers - Don't fire off a fb_helper hotplug unconditionally; only do it if the following conditions are true (as otherwise, calling this in the wrong spot will cause Bad Things to happen): - fb_helper hotplug handling was actually inhibited previously - fb_helper actually has a delayed hotplug pending - fb_helper is actually bound - fb_helper is actually initialized - Add __must_check to drm_fb_helper_suspend_hotplug(). There's no situation where a driver would actually want to use this without checking the return value, so enforce that - Rewrite and clarify the documentation for both helpers. - Make sure to return true in the drm_fb_helper_suspend_hotplug() stub that's provided in drm_fb_helper.h when CONFIG_DRM_FBDEV_EMULATION isn't enabled - Actually grab the toplevel fb_helper lock in drm_fb_helper_resume_hotplug(), since it's possible other activity (such as a hotplug) could be going on at the same time the driver calls drm_fb_helper_resume_hotplug(). 
We need this to check whether or not drm_fb_helper_hotplug_event() needs to be called anyway Signed-off-by: Lyude Paul Reviewed-by: Karol Herbst Acked-by: Daniel Vetter Cc: stable@vger.kernel.org Cc: Lukas Wunner Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 2 +- drivers/gpu/drm/nouveau/nouveau_display.c | 2 +- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 57 +++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_fbcon.h | 5 ++ 4 files changed, 64 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8412119bd940..aec6ee1ff4e0 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2074,7 +2074,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev) static const struct drm_mode_config_funcs nv50_disp_func = { .fb_create = nouveau_user_framebuffer_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, + .output_poll_changed = nouveau_fbcon_output_poll_changed, .atomic_check = nv50_disp_atomic_check, .atomic_commit = nv50_disp_atomic_commit, .atomic_state_alloc = nv50_disp_atomic_state_alloc, diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7db01ea7fd41..42e7c35e3fba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { .fb_create = nouveau_user_framebuffer_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, + .output_poll_changed = nouveau_fbcon_output_poll_changed, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 844498c4267c..0f64c0a1d4b3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work) console_unlock(); if (state == FBINFO_STATE_RUNNING) { + nouveau_fbcon_hotplug_resume(drm->fbcon); pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_sync(drm->dev->dev); } @@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state) schedule_work(&drm->fbcon_work); } +void +nouveau_fbcon_output_poll_changed(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fbdev *fbcon = drm->fbcon; + int ret; + + if (!fbcon) + return; + + mutex_lock(&fbcon->hotplug_lock); + + ret = pm_runtime_get(dev->dev); + if (ret == 1 || ret == -EACCES) { + drm_fb_helper_hotplug_event(&fbcon->helper); + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + } else if (ret == 0) { + /* If the GPU was already in the process of suspending before + * this event happened, then we can't block here as we'll + * deadlock the runtime pmops since they wait for us to + * finish. So, just defer this event for when we runtime + * resume again. It will be handled by fbcon_work. 
+ */ + NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n"); + fbcon->hotplug_waiting = true; + pm_runtime_put_noidle(drm->dev->dev); + } else { + DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n", + ret); + } + + mutex_unlock(&fbcon->hotplug_lock); +} + +void +nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon) +{ + struct nouveau_drm *drm; + + if (!fbcon) + return; + drm = nouveau_drm(fbcon->helper.dev); + + mutex_lock(&fbcon->hotplug_lock); + if (fbcon->hotplug_waiting) { + fbcon->hotplug_waiting = false; + + NV_DEBUG(drm, "Handling deferred fbcon HPD events\n"); + drm_fb_helper_hotplug_event(&fbcon->helper); + } + mutex_unlock(&fbcon->hotplug_lock); +} + int nouveau_fbcon_init(struct drm_device *dev) { @@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev) drm->fbcon = fbcon; INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); + mutex_init(&fbcon->hotplug_lock); drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index a6f192ea3fa6..db9d52047ef8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -41,6 +41,9 @@ struct nouveau_fbdev { struct nvif_object gdi; struct nvif_object blit; struct nvif_object twod; + + struct mutex hotplug_lock; + bool hotplug_waiting; }; void nouveau_fbcon_restore(void); @@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); void nouveau_fbcon_accel_save_disable(struct drm_device *dev); void nouveau_fbcon_accel_restore(struct drm_device *dev); +void nouveau_fbcon_output_poll_changed(struct drm_device *dev); +void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon); extern int nouveau_nofbaccel; #endif /* __NV50_FBCON_H__ */ -- GitLab From 6833fb1ec120bf078e1a527c573a09d4de286224 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 15 Aug 2018 15:00:14 -0400 Subject: [PATCH 1167/1692] drm/nouveau/drm/nouveau: Use pm_runtime_get_noresume() in connector_detect() It's true we can't resume the device from poll workers in nouveau_connector_detect(). We can however, prevent the autosuspend timer from elapsing immediately if it hasn't already without risking any sort of deadlock with the runtime suspend/resume operations. So do that instead of entirely avoiding grabbing a power reference. Signed-off-by: Lyude Paul Reviewed-by: Karol Herbst Acked-by: Daniel Vetter Cc: stable@vger.kernel.org Cc: Lukas Wunner Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_connector.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 51932c72334e..31b31a35c8fe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -555,12 +555,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) nv_connector->edid = NULL; } - /* Outputs are only polled while runtime active, so acquiring a - * runtime PM ref here is unnecessary (and would deadlock upon - * runtime suspend because it waits for polling to finish). + /* Outputs are only polled while runtime active, so resuming the + * device here is unnecessary (and would deadlock upon runtime suspend + * because it waits for polling to finish). We do however, want to + * prevent the autosuspend timer from elapsing during this operation + * if possible. 
*/ - if (!drm_kms_helper_is_poll_worker()) { - ret = pm_runtime_get_sync(connector->dev->dev); + if (drm_kms_helper_is_poll_worker()) { + pm_runtime_get_noresume(dev->dev); + } else { + ret = pm_runtime_get_sync(dev->dev); if (ret < 0 && ret != -EACCES) return conn_status; } @@ -638,10 +642,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); - } + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); return conn_status; } -- GitLab From 3e1a12754d4df5804bfca5dedf09d2ba291bdc2a Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 15 Aug 2018 15:00:15 -0400 Subject: [PATCH 1168/1692] drm/nouveau: Fix deadlocks in nouveau_connector_detect() When we disable hotplugging on the GPU, we need to be able to synchronize with each connector's hotplug interrupt handler before the interrupt is finally disabled. This can be a problem however, since nouveau_connector_detect() currently grabs a runtime power reference when handling connector probing. This will deadlock the runtime suspend handler like so: [ 861.480896] INFO: task kworker/0:2:61 blocked for more than 120 seconds. [ 861.483290] Tainted: G O 4.18.0-rc6Lyude-Test+ #1 [ 861.485158] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 861.486332] kworker/0:2 D 0 61 2 0x80000000 [ 861.487044] Workqueue: events nouveau_display_hpd_work [nouveau] [ 861.487737] Call Trace: [ 861.488394] __schedule+0x322/0xaf0 [ 861.489070] schedule+0x33/0x90 [ 861.489744] rpm_resume+0x19c/0x850 [ 861.490392] ? finish_wait+0x90/0x90 [ 861.491068] __pm_runtime_resume+0x4e/0x90 [ 861.491753] nouveau_display_hpd_work+0x22/0x60 [nouveau] [ 861.492416] process_one_work+0x231/0x620 [ 861.493068] worker_thread+0x44/0x3a0 [ 861.493722] kthread+0x12b/0x150 [ 861.494342] ? wq_pool_ids_show+0x140/0x140 [ 861.494991] ? kthread_create_worker_on_cpu+0x70/0x70 [ 861.495648] ret_from_fork+0x3a/0x50 [ 861.496304] INFO: task kworker/6:2:320 blocked for more than 120 seconds. [ 861.496968] Tainted: G O 4.18.0-rc6Lyude-Test+ #1 [ 861.497654] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 861.498341] kworker/6:2 D 0 320 2 0x80000080 [ 861.499045] Workqueue: pm pm_runtime_work [ 861.499739] Call Trace: [ 861.500428] __schedule+0x322/0xaf0 [ 861.501134] ? wait_for_completion+0x104/0x190 [ 861.501851] schedule+0x33/0x90 [ 861.502564] schedule_timeout+0x3a5/0x590 [ 861.503284] ? mark_held_locks+0x58/0x80 [ 861.503988] ? _raw_spin_unlock_irq+0x2c/0x40 [ 861.504710] ? wait_for_completion+0x104/0x190 [ 861.505417] ? trace_hardirqs_on_caller+0xf4/0x190 [ 861.506136] ? wait_for_completion+0x104/0x190 [ 861.506845] wait_for_completion+0x12c/0x190 [ 861.507555] ? wake_up_q+0x80/0x80 [ 861.508268] flush_work+0x1c9/0x280 [ 861.508990] ? flush_workqueue_prep_pwqs+0x1b0/0x1b0 [ 861.509735] nvif_notify_put+0xb1/0xc0 [nouveau] [ 861.510482] nouveau_display_fini+0xbd/0x170 [nouveau] [ 861.511241] nouveau_display_suspend+0x67/0x120 [nouveau] [ 861.511969] nouveau_do_suspend+0x5e/0x2d0 [nouveau] [ 861.512715] nouveau_pmops_runtime_suspend+0x47/0xb0 [nouveau] [ 861.513435] pci_pm_runtime_suspend+0x6b/0x180 [ 861.514165] ? pci_has_legacy_pm_support+0x70/0x70 [ 861.514897] __rpm_callback+0x7a/0x1d0 [ 861.515618] ? pci_has_legacy_pm_support+0x70/0x70 [ 861.516313] rpm_callback+0x24/0x80 [ 861.517027] ? 
pci_has_legacy_pm_support+0x70/0x70 [ 861.517741] rpm_suspend+0x142/0x6b0 [ 861.518449] pm_runtime_work+0x97/0xc0 [ 861.519144] process_one_work+0x231/0x620 [ 861.519831] worker_thread+0x44/0x3a0 [ 861.520522] kthread+0x12b/0x150 [ 861.521220] ? wq_pool_ids_show+0x140/0x140 [ 861.521925] ? kthread_create_worker_on_cpu+0x70/0x70 [ 861.522622] ret_from_fork+0x3a/0x50 [ 861.523299] INFO: task kworker/6:0:1329 blocked for more than 120 seconds. [ 861.523977] Tainted: G O 4.18.0-rc6Lyude-Test+ #1 [ 861.524644] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 861.525349] kworker/6:0 D 0 1329 2 0x80000000 [ 861.526073] Workqueue: events nvif_notify_work [nouveau] [ 861.526751] Call Trace: [ 861.527411] __schedule+0x322/0xaf0 [ 861.528089] schedule+0x33/0x90 [ 861.528758] rpm_resume+0x19c/0x850 [ 861.529399] ? finish_wait+0x90/0x90 [ 861.530073] __pm_runtime_resume+0x4e/0x90 [ 861.530798] nouveau_connector_detect+0x7e/0x510 [nouveau] [ 861.531459] ? ww_mutex_lock+0x47/0x80 [ 861.532097] ? ww_mutex_lock+0x47/0x80 [ 861.532819] ? drm_modeset_lock+0x88/0x130 [drm] [ 861.533481] drm_helper_probe_detect_ctx+0xa0/0x100 [drm_kms_helper] [ 861.534127] drm_helper_hpd_irq_event+0xa4/0x120 [drm_kms_helper] [ 861.534940] nouveau_connector_hotplug+0x98/0x120 [nouveau] [ 861.535556] nvif_notify_work+0x2d/0xb0 [nouveau] [ 861.536221] process_one_work+0x231/0x620 [ 861.536994] worker_thread+0x44/0x3a0 [ 861.537757] kthread+0x12b/0x150 [ 861.538463] ? wq_pool_ids_show+0x140/0x140 [ 861.539102] ? kthread_create_worker_on_cpu+0x70/0x70 [ 861.539815] ret_from_fork+0x3a/0x50 [ 861.540521] Showing all locks held in the system: [ 861.541696] 2 locks held by kworker/0:2/61: [ 861.542406] #0: 000000002dbf8af5 ((wq_completion)"events"){+.+.}, at: process_one_work+0x1b3/0x620 [ 861.543071] #1: 0000000076868126 ((work_completion)(&drm->hpd_work)){+.+.}, at: process_one_work+0x1b3/0x620 [ 861.543814] 1 lock held by khungtaskd/64: [ 861.544535] #0: 0000000059db4b53 (rcu_read_lock){....}, at: debug_show_all_locks+0x23/0x185 [ 861.545160] 3 locks held by kworker/6:2/320: [ 861.545896] #0: 00000000d9e1bc59 ((wq_completion)"pm"){+.+.}, at: process_one_work+0x1b3/0x620 [ 861.546702] #1: 00000000c9f92d84 ((work_completion)(&dev->power.work)){+.+.}, at: process_one_work+0x1b3/0x620 [ 861.547443] #2: 000000004afc5de1 (drm_connector_list_iter){.+.+}, at: nouveau_display_fini+0x96/0x170 [nouveau] [ 861.548146] 1 lock held by dmesg/983: [ 861.548889] 2 locks held by zsh/1250: [ 861.549605] #0: 00000000348e3cf6 (&tty->ldisc_sem){++++}, at: ldsem_down_read+0x37/0x40 [ 861.550393] #1: 000000007009a7a8 (&ldata->atomic_read_lock){+.+.}, at: n_tty_read+0xc1/0x870 [ 861.551122] 6 locks held by kworker/6:0/1329: [ 861.551957] #0: 000000002dbf8af5 ((wq_completion)"events"){+.+.}, at: process_one_work+0x1b3/0x620 [ 861.552765] #1: 00000000ddb499ad ((work_completion)(¬ify->work)#2){+.+.}, at: process_one_work+0x1b3/0x620 [ 861.553582] #2: 000000006e013cbe (&dev->mode_config.mutex){+.+.}, at: drm_helper_hpd_irq_event+0x6c/0x120 [drm_kms_helper] [ 861.554357] #3: 000000004afc5de1 (drm_connector_list_iter){.+.+}, at: drm_helper_hpd_irq_event+0x78/0x120 [drm_kms_helper] [ 861.555227] #4: 0000000044f294d9 (crtc_ww_class_acquire){+.+.}, at: drm_helper_probe_detect_ctx+0x3d/0x100 [drm_kms_helper] [ 861.556133] #5: 00000000db193642 (crtc_ww_class_mutex){+.+.}, at: drm_modeset_lock+0x4b/0x130 [drm] [ 861.557864] ============================================= [ 861.559507] NMI backtrace for cpu 2 [ 861.560363] CPU: 2 PID: 64 Comm: 
khungtaskd Tainted: G O 4.18.0-rc6Lyude-Test+ #1 [ 861.561197] Hardware name: LENOVO 20EQS64N0B/20EQS64N0B, BIOS N1EET78W (1.51 ) 05/18/2018 [ 861.561948] Call Trace: [ 861.562757] dump_stack+0x8e/0xd3 [ 861.563516] nmi_cpu_backtrace.cold.3+0x14/0x5a [ 861.564269] ? lapic_can_unplug_cpu.cold.27+0x42/0x42 [ 861.565029] nmi_trigger_cpumask_backtrace+0xa1/0xae [ 861.565789] arch_trigger_cpumask_backtrace+0x19/0x20 [ 861.566558] watchdog+0x316/0x580 [ 861.567355] kthread+0x12b/0x150 [ 861.568114] ? reset_hung_task_detector+0x20/0x20 [ 861.568863] ? kthread_create_worker_on_cpu+0x70/0x70 [ 861.569598] ret_from_fork+0x3a/0x50 [ 861.570370] Sending NMI from CPU 2 to CPUs 0-1,3-7: [ 861.571426] NMI backtrace for cpu 6 skipped: idling at intel_idle+0x7f/0x120 [ 861.571429] NMI backtrace for cpu 7 skipped: idling at intel_idle+0x7f/0x120 [ 861.571432] NMI backtrace for cpu 3 skipped: idling at intel_idle+0x7f/0x120 [ 861.571464] NMI backtrace for cpu 5 skipped: idling at intel_idle+0x7f/0x120 [ 861.571467] NMI backtrace for cpu 0 skipped: idling at intel_idle+0x7f/0x120 [ 861.571469] NMI backtrace for cpu 4 skipped: idling at intel_idle+0x7f/0x120 [ 861.571472] NMI backtrace for cpu 1 skipped: idling at intel_idle+0x7f/0x120 [ 861.572428] Kernel panic - not syncing: hung_task: blocked tasks So: fix this by making it so that normal hotplug handling /only/ happens so long as the GPU is currently awake without any pending runtime PM requests. In the event that a hotplug occurs while the device is suspending or resuming, we can simply defer our response until the GPU is fully runtime resumed again. Changes since v4: - Use a new trick I came up with using pm_runtime_get() instead of the hackish junk we had before Signed-off-by: Lyude Paul Reviewed-by: Karol Herbst Acked-by: Daniel Vetter Cc: stable@vger.kernel.org Cc: Lukas Wunner Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_connector.c | 22 +++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 31b31a35c8fe..76660bc1ccfb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1107,6 +1107,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify) const struct nvif_notify_conn_rep_v0 *rep = notify->data; const char *name = connector->name; struct nouveau_encoder *nv_encoder; + int ret; + + ret = pm_runtime_get(drm->dev->dev); + if (ret == 0) { + /* We can't block here if there's a pending PM request + * running, as we'll deadlock nouveau_display_fini() when it + * calls nvif_put() on our nvif_notify struct. 
So, simply + * defer the hotplug event until the device finishes resuming + */ + NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n", + name); + schedule_work(&drm->hpd_work); + + pm_runtime_put_noidle(drm->dev->dev); + return NVIF_NOTIFY_KEEP; + } else if (ret != 1 && ret != -EACCES) { + NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n", + name, ret); + return NVIF_NOTIFY_DROP; + } if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { NV_DEBUG(drm, "service %s\n", name); @@ -1124,6 +1144,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify) drm_helper_hpd_irq_event(connector->dev); } + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_autosuspend(drm->dev->dev); return NVIF_NOTIFY_KEEP; } -- GitLab From 0445f7537d0742e4f8bcf594a8d81fb901fd131e Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 15 Aug 2018 15:15:11 -0400 Subject: [PATCH 1169/1692] drm/nouveau: Remove useless poll_enable() call in switcheroo_set_state() This doesn't do anything, drm_kms_helper_poll_enable() gets called in nouveau_pmops_resume()->nouveau_display_resume()->nouveau_display_init() already. Signed-off-by: Lyude Paul Reviewed-by: Karol Herbst Acked-by: Daniel Vetter Cc: Lukas Wunner Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_vga.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 3da5a4305aa4..09b1d8151881 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -46,7 +46,6 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev, pr_err("VGA switcheroo: switched nouveau on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; nouveau_pmops_resume(&pdev->dev); - drm_kms_helper_poll_enable(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { pr_err("VGA switcheroo: switched nouveau off\n"); -- GitLab From 0d7b2d4def679cae3bf2728fc31be7f8a48ceab3 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 15 Aug 2018 15:15:12 -0400 Subject: [PATCH 1170/1692] drm/nouveau: Remove useless poll_disable() call in switcheroo_set_state() This won't do anything but potentially make us miss hotplugs. We already call drm_kms_helper_poll_disable() in nouveau_pmops_suspend()->nouveau_display_suspend()->nouveau_display_fini() Signed-off-by: Lyude Paul Reviewed-by: Karol Herbst Acked-by: Daniel Vetter Cc: Lukas Wunner Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_vga.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 09b1d8151881..8f1ce4833230 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -50,7 +50,6 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev, } else { pr_err("VGA switcheroo: switched nouveau off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - drm_kms_helper_poll_disable(dev); nouveau_switcheroo_optimus_dsm(); nouveau_pmops_suspend(&pdev->dev); dev->switch_power_state = DRM_SWITCH_POWER_OFF; -- GitLab From 7326ead9828e5eb5c6030d80310241c404e919f9 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 15 Aug 2018 15:15:13 -0400 Subject: [PATCH 1171/1692] drm/nouveau: Remove useless poll_enable() call in drm_load() Again, this doesn't do anything. 
drm_kms_helper_poll_enable() will have already been called in nouveau_display_init() Signed-off-by: Lyude Paul Reviewed-by: Karol Herbst Acked-by: Daniel Vetter Cc: Lukas Wunner Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drm.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 5fdc1fbe2ee5..04f704b77a3c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) pm_runtime_allow(dev->dev); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put(dev->dev); - } else { - /* enable polling for external displays */ - drm_kms_helper_poll_enable(dev); } + return 0; fail_dispinit: -- GitLab From b26b4590dd53e012526342e749c423e6c0e73437 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Thu, 9 Aug 2018 18:22:05 -0400 Subject: [PATCH 1172/1692] drm/nouveau: Only write DP_MSTM_CTRL when needed Currently, nouveau will re-write the DP_MSTM_CTRL register for an MST hub every time it receives a long HPD pulse on DP. This isn't actually necessary and additionally, has some unintended side effects. With the P50 I've got here, rewriting DP_MSTM_CTRL constantly seems to make it rather likely (1 out of 5 times usually) that bringing up MST with it's ThinkPad dock will fail and result in sideband messages timing out in the middle. Afterwards, successive probes don't manage to get the dock to communicate properly over MST sideband properly. Many times sideband message timeouts from MST hubs are indicative of either the source or the sink dropping an ESI event, which can cause DRM's perspective of the topology's current state to go out of sync with reality. While it's tough to really know for sure what's happening to the dock, using userspace tools to write to DP_MSTM_CTRL in the middle of the MST link probing process does appear to make things flaky. It's possible that when we write to DP_MSTM_CTRL, the function that gets triggered to respond in the dock's firmware temporarily puts it in a state where it might end up not reporting an ESI to the source, or ends up dropping a sideband message we sent it. So, to fix this we make it so that when probing an MST topology, we respect it's current state. If the dock's already enabled, we simply read DP_MSTM_CTRL and disable the topology if it's value is not what we expected. Otherwise, we perform the normal MST probing dance. We avoid taking any action except if the state of the MST topology actually changes. This fixes MST sideband message timeouts and detection failures on my P50 with its ThinkPad dock. 
Signed-off-by: Lyude Paul Cc: stable@vger.kernel.org Cc: Karol Herbst Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 45 ++++++++++++++++++++----- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index aec6ee1ff4e0..1a06c165a8df 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1142,31 +1142,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state) int nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) { - int ret, state = 0; + struct drm_dp_aux *aux; + int ret; + bool old_state, new_state; + u8 mstm_ctrl; if (!mstm) return 0; - if (dpcd[0] >= 0x12) { - ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]); + mutex_lock(&mstm->mgr.lock); + + old_state = mstm->mgr.mst_state; + new_state = old_state; + aux = mstm->mgr.aux; + + if (old_state) { + /* Just check that the MST hub is still as we expect it */ + ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl); + if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) { + DRM_DEBUG_KMS("Hub gone, disabling MST topology\n"); + new_state = false; + } + } else if (dpcd[0] >= 0x12) { + ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]); if (ret < 0) - return ret; + goto probe_error; if (!(dpcd[1] & DP_MST_CAP)) dpcd[0] = 0x11; else - state = allow; + new_state = allow; } - ret = nv50_mstm_enable(mstm, dpcd[0], state); + if (new_state == old_state) { + mutex_unlock(&mstm->mgr.lock); + return new_state; + } + + ret = nv50_mstm_enable(mstm, dpcd[0], new_state); if (ret) - return ret; + goto probe_error; + + mutex_unlock(&mstm->mgr.lock); - ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state); + ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state); if (ret) return nv50_mstm_enable(mstm, dpcd[0], 0); - return mstm->mgr.mst_state; + return new_state; + +probe_error: + mutex_unlock(&mstm->mgr.lock); + return ret; } static void -- GitLab From fa3cdf8d0b092c4561f9f017dfac409eb7644737 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Thu, 9 Aug 2018 18:22:06 -0400 Subject: [PATCH 1173/1692] drm/nouveau: Reset MST branching unit before enabling When probing a new MST device, it's not safe to make any assumptions about it's current state. While most well mannered MST hubs will just disable the branching unit on hotplug disconnects, this isn't enough to save us from various other scenarios that might have resulted in something writing to the MST branching unit before we got control of it. This could happen if a previous probe we tried failed, if we're booting in kexec context and the hub is still in the state the last kernel put it in, etc. Luckily; there is no reason we can't just reset the branching unit every time we enable a new topology. So, fix this by resetting it on enabling new topologies to ensure that we always start off with a clean, unmodified topology state on MST sinks. This fixes occasional hard-lockups on my P50's laptop dock (e.g. AUX times out all DPCD trasactions) observed after multiple docks, undocks, and module reloads. 
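The core of the fix is simply the ordering of the two DPCD writes: the branching unit is always cleared first, and only then optionally re-enabled. A sketch of the nv50_mstm_enable() change (shown in full in the diff below):

        /* Always start from a known-clean sink-side MST state. */
        ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
        if (ret < 0)
                return ret;

        if (state) {
                /* Only now turn the branching unit back on. */
                ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, DP_MST_EN);
                if (ret < 0)
                        return ret;
        }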
Signed-off-by: Lyude Paul Cc: stable@vger.kernel.org Cc: Karol Herbst Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 1a06c165a8df..5691dfa1db6f 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state) int ret; if (dpcd >= 0x12) { - ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd); + /* Even if we're enabling MST, start with disabling the + * branching unit to clear any sink-side MST topology state + * that wasn't set by us + */ + ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0); if (ret < 0) return ret; - dpcd &= ~DP_MST_EN; - if (state) - dpcd |= DP_MST_EN; - - ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd); - if (ret < 0) - return ret; + if (state) { + /* Now, start initializing */ + ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, + DP_MST_EN); + if (ret < 0) + return ret; + } } return nvif_mthd(disp, 0, &args, sizeof(args)); -- GitLab From 79e765ad665da4b8aa7e9c878bd2fef837f6fea5 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Thu, 16 Aug 2018 16:13:13 -0400 Subject: [PATCH 1174/1692] drm/nouveau/drm/nouveau: Prevent handling ACPI HPD events too early On most systems with ACPI hotplugging support, it seems that we always receive a hotplug event once we re-enable EC interrupts even if the GPU hasn't even been resumed yet. This can cause problems since even though we schedule hpd_work to handle connector reprobing for us, hpd_work synchronizes on pm_runtime_get_sync() to wait until the device is ready to perform reprobing. Since runtime suspend/resume callbacks are disabled before the PM core calls ->suspend(), any calls to pm_runtime_get_sync() during this period will grab a runtime PM ref and return immediately with -EACCES. Because we schedule hpd_work from our ACPI HPD handler, and hpd_work synchronizes on pm_runtime_get_sync(), this causes us to launch a connector reprobe immediately even if the GPU isn't actually resumed just yet. This causes various warnings in dmesg and occasionally, also prevents some displays connected to the dedicated GPU from coming back up after suspend. 
Example: usb 1-4: USB disconnect, device number 14 usb 1-4.1: USB disconnect, device number 15 WARNING: CPU: 0 PID: 838 at drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h:170 nouveau_dp_detect+0x17e/0x370 [nouveau] CPU: 0 PID: 838 Comm: kworker/0:6 Not tainted 4.17.14-201.Lyude.bz1477182.V3.fc28.x86_64 #1 Hardware name: LENOVO 20EQS64N00/20EQS64N00, BIOS N1EET77W (1.50 ) 03/28/2018 Workqueue: events nouveau_display_hpd_work [nouveau] RIP: 0010:nouveau_dp_detect+0x17e/0x370 [nouveau] RSP: 0018:ffffa15143933cf0 EFLAGS: 00010293 RAX: 0000000000000000 RBX: ffff8cb4f656c400 RCX: 0000000000000000 RDX: ffffa1514500e4e4 RSI: ffffa1514500e4e4 RDI: 0000000001009002 RBP: ffff8cb4f4a8a800 R08: ffffa15143933cfd R09: ffffa15143933cfc R10: 0000000000000000 R11: 0000000000000000 R12: ffff8cb4fb57a000 R13: ffff8cb4fb57a000 R14: ffff8cb4f4a8f800 R15: ffff8cb4f656c418 FS: 0000000000000000(0000) GS:ffff8cb51f400000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f78ec938000 CR3: 000000073720a003 CR4: 00000000003606f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ? _cond_resched+0x15/0x30 nouveau_connector_detect+0x2ce/0x520 [nouveau] ? _cond_resched+0x15/0x30 ? ww_mutex_lock+0x12/0x40 drm_helper_probe_detect_ctx+0x8b/0xe0 [drm_kms_helper] drm_helper_hpd_irq_event+0xa8/0x120 [drm_kms_helper] nouveau_display_hpd_work+0x2a/0x60 [nouveau] process_one_work+0x187/0x340 worker_thread+0x2e/0x380 ? pwq_unbound_release_workfn+0xd0/0xd0 kthread+0x112/0x130 ? kthread_create_worker_on_cpu+0x70/0x70 ret_from_fork+0x35/0x40 Code: 4c 8d 44 24 0d b9 00 05 00 00 48 89 ef ba 09 00 00 00 be 01 00 00 00 e8 e1 09 f8 ff 85 c0 0f 85 b2 01 00 00 80 7c 24 0c 03 74 02 <0f> 0b 48 89 ef e8 b8 07 f8 ff f6 05 51 1b c8 ff 02 0f 84 72 ff ---[ end trace 55d811b38fc8e71a ]--- So, to fix this we attempt to grab a runtime PM reference in the ACPI handler itself asynchronously. If the GPU is already awake (it will have normal hotplugging at this point) or runtime PM callbacks are currently disabled on the device, we drop our reference without updating the autosuspend delay. We only schedule connector reprobes when we successfully managed to queue up a resume request with our asynchronous PM ref. This also has the added benefit of preventing redundant connector reprobes from ACPI while the GPU is runtime resumed! Signed-off-by: Lyude Paul Cc: stable@vger.kernel.org Cc: Karol Herbst Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1477182#c41 Signed-off-by: Lyude Paul Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_display.c | 26 +++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 42e7c35e3fba..e4024af5a46f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -377,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, { struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); struct acpi_bus_event *info = data; + int ret; if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { - /* - * This may be the only indication we receive of a - * connector hotplug on a runtime suspended GPU, - * schedule hpd_work to check. 
- */ - schedule_work(&drm->hpd_work); + ret = pm_runtime_get(drm->dev->dev); + if (ret == 1 || ret == -EACCES) { + /* If the GPU is already awake, or in a state + * where we can't wake it up, it can handle + * it's own hotplug events. + */ + pm_runtime_put_autosuspend(drm->dev->dev); + } else if (ret == 0) { + /* This may be the only indication we receive + * of a connector hotplug on a runtime + * suspended GPU, schedule hpd_work to check. + */ + NV_DEBUG(drm, "ACPI requested connector reprobe\n"); + schedule_work(&drm->hpd_work); + pm_runtime_put_noidle(drm->dev->dev); + } else { + NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n", + ret); + } /* acpi-video should not generate keypresses for this */ return NOTIFY_BAD; -- GitLab From 2f7ca781fd382cf8dde73ed36dfdd93fd05b3332 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Tue, 7 Aug 2018 17:32:48 -0400 Subject: [PATCH 1175/1692] drm/nouveau/drm/nouveau: Don't forget to cancel hpd_work on suspend/unload Currently, there's nothing in nouveau that actually cancels this work struct. So, cancel it on suspend/unload. Otherwise, if we're unlucky enough hpd_work might try to keep running up until the system is suspended. Signed-off-by: Lyude Paul Cc: stable@vger.kernel.org Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_display.c | 9 ++++++--- drivers/gpu/drm/nouveau/nouveau_display.h | 2 +- drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index e4024af5a46f..540c0cbbfcee 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -442,7 +442,7 @@ nouveau_display_init(struct drm_device *dev) } void -nouveau_display_fini(struct drm_device *dev, bool suspend) +nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime) { struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); @@ -467,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) } drm_connector_list_iter_end(&conn_iter); + if (!runtime) + cancel_work_sync(&drm->hpd_work); + drm_kms_helper_poll_disable(dev); disp->fini(dev); } @@ -635,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) } } - nouveau_display_fini(dev, true); + nouveau_display_fini(dev, true, runtime); return 0; } - nouveau_display_fini(dev, true); + nouveau_display_fini(dev, true, runtime); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 54aa7c3fa42d..ff92b54ce448 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev) int nouveau_display_create(struct drm_device *dev); void nouveau_display_destroy(struct drm_device *dev); int nouveau_display_init(struct drm_device *dev); -void nouveau_display_fini(struct drm_device *dev, bool suspend); +void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime); int nouveau_display_suspend(struct drm_device *dev, bool runtime); void nouveau_display_resume(struct drm_device *dev, bool runtime); int nouveau_display_vblank_enable(struct drm_device *, unsigned int); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 04f704b77a3c..f1a119113d04 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -627,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev) nouveau_debugfs_fini(drm); if (dev->mode_config.num_crtc) - nouveau_display_fini(dev, false); + nouveau_display_fini(dev, false, false); nouveau_display_destroy(dev); nouveau_bios_takedown(dev); -- GitLab From d5986a1c4dcd00cb8b9eee4a56ee93868222a9a2 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Thu, 30 Aug 2018 13:16:28 -0400 Subject: [PATCH 1176/1692] drm/nouveau: Fix nouveau_connector_ddc_detect() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It looks like that when we moved over to using drm_connector_for_each_possible_encoder() in nouveau, that one rather important part of this function got dropped by accident: /* Right v here */ for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) { int id = connector->encoder_ids[i]; if (id == 0) break; Since it's rather difficult to notice: the conditional in this loop is actually: nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER Meaning that all early breaks result in nv_encoder keeping it's value, otherwise nv_encoder = NULL. Ugh. Since this got dropped, nouveau_connector_ddc_detect() now returns an encoder for every single connector, regardless of whether or not it's detected: [ 1780.056185] nouveau 0000:01:00.0: DRM: DDC responded, but no EDID for DP-2 So: fix this to ensure we only return an encoder if we actually found one, and clean up the rest of the function while we're at it since it's nearly impossible to read properly. Changes since v1: - Don't skip ddc probing for LVDS if we can't switch DDC through vga-switcheroo, just do the DDC probing without calling vga_switcheroo_lock_ddc() - skeggsb Signed-off-by: Lyude Paul Cc: Ville Syrjälä Fixes: ddba766dd07e ("drm/nouveau: Use drm_connector_for_each_possible_encoder()") Reviewed-by: Ville Syrjälä Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_connector.c | 49 ++++++++++++--------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 76660bc1ccfb..259ee5039125 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -412,9 +412,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); - struct nouveau_encoder *nv_encoder = NULL; + struct nouveau_encoder *nv_encoder = NULL, *found = NULL; struct drm_encoder *encoder; - int i, panel = -ENODEV; + int i, ret, panel = -ENODEV; + bool switcheroo_ddc = false; /* eDP panels need powering on by us (if the VBIOS doesn't default it * to on) before doing any AUX channel transactions. 
LVDS panel power @@ -431,37 +432,43 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) drm_connector_for_each_possible_encoder(connector, encoder, i) { nv_encoder = nouveau_encoder(encoder); - if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { - int ret = nouveau_dp_detect(nv_encoder); + switch (nv_encoder->dcb->type) { + case DCB_OUTPUT_DP: + ret = nouveau_dp_detect(nv_encoder); if (ret == NOUVEAU_DP_MST) return NULL; - if (ret == NOUVEAU_DP_SST) - break; - } else - if ((vga_switcheroo_handler_flags() & - VGA_SWITCHEROO_CAN_SWITCH_DDC) && - nv_encoder->dcb->type == DCB_OUTPUT_LVDS && - nv_encoder->i2c) { - int ret; - vga_switcheroo_lock_ddc(dev->pdev); - ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50); - vga_switcheroo_unlock_ddc(dev->pdev); - if (ret) + else if (ret == NOUVEAU_DP_SST) + found = nv_encoder; + + break; + case DCB_OUTPUT_LVDS: + switcheroo_ddc = !!(vga_switcheroo_handler_flags() & + VGA_SWITCHEROO_CAN_SWITCH_DDC); + /* fall-through */ + default: + if (!nv_encoder->i2c) break; - } else - if (nv_encoder->i2c) { + + if (switcheroo_ddc) + vga_switcheroo_lock_ddc(dev->pdev); if (nvkm_probe_i2c(nv_encoder->i2c, 0x50)) - break; + found = nv_encoder; + if (switcheroo_ddc) + vga_switcheroo_unlock_ddc(dev->pdev); + + break; } + if (found) + break; } /* eDP panel not detected, restore panel power GPIO to previous * state to avoid confusing the SOR for other output types. */ - if (!nv_encoder && panel == 0) + if (!found && panel == 0) nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel); - return nv_encoder; + return found; } static struct nouveau_encoder * -- GitLab From a43b16dda2d7485f5c5aed075c1dc9785e339515 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 28 Aug 2018 14:10:34 +1000 Subject: [PATCH 1177/1692] drm/nouveau: fix oops in client init failure path The NV_ERROR macro requires drm->client to be initialised, which it may not be at this stage of the init process. 
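A representative before/after of the pattern being changed (every call site in nouveau_cli_init() is converted the same way in the diff below):

        /* before: NV_ERROR() prints via drm->client, which may not be set up yet */
        NV_ERROR(drm, "Client allocation failed: %d\n", ret);

        /* after: print against the cli that is actually being initialised */
        NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);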
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drm.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index f1a119113d04..74d2283f2c28 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, mutex_unlock(&drm->master.lock); } if (ret) { - NV_ERROR(drm, "Client allocation failed: %d\n", ret); + NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret); goto done; } @@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, }, sizeof(struct nv_device_v0), &cli->device); if (ret) { - NV_ERROR(drm, "Device allocation failed: %d\n", ret); + NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret); goto done; } ret = nvif_mclass(&cli->device.object, mmus); if (ret < 0) { - NV_ERROR(drm, "No supported MMU class\n"); + NV_PRINTK(err, cli, "No supported MMU class\n"); goto done; } ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu); if (ret) { - NV_ERROR(drm, "MMU allocation failed: %d\n", ret); + NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret); goto done; } ret = nvif_mclass(&cli->mmu.object, vmms); if (ret < 0) { - NV_ERROR(drm, "No supported VMM class\n"); + NV_PRINTK(err, cli, "No supported VMM class\n"); goto done; } ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm); if (ret) { - NV_ERROR(drm, "VMM allocation failed: %d\n", ret); + NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret); goto done; } ret = nvif_mclass(&cli->mmu.object, mems); if (ret < 0) { - NV_ERROR(drm, "No supported MEM class\n"); + NV_PRINTK(err, cli, "No supported MEM class\n"); goto done; } -- GitLab From 51ed833c881b9d96557c773f6a37018d79e29a46 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 28 Aug 2018 14:10:42 +1000 Subject: [PATCH 1178/1692] drm/nouveau/mmu: don't attempt to dereference vmm without valid instance pointer Fixes oopses in certain failure paths. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index de269eb482dd..7459def78d50 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) void nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) { - if (vmm->func->part && inst) { + if (inst && vmm->func->part) { mutex_lock(&vmm->mutex); vmm->func->part(vmm, inst); mutex_unlock(&vmm->mutex); -- GitLab From 0a6986c6595e9afd20ff7280dab36431c1e467f8 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Sep 2018 15:56:57 +1000 Subject: [PATCH 1179/1692] drm/nouveau/TBDdevinit: don't fail when PMU/PRE_OS is missing from VBIOS This Falcon application doesn't appear to be present on some newer systems, so let's not fail init if we can't find it. TBD: is there a way to determine whether it *should* be there? 
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c index b80618e35491..d65959ef0564 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c @@ -158,7 +158,8 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post) } /* load and execute some other ucode image (bios therm?) */ - return pmu_load(init, 0x01, post, NULL, NULL); + pmu_load(init, 0x01, post, NULL, NULL); + return 0; } static const struct nvkm_devinit_func -- GitLab From 606557708fa06ebf21372d8fabf6f97529ab2349 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Sep 2018 15:57:04 +1000 Subject: [PATCH 1180/1692] drm/nouveau/disp: remove unused struct member Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 1 - drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index be9e7f8c3b23..4b6973f90309 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -245,7 +245,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp, outp->index = index; outp->info = *dcbE; outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index); - outp->or = ffs(outp->info.or) - 1; OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x " "edid %x bus %d head %x", diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h index ea84d7d5741a..776e36972daa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h @@ -13,7 +13,6 @@ struct nvkm_outp { struct dcb_output info; struct nvkm_i2c_bus *i2c; - int or; struct list_head head; struct nvkm_conn *conn; -- GitLab From f6d52b2172b1adfde010df34730290c282ee641b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Sep 2018 15:57:07 +1000 Subject: [PATCH 1181/1692] drm/nouveau/disp: move eDP panel power handling We need to do this earlier to prevent aux channel timeouts in resume paths on certain systems. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_connector.c | 23 +----------- drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 37 +++++++++++++++++-- 2 files changed, 35 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 259ee5039125..247f72cc4d10 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -409,26 +409,11 @@ static struct nouveau_encoder * nouveau_connector_ddc_detect(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct nouveau_connector *nv_connector = nouveau_connector(connector); - struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); struct nouveau_encoder *nv_encoder = NULL, *found = NULL; struct drm_encoder *encoder; - int i, ret, panel = -ENODEV; + int i, ret; bool switcheroo_ddc = false; - /* eDP panels need powering on by us (if the VBIOS doesn't default it - * to on) before doing any AUX channel transactions. LVDS panel power - * is handled by the SOR itself, and not required for LVDS DDC. 
- */ - if (nv_connector->type == DCB_CONNECTOR_eDP) { - panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); - if (panel == 0) { - nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); - msleep(300); - } - } - drm_connector_for_each_possible_encoder(connector, encoder, i) { nv_encoder = nouveau_encoder(encoder); @@ -462,12 +447,6 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) break; } - /* eDP panel not detected, restore panel power GPIO to previous - * state to avoid confusing the SOR for other output types. - */ - if (!found && panel == 0) - nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel); - return found; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 7c5bed29ffef..bb34ee77458e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -28,6 +28,7 @@ #include #include +#include #include #include @@ -491,7 +492,7 @@ nvkm_dp_acquire(struct nvkm_outp *outp) return ret; } -static void +static bool nvkm_dp_enable(struct nvkm_dp *dp, bool enable) { struct nvkm_i2c_aux *aux = dp->aux; @@ -505,7 +506,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable) if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, sizeof(dp->dpcd))) - return; + return true; } if (dp->present) { @@ -515,6 +516,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable) } atomic_set(&dp->lt.done, 0); + return false; } static int @@ -555,9 +557,38 @@ nvkm_dp_fini(struct nvkm_outp *outp) static void nvkm_dp_init(struct nvkm_outp *outp) { + struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio; struct nvkm_dp *dp = nvkm_dp(outp); + nvkm_notify_put(&dp->outp.conn->hpd); - nvkm_dp_enable(dp, true); + + /* eDP panels need powering on by us (if the VBIOS doesn't default it + * to on) before doing any AUX channel transactions. LVDS panel power + * is handled by the SOR itself, and not required for LVDS DDC. + */ + if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) { + int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff); + if (power == 0) + nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1); + + /* We delay here unconditionally, even if already powered, + * because some laptop panels having a significant resume + * delay before the panel begins responding. + * + * This is likely a bit of a hack, but no better idea for + * handling this at the moment. + */ + msleep(300); + + /* If the eDP panel can't be detected, we need to restore + * the panel power GPIO to avoid breaking another output. + */ + if (!nvkm_dp_enable(dp, true) && power == 0) + nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0); + } else { + nvkm_dp_enable(dp, true); + } + nvkm_notify_get(&dp->hpd); } -- GitLab From e04cfdc9b7398c60dbc70212415ea63b6c6a93ae Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Sep 2018 15:57:09 +1000 Subject: [PATCH 1182/1692] drm/nouveau/disp: fix DP disable race If a HPD pulse signalling the need to retrain the link occurs between the KMS driver releasing the output and the supervisor interrupt that finishes the teardown, it was possible get a NULL-ptr deref. Avoid this by marking the link as inactive earlier. 
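Conceptually, the old release hook is split in two so that the "stop retraining" half runs as soon as the KMS driver lets go of the output, rather than waiting for the supervisor teardown. A sketch; the wiring through nvkm_outp_release() and nv50_disp_super_2_0() is in the diff below:

        /* new .release hook: runs immediately when the output is released,
         * so a sink IRQ can no longer trigger a retrain of a dead link */
        static void
        nvkm_dp_release(struct nvkm_outp *outp)
        {
                struct nvkm_dp *dp = nvkm_dp(outp);

                atomic_set(&dp->lt.done, 0);
                dp->outp.ior->dp.nr = 0;
        }

        /* the DisableLT script moves to a new .disable hook, still executed
         * later from the supervisor when the OR's last head is shut down */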
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 17 ++++++++++++----- drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 6 +++--- drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 2 ++ drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | 3 ++- 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index bb34ee77458e..5f301e632599 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -413,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) } static void -nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior) +nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior) { struct nvkm_dp *dp = nvkm_dp(outp); - /* Prevent link from being retrained if sink sends an IRQ. */ - atomic_set(&dp->lt.done, 0); - ior->dp.nr = 0; - /* Execute DisableLT script from DP Info Table. */ nvbios_init(&ior->disp->engine.subdev, dp->info.script[4], init.outp = &dp->outp.info; @@ -429,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior) ); } +static void +nvkm_dp_release(struct nvkm_outp *outp) +{ + struct nvkm_dp *dp = nvkm_dp(outp); + + /* Prevent link from being retrained if sink sends an IRQ. */ + atomic_set(&dp->lt.done, 0); + dp->outp.ior->dp.nr = 0; +} + static int nvkm_dp_acquire(struct nvkm_outp *outp) { @@ -607,6 +613,7 @@ nvkm_dp_func = { .fini = nvkm_dp_fini, .acquire = nvkm_dp_acquire, .release = nvkm_dp_release, + .disable = nvkm_dp_disable, }; static int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index f89c7b977aa5..def005dd5fda 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c @@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head) nv50_disp_super_ied_off(head, ior, 2); /* If we're shutting down the OR's only active head, execute - * the output path's release function. + * the output path's disable function. */ if (ior->arm.head == (1 << head->id)) { - if ((outp = ior->arm.outp) && outp->func->release) - outp->func->release(outp, ior); + if ((outp = ior->arm.outp) && outp->func->disable) + outp->func->disable(outp, ior); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index 4b6973f90309..9fcaf3147eb8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user) if (ior) { outp->acquired &= ~user; if (!outp->acquired) { + if (outp->func->release && outp->ior) + outp->func->release(outp); outp->ior->asy.outp = NULL; outp->ior = NULL; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h index 776e36972daa..96272ecccb59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h @@ -40,7 +40,8 @@ struct nvkm_outp_func { void (*init)(struct nvkm_outp *); void (*fini)(struct nvkm_outp *); int (*acquire)(struct nvkm_outp *); - void (*release)(struct nvkm_outp *, struct nvkm_ior *); + void (*release)(struct nvkm_outp *); + void (*disable)(struct nvkm_outp *, struct nvkm_ior *); }; #define OUTP_MSG(o,l,f,a...) 
do { \ -- GitLab From 53b0cc46f27cfc2cadca609b503a7d92b5185a47 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Sep 2018 15:57:11 +1000 Subject: [PATCH 1183/1692] drm/nouveau/disp/gm200-: enforce identity-mapped SOR assignment for LVDS/eDP panels Fixes eDP backlight issues on more recent laptops. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 14 ++++++++++++++ drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 1 + drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 15 ++++++++++++--- drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | 1 + 4 files changed, 28 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index 32fa94a9773f..cbd33e87b799 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c @@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine) struct nvkm_outp *outp, *outt, *pair; struct nvkm_conn *conn; struct nvkm_head *head; + struct nvkm_ior *ior; struct nvbios_connE connE; struct dcb_output dcbE; u8 hpd = 0, ver, hdr; @@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine) return ret; } + /* Enforce identity-mapped SOR assignment for panels, which have + * certain bits (ie. backlight controls) wired to a specific SOR. + */ + list_for_each_entry(outp, &disp->outp, head) { + if (outp->conn->info.type == DCB_CONNECTOR_LVDS || + outp->conn->info.type == DCB_CONNECTOR_eDP) { + ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1); + if (!WARN_ON(!ior)) + ior->identity = true; + outp->identity = true; + } + } + i = 0; list_for_each_entry(head, &disp->head, head) i = max(i, head->id + 1); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h index e0b4e0c5704e..19911211a12a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h @@ -16,6 +16,7 @@ struct nvkm_ior { char name[8]; struct list_head head; + bool identity; struct nvkm_ior_state { struct nvkm_outp *outp; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index 9fcaf3147eb8..c62030c96fba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -129,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user) if (proto == UNKNOWN) return -ENOSYS; + /* Deal with panels requiring identity-mapped SOR assignment. */ + if (outp->identity) { + ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1); + if (WARN_ON(!ior)) + return -ENOSPC; + return nvkm_outp_acquire_ior(outp, user, ior); + } + /* First preference is to reuse the OR that is currently armed * on HW, if any, in order to prevent unnecessary switching. */ list_for_each_entry(ior, &outp->disp->ior, head) { - if (!ior->asy.outp && ior->arm.outp == outp) + if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) return nvkm_outp_acquire_ior(outp, user, ior); } /* Failing that, a completely unused OR is the next best thing. 
*/ list_for_each_entry(ior, &outp->disp->ior, head) { - if (!ior->asy.outp && ior->type == type && !ior->arm.outp && + if (!ior->identity && + !ior->asy.outp && ior->type == type && !ior->arm.outp && (ior->func->route.set || ior->id == __ffs(outp->info.or))) return nvkm_outp_acquire_ior(outp, user, ior); } @@ -148,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user) * but will be released during the next modeset. */ list_for_each_entry(ior, &outp->disp->ior, head) { - if (!ior->asy.outp && ior->type == type && + if (!ior->identity && !ior->asy.outp && ior->type == type && (ior->func->route.set || ior->id == __ffs(outp->info.or))) return nvkm_outp_acquire_ior(outp, user, ior); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h index 96272ecccb59..6c8aa5cfed9d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h @@ -16,6 +16,7 @@ struct nvkm_outp { struct list_head head; struct nvkm_conn *conn; + bool identity; /* Assembly state. */ #define NVKM_OUTP_PRIV 1 -- GitLab From 644e2537fdc77baeeefc829524937bca64329f82 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Thu, 6 Sep 2018 18:33:39 +0200 Subject: [PATCH 1184/1692] dm raid: fix stripe adding reshape deadlock When initiating a stripe adding reshape, a deadlock between md_stop_writes() waiting for the sync thread to stop and the running sync thread waiting for inactive stripes occurs (this frequently happens on single-core but rarely on multi-core systems). Fix this deadlock by setting MD_RECOVERY_WAIT to have the main MD resynchronization thread worker (md_do_sync()) bail out when initiating the reshape via constructor arguments. Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- drivers/md/dm-raid.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index d8406e0b4540..9129c5e0c280 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3869,14 +3869,13 @@ static int rs_start_reshape(struct raid_set *rs) struct mddev *mddev = &rs->md; struct md_personality *pers = mddev->pers; + /* Don't allow the sync thread to work until the table gets reloaded. */ + set_bit(MD_RECOVERY_WAIT, &mddev->recovery); + r = rs_setup_reshape(rs); if (r) return r; - /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */ - if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) - mddev_resume(mddev); - /* * Check any reshape constraints enforced by the personalility * @@ -3900,10 +3899,6 @@ static int rs_start_reshape(struct raid_set *rs) } } - /* Suspend because a resume will happen in raid_resume() */ - set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); - mddev_suspend(mddev); - /* * Now reshape got set up, update superblocks to * reflect the fact so that a table reload will -- GitLab From c44a5ee803d2b7ed8c2e6ce24a5c4dd60778886e Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Thu, 6 Sep 2018 18:33:40 +0200 Subject: [PATCH 1185/1692] dm raid: fix rebuild of specific devices by updating superblock Update superblock when particular devices are requested via rebuild (e.g. lvconvert --replace ...) to avoid spurious failure with the "New device injected into existing raid set without 'delta_disks' or 'rebuild' parameter specified" error message. 
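The change itself is small: when the constructor sees a recovering set with explicit rebuild devices, it now forces the superblocks to be rewritten. Annotated excerpt of the hunk below:

        } else if (rs_is_recovering(rs)) {
                /* Rebuild of particular legs requested (e.g. lvconvert --replace):
                 * mark the superblocks for update so a later table reload does not
                 * mistake those legs for newly injected devices. */
                if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
                        set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
                        rs_setup_recovery(rs, MaxSector);
                }
                /* a recovering raid set may still be resized */
        }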
Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- drivers/md/dm-raid.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 9129c5e0c280..6d961db8760e 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3126,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); rs_set_new(rs); } else if (rs_is_recovering(rs)) { + /* Rebuild particular devices */ + if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) { + set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); + rs_setup_recovery(rs, MaxSector); + } /* A recovering raid set may be resized */ ; /* skip setup rs */ } else if (rs_is_reshaping(rs)) { -- GitLab From 36a240a706d43383bbdd377522501ddd2e5771f6 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Thu, 6 Sep 2018 22:54:29 +0200 Subject: [PATCH 1186/1692] dm raid: fix RAID leg rebuild errors On fast devices such as NVMe, a flaw in rs_get_progress() results in false target status output when userspace lvm2 requests leg rebuilds (symptom of the failure is device health chars 'aaaaaaaa' instead of expected 'aAaAAAAA' causing lvm2 to fail). The correct sync action state definitions already exist in decipher_sync_action() so fix rs_get_progress() to use it. Change decipher_sync_action() to return an enum rather than a string for the sync states and call it from rs_get_progress(). Introduce sync_str() to translate from enum to the string that is needed by raid_status(). Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- drivers/md/dm-raid.c | 80 +++++++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 34 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 6d961db8760e..fceeb962f43b 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3332,32 +3332,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } -/* Return string describing the current sync action of @mddev */ -static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery) +/* Return sync state string for @state */ +enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle }; +static const char *sync_str(enum sync_state state) +{ + /* Has to be in above sync_state order! */ + static const char *sync_strs[] = { + "frozen", + "reshape", + "resync", + "check", + "repair", + "recover", + "idle" + }; + + return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? 
sync_strs[state] : "undef"; +}; + +/* Return enum sync_state for @mddev derived from @recovery flags */ +static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery) { if (test_bit(MD_RECOVERY_FROZEN, &recovery)) - return "frozen"; + return st_frozen; - /* The MD sync thread can be done with io but still be running */ + /* The MD sync thread can be done with io or be interrupted but still be running */ if (!test_bit(MD_RECOVERY_DONE, &recovery) && (test_bit(MD_RECOVERY_RUNNING, &recovery) || (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) { if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) - return "reshape"; + return st_reshape; if (test_bit(MD_RECOVERY_SYNC, &recovery)) { if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) - return "resync"; - else if (test_bit(MD_RECOVERY_CHECK, &recovery)) - return "check"; - return "repair"; + return st_resync; + if (test_bit(MD_RECOVERY_CHECK, &recovery)) + return st_check; + return st_repair; } if (test_bit(MD_RECOVERY_RECOVER, &recovery)) - return "recover"; + return st_recover; + + if (mddev->reshape_position != MaxSector) + return st_reshape; } - return "idle"; + return st_idle; } /* @@ -3391,6 +3412,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, sector_t resync_max_sectors) { sector_t r; + enum sync_state state; struct mddev *mddev = &rs->md; clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); @@ -3401,20 +3423,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); } else { - if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) && - !test_bit(MD_RECOVERY_INTR, &recovery) && - (test_bit(MD_RECOVERY_NEEDED, &recovery) || - test_bit(MD_RECOVERY_RESHAPE, &recovery) || - test_bit(MD_RECOVERY_RUNNING, &recovery))) - r = mddev->curr_resync_completed; - else + state = decipher_sync_action(mddev, recovery); + + if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery)) r = mddev->recovery_cp; + else + r = mddev->curr_resync_completed; - if (r >= resync_max_sectors && - (!test_bit(MD_RECOVERY_REQUESTED, &recovery) || - (!test_bit(MD_RECOVERY_FROZEN, &recovery) && - !test_bit(MD_RECOVERY_NEEDED, &recovery) && - !test_bit(MD_RECOVERY_RUNNING, &recovery)))) { + if (state == st_idle && r >= resync_max_sectors) { /* * Sync complete. */ @@ -3422,24 +3438,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, if (test_bit(MD_RECOVERY_RECOVER, &recovery)) set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); - } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) { + } else if (state == st_recover) /* * In case we are recovering, the array is not in sync * and health chars should show the recovering legs. */ ; - - } else if (test_bit(MD_RECOVERY_SYNC, &recovery) && - !test_bit(MD_RECOVERY_REQUESTED, &recovery)) { + else if (state == st_resync) /* * If "resync" is occurring, the raid set * is or may be out of sync hence the health * characters shall be 'a'. 
*/ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); - - } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) && - !test_bit(MD_RECOVERY_REQUESTED, &recovery)) { + else if (state == st_reshape) /* * If "reshape" is occurring, the raid set * is or may be out of sync hence the health @@ -3447,7 +3459,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, */ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); - } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) { + else if (state == st_check || state == st_repair) /* * If "check" or "repair" is occurring, the raid set has * undergone an initial sync and the health characters @@ -3455,12 +3467,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, */ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); - } else { + else { struct md_rdev *rdev; /* * We are idle and recovery is needed, prevent 'A' chars race - * caused by components still set to in-sync by constrcuctor. + * caused by components still set to in-sync by constructor. */ if (test_bit(MD_RECOVERY_NEEDED, &recovery)) set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); @@ -3524,7 +3536,7 @@ static void raid_status(struct dm_target *ti, status_type_t type, progress = rs_get_progress(rs, recovery, resync_max_sectors); resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? atomic64_read(&mddev->resync_mismatches) : 0; - sync_action = decipher_sync_action(&rs->md, recovery); + sync_action = sync_str(decipher_sync_action(&rs->md, recovery)); /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */ for (i = 0; i < rs->raid_disks; i++) -- GitLab From 5380c05b682991a6818c3755d450a3e87eeac0e5 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Thu, 6 Sep 2018 14:02:54 -0400 Subject: [PATCH 1187/1692] dm raid: bump target version, update comments and documentation Bump target version to reflect the documented fixes are available. Also fix some code comments (typos and clarity). Signed-off-by: Heinz Mauelshagen Signed-off-by: Mike Snitzer --- Documentation/device-mapper/dm-raid.txt | 4 ++++ drivers/md/dm-raid.c | 10 ++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt index 390c145f01d7..52a719b49afd 100644 --- a/Documentation/device-mapper/dm-raid.txt +++ b/Documentation/device-mapper/dm-raid.txt @@ -348,3 +348,7 @@ Version History 1.13.1 Fix deadlock caused by early md_stop_writes(). Also fix size an state races. 1.13.2 Fix raid redundancy validation and avoid keeping raid set frozen +1.14.0 Fix reshape race on small devices. Fix stripe adding reshape + deadlock/potential data corruption. Update superblock when + specific devices are requested via rebuild. Fix RAID leg + rebuild errors. diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index fceeb962f43b..5ba067fa0c72 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2010-2011 Neil Brown - * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved. + * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ @@ -2626,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs) return 0; } - /* HM FIXME: get InSync raid_dev? */ + /* HM FIXME: get In_Sync raid_dev? 
*/ rdev = &rs->dev[0].rdev; if (rs->delta_disks < 0) { @@ -3224,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Start raid set read-only and assumed clean to change in raid_resume() */ rs->md.ro = 1; rs->md.in_sync = 1; + + /* Keep array frozen */ set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); /* Has to be held on running the array */ @@ -3247,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) rs->callbacks.congested_fn = raid_is_congested; dm_table_add_target_callbacks(ti->table, &rs->callbacks); - /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */ + /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */ if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); if (r) { @@ -4012,7 +4014,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 13, 2}, + .version = {1, 14, 0}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, -- GitLab From e2c631ba75a7e727e8db0a9d30a06bfd434adb3a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 5 Sep 2018 10:41:58 +0200 Subject: [PATCH 1188/1692] clocksource: Revert "Remove kthread" I turns out that the silly spawn kthread from worker was actually needed. clocksource_watchdog_kthread() cannot be called directly from clocksource_watchdog_work(), because clocksource_select() calls timekeeping_notify() which uses stop_machine(). One cannot use stop_machine() from a workqueue() due lock inversions wrt CPU hotplug. Revert the patch but add a comment that explain why we jump through such apparently silly hoops. Fixes: 7197e77abcb6 ("clocksource: Remove kthread") Reported-by: Siegfried Metz Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Tested-by: Niklas Cassel Tested-by: Kevin Shanahan Tested-by: viktor_jaegerskuepper@freenet.de Tested-by: Siegfried Metz Cc: rafael.j.wysocki@intel.com Cc: len.brown@intel.com Cc: diego.viola@gmail.com Cc: rui.zhang@intel.com Cc: bjorn.andersson@linaro.org Link: https://lkml.kernel.org/r/20180905084158.GR24124@hirez.programming.kicks-ass.net --- kernel/time/clocksource.c | 40 +++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index f74fb00d8064..0e6e97a01942 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags) spin_unlock_irqrestore(&watchdog_lock, *flags); } +static int clocksource_watchdog_kthread(void *data); +static void __clocksource_change_rating(struct clocksource *cs, int rating); + /* * Interval: 0.5sec Threshold: 0.0625s */ #define WATCHDOG_INTERVAL (HZ >> 1) #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) +static void clocksource_watchdog_work(struct work_struct *work) +{ + /* + * We cannot directly run clocksource_watchdog_kthread() here, because + * clocksource_select() calls timekeeping_notify() which uses + * stop_machine(). One cannot use stop_machine() from a workqueue() due + * lock inversions wrt CPU hotplug. + * + * Also, we only ever run this work once or twice during the lifetime + * of the kernel, so there is no point in creating a more permanent + * kthread for this. 
+ * + * If kthread_run fails the next watchdog scan over the + * watchdog_list will find the unstable clock again. + */ + kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog"); +} + static void __clocksource_unstable(struct clocksource *cs) { cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); cs->flags |= CLOCK_SOURCE_UNSTABLE; /* - * If the clocksource is registered clocksource_watchdog_work() will + * If the clocksource is registered clocksource_watchdog_kthread() will * re-rate and re-select. */ if (list_empty(&cs->list)) { @@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs) if (cs->mark_unstable) cs->mark_unstable(cs); - /* kick clocksource_watchdog_work() */ + /* kick clocksource_watchdog_kthread() */ if (finished_booting) schedule_work(&watchdog_work); } @@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs) * @cs: clocksource to be marked unstable * * This function is called by the x86 TSC code to mark clocksources as unstable; - * it defers demotion and re-selection to a work. + * it defers demotion and re-selection to a kthread. */ void clocksource_mark_unstable(struct clocksource *cs) { @@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs) } } -static void __clocksource_change_rating(struct clocksource *cs, int rating); - -static int __clocksource_watchdog_work(void) +static int __clocksource_watchdog_kthread(void) { struct clocksource *cs, *tmp; unsigned long flags; @@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void) return select; } -static void clocksource_watchdog_work(struct work_struct *work) +static int clocksource_watchdog_kthread(void *data) { mutex_lock(&clocksource_mutex); - if (__clocksource_watchdog_work()) + if (__clocksource_watchdog_kthread()) clocksource_select(); mutex_unlock(&clocksource_mutex); + return 0; } static bool clocksource_is_watchdog(struct clocksource *cs) @@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) static void clocksource_select_watchdog(bool fallback) { } static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } static inline void clocksource_resume_watchdog(void) { } -static inline int __clocksource_watchdog_work(void) { return 0; } +static inline int __clocksource_watchdog_kthread(void) { return 0; } static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } void clocksource_mark_unstable(struct clocksource *cs) { } @@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void) /* * Run the watchdog first to eliminate unstable clock sources */ - __clocksource_watchdog_work(); + __clocksource_watchdog_kthread(); clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; -- GitLab From 8f5c5fcf353302374b36232d6885c1a3b579e5ca Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 4 Sep 2018 14:54:55 -0700 Subject: [PATCH 1189/1692] tipc: call start and done ops directly in __tipc_nl_compat_dumpit() __tipc_nl_compat_dumpit() uses a netlink_callback on stack, so the only way to align it with other ->dumpit() call path is calling tipc_dump_start() and tipc_dump_done() directly inside it. Otherwise ->dumpit() would always get NULL from cb->args[]. But tipc_dump_start() uses sock_net(cb->skb->sk) to retrieve net pointer, the cb->skb here doesn't set skb->sk, the net pointer is saved in msg->net instead, so introduce a helper function __tipc_dump_start() to pass in msg->net. 
Ying pointed out cb->args[0...3] are already used by other callbacks on this call path, so we can't use cb->args[0] any more, use cb->args[4] instead. Fixes: 9a07efa9aea2 ("tipc: switch to rhashtable iterator") Reported-and-tested-by: syzbot+e93a2c41f91b8e2c7d9b@syzkaller.appspotmail.com Cc: Jon Maloy Cc: Ying Xue Signed-off-by: Cong Wang Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/netlink_compat.c | 2 ++ net/tipc/socket.c | 17 +++++++++++------ net/tipc/socket.h | 1 + 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index a2f76743c73a..82f665728382 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -185,6 +185,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, return -ENOMEM; buf->sk = msg->dst_sk; + __tipc_dump_start(&cb, msg->net); do { int rem; @@ -216,6 +217,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, err = 0; err_out: + tipc_dump_done(&cb); kfree_skb(buf); if (err == -EMSGSIZE) { diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a0ff8bffc96b..3f03ddd0e35b 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3230,7 +3230,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, struct netlink_callback *cb, struct tipc_sock *tsk)) { - struct rhashtable_iter *iter = (void *)cb->args[0]; + struct rhashtable_iter *iter = (void *)cb->args[4]; struct tipc_sock *tsk; int err; @@ -3266,8 +3266,14 @@ EXPORT_SYMBOL(tipc_nl_sk_walk); int tipc_dump_start(struct netlink_callback *cb) { - struct rhashtable_iter *iter = (void *)cb->args[0]; - struct net *net = sock_net(cb->skb->sk); + return __tipc_dump_start(cb, sock_net(cb->skb->sk)); +} +EXPORT_SYMBOL(tipc_dump_start); + +int __tipc_dump_start(struct netlink_callback *cb, struct net *net) +{ + /* tipc_nl_name_table_dump() uses cb->args[0...3]. */ + struct rhashtable_iter *iter = (void *)cb->args[4]; struct tipc_net *tn = tipc_net(net); if (!iter) { @@ -3275,17 +3281,16 @@ int tipc_dump_start(struct netlink_callback *cb) if (!iter) return -ENOMEM; - cb->args[0] = (long)iter; + cb->args[4] = (long)iter; } rhashtable_walk_enter(&tn->sk_rht, iter); return 0; } -EXPORT_SYMBOL(tipc_dump_start); int tipc_dump_done(struct netlink_callback *cb) { - struct rhashtable_iter *hti = (void *)cb->args[0]; + struct rhashtable_iter *hti = (void *)cb->args[4]; rhashtable_walk_exit(hti); kfree(hti); diff --git a/net/tipc/socket.h b/net/tipc/socket.h index d43032e26532..5e575f205afe 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h @@ -69,5 +69,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb, struct netlink_callback *cb, struct tipc_sock *tsk)); int tipc_dump_start(struct netlink_callback *cb); +int __tipc_dump_start(struct netlink_callback *cb, struct net *net); int tipc_dump_done(struct netlink_callback *cb); #endif -- GitLab From da4dfaf8428d9f71e2ac4f736bacb81adab36504 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 7 Sep 2018 08:02:05 +0200 Subject: [PATCH 1190/1692] i2c: xiic: Record xilinx i2c with Zynq fragment Include xilinx soft i2c controller to Zynq fragment to make clear who is responsible for it. 
Signed-off-by: Michal Simek Signed-off-by: Wolfram Sang --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 9ad052aeac39..d870cb57c887 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2311,6 +2311,7 @@ F: drivers/clocksource/cadence_ttc_timer.c F: drivers/i2c/busses/i2c-cadence.c F: drivers/mmc/host/sdhci-of-arasan.c F: drivers/edac/synopsys_edac.c +F: drivers/i2c/busses/i2c-xiic.c ARM64 PORT (AARCH64 ARCHITECTURE) M: Catalin Marinas -- GitLab From 694556d54f354d3fe43bb2e61fd6103cca2638a4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 23 Aug 2018 09:58:27 +0100 Subject: [PATCH 1191/1692] KVM: arm/arm64: Clean dcache to PoC when changing PTE due to CoW When triggering a CoW, we unmap the RO page via an MMU notifier (invalidate_range_start), and then populate the new PTE using another one (change_pte). In the meantime, we'll have copied the old page into the new one. The problem is that the data for the new page is sitting in the cache, and should the guest have an uncached mapping to that page (or its MMU off), following accesses will bypass the cache. In a way, this is similar to what happens on a translation fault: We need to clean the page to the PoC before mapping it. So let's just do that. This fixes a KVM unit test regression observed on a HiSilicon platform, and subsequently reproduced on Seattle. Fixes: a9c0e12ebee5 ("KVM: arm/arm64: Only clean the dcache on translation fault") Cc: stable@vger.kernel.org # v4.16+ Reported-by: Mike Galbraith Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- virt/kvm/arm/mmu.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 91aaf73b00df..111a660be3be 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1860,13 +1860,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { unsigned long end = hva + PAGE_SIZE; + kvm_pfn_t pfn = pte_pfn(pte); pte_t stage2_pte; if (!kvm->arch.pgd) return; trace_kvm_set_spte_hva(hva); - stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); + + /* + * We've moved a page around, probably through CoW, so let's treat it + * just like a translation fault and clean the cache to the PoC. + */ + clean_dcache_guest_page(pfn, PAGE_SIZE); + stage2_pte = pfn_pte(pfn, PAGE_S2); handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); } -- GitLab From 7d14919c0d475a795c0127631ac8ecb2b0f31831 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 23 Aug 2018 11:51:43 +0100 Subject: [PATCH 1192/1692] arm64: KVM: Only force FPEXC32_EL2.EN if trapping FPSIMD If trapping FPSIMD in the context of an AArch32 guest, it is critical to set FPEXC32_EL2.EN to 1 so that the trapping is taken to EL2 and not EL1. Conversely, it is just as critical *not* to set FPEXC32_EL2.EN to 1 if we're not going to trap FPSIMD, as we then corrupt the existing VFP state. Moving the call to __activate_traps_fpsimd32 to the point where we know for sure that we are going to trap ensures that we don't set that bit spuriously. 
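Condensed, the trap setup becomes the following (VHE flavour shown; the nVHE path in the diff does the same with CPTR_EL2_TFP):

        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_ZEN;
        if (!update_fp_enabled(vcpu)) {
                val &= ~CPACR_EL1_FPEN;
                /* only force FPEXC32_EL2.EN when we are actually trapping FPSIMD */
                __activate_traps_fpsimd32(vcpu);
        }
        write_sysreg(val, cpacr_el1);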
Fixes: e6b673b741ea ("KVM: arm64: Optimise FPSIMD handling to reduce guest/host thrashing") Cc: stable@vger.kernel.org # v4.18 Cc: Dave Martin Reported-by: Alexander Graf Tested-by: Alexander Graf Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm64/kvm/hyp/switch.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index d496ef579859..ca46153d7915 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) val = read_sysreg(cpacr_el1); val |= CPACR_EL1_TTA; val &= ~CPACR_EL1_ZEN; - if (!update_fp_enabled(vcpu)) + if (!update_fp_enabled(vcpu)) { val &= ~CPACR_EL1_FPEN; + __activate_traps_fpsimd32(vcpu); + } write_sysreg(val, cpacr_el1); @@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) val = CPTR_EL2_DEFAULT; val |= CPTR_EL2_TTA | CPTR_EL2_TZ; - if (!update_fp_enabled(vcpu)) + if (!update_fp_enabled(vcpu)) { val |= CPTR_EL2_TFP; + __activate_traps_fpsimd32(vcpu); + } write_sysreg(val, cptr_el2); } @@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); - __activate_traps_fpsimd32(vcpu); if (has_vhe()) activate_traps_vhe(vcpu); else -- GitLab From a35381e10dc46dd75e65e4b3832d9a0005d48d44 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 23 Aug 2018 10:18:14 +0100 Subject: [PATCH 1193/1692] KVM: Remove obsolete kvm_unmap_hva notifier backend kvm_unmap_hva is long gone, and we only have kvm_unmap_hva_range to deal with. Drop the now obsolete code. Fixes: fb1522e099f0 ("KVM: update to new mmu_notifier semantic v2") Cc: James Hogan Reviewed-by: Paolo Bonzini Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/asm/kvm_host.h | 1 - arch/arm64/include/asm/kvm_host.h | 1 - arch/mips/include/asm/kvm_host.h | 1 - arch/mips/kvm/mmu.c | 10 ---------- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu.c | 5 ----- virt/kvm/arm/mmu.c | 12 ------------ virt/kvm/arm/trace.h | 15 --------------- 8 files changed, 46 deletions(-) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 79906cecb091..3ad482d2f1eb 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events); #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f26055f2306e..8e6d46df38aa 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -357,7 +357,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events); #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index a9af1d2dcd69..2c1c53d12179 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ 
-931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, bool write); #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index ee64db032793..d8dcdb350405 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, return 1; } -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) -{ - unsigned long end = hva + PAGE_SIZE; - - handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); - - kvm_mips_callbacks->flush_shadow_all(kvm); - return 0; -} - int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) { handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 00ddb0c9e612..e6a33420b871 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1450,7 +1450,6 @@ asmlinkage void kvm_spurious_fault(void); ____kvm_handle_fault_on_reboot(insn, "") #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a282321329b5..d440154e8938 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1853,11 +1853,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); } -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) -{ - return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); -} - int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) { return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 111a660be3be..ed162a6c57c5 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat return 0; } -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) -{ - unsigned long end = hva + PAGE_SIZE; - - if (!kvm->arch.pgd) - return 0; - - trace_kvm_unmap_hva(hva); - handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); - return 0; -} - int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) { diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h index e53b596f483b..57b3edebbb40 100644 --- a/virt/kvm/arm/trace.h +++ b/virt/kvm/arm/trace.h @@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate, __entry->vcpu_pc, __entry->instr, __entry->cpsr) ); -TRACE_EVENT(kvm_unmap_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) -); - TRACE_EVENT(kvm_unmap_hva_range, TP_PROTO(unsigned long start, unsigned long end), TP_ARGS(start, end), -- GitLab From df3190e22016abf74ef67c9691e9fa1012a66bd5 Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 13 Aug 2018 17:04:53 +0100 Subject: [PATCH 1194/1692] arm64: KVM: Remove pgd_lock The lock has never 
been used and the page tables are protected by mmu_lock in struct kvm. Reviewed-by: Suzuki K Poulose Signed-off-by: Steven Price Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm64/include/asm/kvm_host.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8e6d46df38aa..3d6d7336f871 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -61,8 +61,7 @@ struct kvm_arch { u64 vmid_gen; u32 vmid; - /* 1-level 2nd stage table and lock */ - spinlock_t pgd_lock; + /* 1-level 2nd stage table, protected by kvm->mmu_lock */ pgd_t *pgd; /* VTTBR value associated with above pgd and vmid */ -- GitLab From c793279c77035053e67937f5743c6ebfc303e7c5 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 6 Sep 2018 09:47:51 -0700 Subject: [PATCH 1195/1692] hwmon: (nct6775) Fix access to fan pulse registers Not all fans have a fan pulse register. This can result in reading beyond the end of REG_FAN_PULSES and FAN_PULSE_SHIFT arrays, and was reported by smatch as possible error. 1672 for (i = 0; i < ARRAY_SIZE(data->rpm); i++) { ^^^^^^^^^^^^^^^^^^^^^^^^ This is a 7 element array. ... 1685 data->fan_pulses[i] = 1686 (nct6775_read_value(data, data->REG_FAN_PULSES[i]) 1687 >> data->FAN_PULSE_SHIFT[i]) & 0x03; ^^^^^^^^^^^^^^^^^^^^^^^^ FAN_PULSE_SHIFT is either 5 or 6 elements. To fix the problem, we have to ensure that all REG_FAN_PULSES and FAN_PULSE_SHIFT have the appropriate length, and that REG_FAN_PULSES is only read if the register actually exists. Fixes: 6c009501ff200 ("hwmon: (nct6775) Add support for NCT6102D/6106D") Reported-by: Dan Carpenter Signed-off-by: Guenter Roeck --- drivers/hwmon/nct6775.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 139781ae830b..87c316c6c341 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -299,8 +299,9 @@ static const u16 NCT6775_REG_PWM_READ[] = { static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d }; -static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 }; -static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 }; +static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = { + 0x641, 0x642, 0x643, 0x644 }; +static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { }; static const u16 NCT6775_REG_TEMP[] = { 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; @@ -425,8 +426,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 }; static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; -static const u16 NCT6776_REG_FAN_PULSES[] = { - 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; +static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = { + 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = { 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e }; @@ -502,8 +503,8 @@ static const s8 NCT6779_BEEP_BITS[] = { static const u16 NCT6779_REG_FAN[] = { 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 }; -static const u16 NCT6779_REG_FAN_PULSES[] = { - 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; +static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = { + 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = { 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 }; @@ -779,8 +780,8 @@ static const u16 
NCT6106_REG_TEMP_CONFIG[] = { static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 }; static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 }; -static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 }; -static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 }; +static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 }; +static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 }; static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 }; static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 }; @@ -1682,9 +1683,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev) if (data->has_fan_min & BIT(i)) data->fan_min[i] = nct6775_read_value(data, data->REG_FAN_MIN[i]); - data->fan_pulses[i] = - (nct6775_read_value(data, data->REG_FAN_PULSES[i]) - >> data->FAN_PULSE_SHIFT[i]) & 0x03; + + if (data->REG_FAN_PULSES[i]) { + data->fan_pulses[i] = + (nct6775_read_value(data, + data->REG_FAN_PULSES[i]) + >> data->FAN_PULSE_SHIFT[i]) & 0x03; + } nct6775_select_fan_div(dev, data, i, reg); } -- GitLab From b5861e5cf2fcf83031ea3e26b0a69d887adf7d21 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 3 Sep 2018 15:20:22 +0300 Subject: [PATCH 1196/1692] KVM: nVMX: Fix loss of pending IRQ/NMI before entering L2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consider the case L1 had a IRQ/NMI event until it executed VMLAUNCH/VMRESUME which wasn't delivered because it was disallowed (e.g. interrupts disabled). When L1 executes VMLAUNCH/VMRESUME, L0 needs to evaluate if this pending event should cause an exit from L2 to L1 or delivered directly to L2 (e.g. In case L1 don't intercept EXTERNAL_INTERRUPT). Usually this would be handled by L0 requesting a IRQ/NMI window by setting VMCS accordingly. However, this setting was done on VMCS01 and now VMCS02 is active instead. Thus, when L1 executes VMLAUNCH/VMRESUME we force L0 to perform pending event evaluation by requesting a KVM_REQ_EVENT. Note that above scenario exists when L1 KVM is about to enter L2 but requests an "immediate-exit". As in this case, L1 will disable-interrupts and then send a self-IPI before entering L2. Reviewed-by: Nikita Leshchenko Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson Signed-off-by: Liran Alon Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f910d33858d9..533a327372c8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12537,8 +12537,11 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) struct vmcs12 *vmcs12 = get_vmcs12(vcpu); bool from_vmentry = !!exit_qual; u32 dummy_exit_qual; + u32 vmcs01_cpu_exec_ctrl; int r = 0; + vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); + enter_guest_mode(vcpu); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) @@ -12574,6 +12577,25 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); } + /* + * If L1 had a pending IRQ/NMI until it executed + * VMLAUNCH/VMRESUME which wasn't delivered because it was + * disallowed (e.g. interrupts disabled), L0 needs to + * evaluate if this pending event should cause an exit from L2 + * to L1 or delivered directly to L2 (e.g. In case L1 don't + * intercept EXTERNAL_INTERRUPT). + * + * Usually this would be handled by L0 requesting a + * IRQ/NMI window by setting VMCS accordingly. 
However, + * this setting was done on VMCS01 and now VMCS02 is active + * instead. Thus, we force L0 to perform pending event + * evaluation by requesting a KVM_REQ_EVENT. + */ + if (vmcs01_cpu_exec_ctrl & + (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) { + kvm_make_request(KVM_REQ_EVENT, vcpu); + } + /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet -- GitLab From bdf7ffc89922a52a4f08a12f7421ea24bb7626a0 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 30 Aug 2018 10:03:30 +0800 Subject: [PATCH 1197/1692] KVM: LAPIC: Fix pv ipis out-of-bounds access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dan Carpenter reported that the untrusted data returns from kvm_register_read() results in the following static checker warning: arch/x86/kvm/lapic.c:576 kvm_pv_send_ipi() error: buffer underflow 'map->phys_map' 's32min-s32max' KVM guest can easily trigger this by executing the following assembly sequence in Ring0: mov $10, %rax mov $0xFFFFFFFF, %rbx mov $0xFFFFFFFF, %rdx mov $0, %rsi vmcall As this will cause KVM to execute the following code-path: vmx_handle_exit() -> handle_vmcall() -> kvm_emulate_hypercall() -> kvm_pv_send_ipi() which will reach out-of-bounds access. This patch fixes it by adding a check to kvm_pv_send_ipi() against map->max_apic_id, ignoring destinations that are not present and delivering the rest. We also check whether or not map->phys_map[min + i] is NULL since the max_apic_id is set to the max apic id, some phys_map maybe NULL when apic id is sparse, especially kvm unconditionally set max_apic_id to 255 to reserve enough space for any xAPIC ID. Reported-by: Dan Carpenter Reviewed-by: Liran Alon Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Liran Alon Cc: Dan Carpenter Signed-off-by: Wanpeng Li [Add second "if (min > map->max_apic_id)" to complete the fix. -Radim] Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/lapic.c | 27 ++++++++++++++++++++------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 3ad10f634d4c..8e90488c3d56 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1455,7 +1455,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, - unsigned long ipi_bitmap_high, int min, + unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); u64 kvm_get_arch_capabilities(void); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 0cefba28c864..17c0472c5b34 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, } int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, - unsigned long ipi_bitmap_high, int min, + unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit) { int i; @@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, rcu_read_lock(); map = rcu_dereference(kvm->arch.apic_map); + if (min > map->max_apic_id) + goto out; /* Bits above cluster_size are masked in the caller. 
*/ - for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) { - vcpu = map->phys_map[min + i]->vcpu; - count += kvm_apic_set_irq(vcpu, &irq, NULL); + for_each_set_bit(i, &ipi_bitmap_low, + min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { + if (map->phys_map[min + i]) { + vcpu = map->phys_map[min + i]->vcpu; + count += kvm_apic_set_irq(vcpu, &irq, NULL); + } } min += cluster_size; - for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) { - vcpu = map->phys_map[min + i]->vcpu; - count += kvm_apic_set_irq(vcpu, &irq, NULL); + + if (min > map->max_apic_id) + goto out; + + for_each_set_bit(i, &ipi_bitmap_high, + min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { + if (map->phys_map[min + i]) { + vcpu = map->phys_map[min + i]->vcpu; + count += kvm_apic_set_irq(vcpu, &irq, NULL); + } } +out: rcu_read_unlock(); return count; } -- GitLab From f74dd480cf4e31e12971c58a1d832044db945670 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Fri, 7 Sep 2018 20:15:22 +0200 Subject: [PATCH 1198/1692] r8169: set TxConfig register after TX / RX is enabled, just like RxConfig Commit 3559d81e76bf ("r8169: simplify rtl_hw_start_8169") changed order of two register writes: 1) Caused RxConfig to be written before TX / RX is enabled, 2) Caused TxConfig to be written before TX / RX is enabled. At least on XIDs 10000000 ("RTL8169sb/8110sb") and 18000000 ("RTL8169sc/8110sc") such writes are ignored by the chip, leaving values in these registers intact. Change 1) was reverted by commit 05212ba8132b42 ("r8169: set RxConfig after tx/rx is enabled for RTL8169sb/8110sb devices"), however change 2) wasn't. In practice, this caused TxConfig's "InterFrameGap time" and "Max DMA Burst Size per Tx DMA Burst" bits to be zero dramatically reducing TX performance (in my tests it dropped from around 500Mbps to around 50Mbps). This patch fixes the issue by moving TxConfig register write a bit later in the code so it happens after TX / RX is already enabled. Fixes: 05212ba8132b42 ("r8169: set RxConfig after tx/rx is enabled for RTL8169sb/8110sb devices") Signed-off-by: Maciej S. Szmigiero Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b08d51bf7a20..a1f37d58e2fe 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4634,13 +4634,13 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_set_rx_max_size(tp); rtl_set_rx_tx_desc_registers(tp); - rtl_set_tx_config_registers(tp); RTL_W8(tp, Cfg9346, Cfg9346_Lock); /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ RTL_R8(tp, IntrMask); RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); rtl_init_rxcfg(tp); + rtl_set_tx_config_registers(tp); rtl_set_rx_mode(tp->dev); /* no early-rx interrupts */ -- GitLab From 48c2bb0b9cf863e0ed78e269f188ce65b73e0fd1 Mon Sep 17 00:00:00 2001 From: Jay Kamat Date: Fri, 7 Sep 2018 14:34:04 -0700 Subject: [PATCH 1199/1692] Fix cg_read_strcmp() Fix a couple issues with cg_read_strcmp(), to improve correctness of cgroup tests - Fix cg_read_strcmp() always returning 0 for empty "needle" strings. Previously, this function read to a size = 1 buffer when comparing against empty strings, which would lead to cg_read_strcmp() comparing two empty strings. 
- Fix a memory leak in cg_read_strcmp() Fixes: 84092dbcf901 ("selftests: cgroup: add memory controller self-tests") Signed-off-by: Jay Kamat Acked-by: Roman Gushchin Signed-off-by: Shuah Khan (Samsung OSG) --- tools/testing/selftests/cgroup/cgroup_util.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 1c5d2b2a583b..f857def9a9e6 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -89,17 +89,28 @@ int cg_read(const char *cgroup, const char *control, char *buf, size_t len) int cg_read_strcmp(const char *cgroup, const char *control, const char *expected) { - size_t size = strlen(expected) + 1; + size_t size; char *buf; + int ret; + + /* Handle the case of comparing against empty string */ + if (!expected) + size = 32; + else + size = strlen(expected) + 1; buf = malloc(size); if (!buf) return -1; - if (cg_read(cgroup, control, buf, size)) + if (cg_read(cgroup, control, buf, size)) { + free(buf); return -1; + } - return strcmp(expected, buf); + ret = strcmp(expected, buf); + free(buf); + return ret; } int cg_read_strstr(const char *cgroup, const char *control, const char *needle) -- GitLab From a987785dcd6c8ae2915460582aebd6481c81eb67 Mon Sep 17 00:00:00 2001 From: Jay Kamat Date: Fri, 7 Sep 2018 14:34:05 -0700 Subject: [PATCH 1200/1692] Add tests for memory.oom.group Add tests for memory.oom.group for the following cases: - Killing all processes in a leaf cgroup, but leaving the parent untouched - Killing all processes in a parent and leaf cgroup - Keeping processes marked by OOM_SCORE_ADJ_MIN alive when considered for being killed by the group oom killer. Signed-off-by: Jay Kamat Acked-by: Roman Gushchin Signed-off-by: Shuah Khan (Samsung OSG) --- tools/testing/selftests/cgroup/cgroup_util.c | 21 ++ tools/testing/selftests/cgroup/cgroup_util.h | 1 + .../selftests/cgroup/test_memcontrol.c | 205 ++++++++++++++++++ 3 files changed, 227 insertions(+) diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index f857def9a9e6..14c9fe284806 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -348,3 +348,24 @@ int is_swap_enabled(void) return cnt > 1; } + +int set_oom_adj_score(int pid, int score) +{ + char path[PATH_MAX]; + int fd, len; + + sprintf(path, "/proc/%d/oom_score_adj", pid); + + fd = open(path, O_WRONLY | O_APPEND); + if (fd < 0) + return fd; + + len = dprintf(fd, "%d", score); + if (len < 0) { + close(fd); + return len; + } + + close(fd); + return 0; +} diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h index 1ff6f9f1abdc..9ac8b7958f83 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.h +++ b/tools/testing/selftests/cgroup/cgroup_util.h @@ -40,3 +40,4 @@ extern int get_temp_fd(void); extern int alloc_pagecache(int fd, size_t size); extern int alloc_anon(const char *cgroup, void *arg); extern int is_swap_enabled(void); +extern int set_oom_adj_score(int pid, int score); diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c index cf0bddc9d271..28d321ba311b 100644 --- a/tools/testing/selftests/cgroup/test_memcontrol.c +++ b/tools/testing/selftests/cgroup/test_memcontrol.c @@ -2,6 +2,7 @@ #define _GNU_SOURCE #include +#include #include #include #include @@ -202,6 +203,36 @@ 
static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg) return 0; } +static int alloc_anon_noexit(const char *cgroup, void *arg) +{ + int ppid = getppid(); + + if (alloc_anon(cgroup, arg)) + return -1; + + while (getppid() == ppid) + sleep(1); + + return 0; +} + +/* + * Wait until processes are killed asynchronously by the OOM killer + * If we exceed a timeout, fail. + */ +static int cg_test_proc_killed(const char *cgroup) +{ + int limit; + + for (limit = 10; limit > 0; limit--) { + if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0) + return 0; + + usleep(100000); + } + return -1; +} + /* * First, this test creates the following hierarchy: * A memory.min = 50M, memory.max = 200M @@ -964,6 +995,177 @@ static int test_memcg_sock(const char *root) return ret; } +/* + * This test disables swapping and tries to allocate anonymous memory + * up to OOM with memory.group.oom set. Then it checks that all + * processes in the leaf (but not the parent) were killed. + */ +static int test_memcg_oom_group_leaf_events(const char *root) +{ + int ret = KSFT_FAIL; + char *parent, *child; + + parent = cg_name(root, "memcg_test_0"); + child = cg_name(root, "memcg_test_0/memcg_test_1"); + + if (!parent || !child) + goto cleanup; + + if (cg_create(parent)) + goto cleanup; + + if (cg_create(child)) + goto cleanup; + + if (cg_write(parent, "cgroup.subtree_control", "+memory")) + goto cleanup; + + if (cg_write(child, "memory.max", "50M")) + goto cleanup; + + if (cg_write(child, "memory.swap.max", "0")) + goto cleanup; + + if (cg_write(child, "memory.oom.group", "1")) + goto cleanup; + + cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60)); + cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); + cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); + if (!cg_run(child, alloc_anon, (void *)MB(100))) + goto cleanup; + + if (cg_test_proc_killed(child)) + goto cleanup; + + if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0) + goto cleanup; + + if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + if (child) + cg_destroy(child); + if (parent) + cg_destroy(parent); + free(child); + free(parent); + + return ret; +} + +/* + * This test disables swapping and tries to allocate anonymous memory + * up to OOM with memory.group.oom set. Then it checks that all + * processes in the parent and leaf were killed. 
+ */ +static int test_memcg_oom_group_parent_events(const char *root) +{ + int ret = KSFT_FAIL; + char *parent, *child; + + parent = cg_name(root, "memcg_test_0"); + child = cg_name(root, "memcg_test_0/memcg_test_1"); + + if (!parent || !child) + goto cleanup; + + if (cg_create(parent)) + goto cleanup; + + if (cg_create(child)) + goto cleanup; + + if (cg_write(parent, "memory.max", "80M")) + goto cleanup; + + if (cg_write(parent, "memory.swap.max", "0")) + goto cleanup; + + if (cg_write(parent, "memory.oom.group", "1")) + goto cleanup; + + cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60)); + cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); + cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); + + if (!cg_run(child, alloc_anon, (void *)MB(100))) + goto cleanup; + + if (cg_test_proc_killed(child)) + goto cleanup; + if (cg_test_proc_killed(parent)) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + if (child) + cg_destroy(child); + if (parent) + cg_destroy(parent); + free(child); + free(parent); + + return ret; +} + +/* + * This test disables swapping and tries to allocate anonymous memory + * up to OOM with memory.group.oom set. Then it checks that all + * processes were killed except those set with OOM_SCORE_ADJ_MIN + */ +static int test_memcg_oom_group_score_events(const char *root) +{ + int ret = KSFT_FAIL; + char *memcg; + int safe_pid; + + memcg = cg_name(root, "memcg_test_0"); + + if (!memcg) + goto cleanup; + + if (cg_create(memcg)) + goto cleanup; + + if (cg_write(memcg, "memory.max", "50M")) + goto cleanup; + + if (cg_write(memcg, "memory.swap.max", "0")) + goto cleanup; + + if (cg_write(memcg, "memory.oom.group", "1")) + goto cleanup; + + safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1)); + if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN)) + goto cleanup; + + cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1)); + if (!cg_run(memcg, alloc_anon, (void *)MB(100))) + goto cleanup; + + if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3) + goto cleanup; + + if (kill(safe_pid, SIGKILL)) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + if (memcg) + cg_destroy(memcg); + free(memcg); + + return ret; +} + + #define T(x) { x, #x } struct memcg_test { int (*fn)(const char *root); @@ -978,6 +1180,9 @@ struct memcg_test { T(test_memcg_oom_events), T(test_memcg_swap_max), T(test_memcg_sock), + T(test_memcg_oom_group_leaf_events), + T(test_memcg_oom_group_parent_events), + T(test_memcg_oom_group_score_events), }; #undef T -- GitLab From ecfe951f0c1b169ea4b7dd6f3a404dfedd795bc2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 7 Sep 2018 23:55:17 +0100 Subject: [PATCH 1201/1692] afs: Fix cell specification to permit an empty address list Fix the cell specification mechanism to allow cells to be pre-created without having to specify at least one address (the addresses will be upcalled for). This allows the cell information preload service to avoid the need to issue loads of DNS lookups during boot to get the addresses for each cell (500+ lookups for the 'standard' cell list[*]). The lookups can be done later as each cell is accessed through the filesystem. Also remove the print statement that prints a line every time a new cell is added. [*] There are 144 cells in the list. Each cell is first looked up for an SRV record, and if that fails, for an AFSDB record. These get a list of server names, each of which then has to be looked up to get the addresses for that server. 
E.g.: dig srv _afs3-vlserver._udp.grand.central.org Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/afs/proc.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 0c3285c8db95..476dcbb79713 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -98,13 +98,13 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size) goto inval; args = strchr(name, ' '); - if (!args) - goto inval; - do { - *args++ = 0; - } while(*args == ' '); - if (!*args) - goto inval; + if (args) { + do { + *args++ = 0; + } while(*args == ' '); + if (!*args) + goto inval; + } /* determine command to perform */ _debug("cmd=%s name=%s args=%s", buf, name, args); @@ -120,7 +120,6 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size) if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags)) afs_put_cell(net, cell); - printk("kAFS: Added new cell '%s'\n", name); } else { goto inval; } -- GitLab From 8edfe2e992b75aee3da9316e9697c531194c2f53 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Fri, 7 Sep 2018 14:21:30 +0200 Subject: [PATCH 1202/1692] xen/netfront: fix waiting for xenbus state change Commit 822fb18a82aba ("xen-netfront: wait xenbus state change when load module manually") added a new wait queue to wait on for a state change when the module is loaded manually. Unfortunately there is no wakeup anywhere to stop that waiting. Instead of introducing a new wait queue rename the existing module_unload_q to module_wq and use it for both purposes (loading and unloading). As any state change of the backend might be intended to stop waiting do the wake_up_all() in any case when netback_changed() is called. Fixes: 822fb18a82aba ("xen-netfront: wait xenbus state change when load module manually") Cc: #4.18 Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: David S. 
Miller --- drivers/net/xen-netfront.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 73f596a90c69..9407acbd19a9 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,8 +87,7 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) -static DECLARE_WAIT_QUEUE_HEAD(module_load_q); -static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); +static DECLARE_WAIT_QUEUE_HEAD(module_wq); struct netfront_stats { u64 packets; @@ -1332,11 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_load_q, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + wait_event(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown); return netdev; exit: @@ -2010,15 +2009,14 @@ static void netback_changed(struct xenbus_device *dev, dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); + wake_up_all(&module_wq); + switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: - break; - case XenbusStateUnknown: - wake_up_all(&module_unload_q); break; case XenbusStateInitWait: @@ -2034,12 +2032,10 @@ static void netback_changed(struct xenbus_device *dev, break; case XenbusStateClosed: - wake_up_all(&module_unload_q); if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: - wake_up_all(&module_unload_q); xenbus_frontend_closed(dev); break; } @@ -2147,14 +2143,14 @@ static int xennet_remove(struct xenbus_device *dev) if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_unload_q, + wait_event(module_wq, xenbus_read_driver_state(dev->otherend) == XenbusStateClosing || xenbus_read_driver_state(dev->otherend) == XenbusStateUnknown); xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_unload_q, + wait_event(module_wq, xenbus_read_driver_state(dev->otherend) == XenbusStateClosed || xenbus_read_driver_state(dev->otherend) == -- GitLab From a162c3511410b50f09c002fea56fea2153b679d0 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 6 Sep 2018 14:50:16 -0700 Subject: [PATCH 1203/1692] net_sched: properly cancel netlink dump on failure When nla_put*() fails after nla_nest_start(), we need to call nla_nest_cancel() to cancel the message, otherwise we end up calling nla_nest_end() like a success. Fixes: 0ed5269f9e41 ("net/sched: add tunnel option support to act_tunnel_key") Cc: Davide Caratti Cc: Simon Horman Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- net/sched/act_tunnel_key.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 28d58bbc953e..681f6f04e7da 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -412,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, opt->type) || nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, - opt->length * 4, opt + 1)) + opt->length * 4, opt + 1)) { + nla_nest_cancel(skb, start); return -EMSGSIZE; + } len -= sizeof(struct geneve_opt) + opt->length * 4; src += sizeof(struct geneve_opt) + opt->length * 4; @@ -427,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb, const struct ip_tunnel_info *info) { struct nlattr *start; - int err; + int err = -EINVAL; if (!info->options_len) return 0; @@ -439,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb, if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { err = tunnel_key_geneve_opts_dump(skb, info); if (err) - return err; + goto err_out; } else { - return -EINVAL; +err_out: + nla_nest_cancel(skb, start); + return err; } nla_nest_end(skb, start); -- GitLab From 5cf4a8532c992bb22a9ecd5f6d93f873f4eaccc2 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Thu, 6 Sep 2018 15:54:59 +0200 Subject: [PATCH 1204/1692] tcp: really ignore MSG_ZEROCOPY if no SO_ZEROCOPY According to the documentation in msg_zerocopy.rst, the SO_ZEROCOPY flag was introduced because send(2) ignores unknown message flags and any legacy application which was accidentally passing the equivalent of MSG_ZEROCOPY earlier should not see any new behaviour. Before commit f214f915e7db ("tcp: enable MSG_ZEROCOPY"), a send(2) call which passed the equivalent of MSG_ZEROCOPY without setting SO_ZEROCOPY would succeed. However, after that commit, it fails with -ENOBUFS. So it appears that the SO_ZEROCOPY flag fails to fulfill its intended purpose. Fix it. Fixes: f214f915e7db ("tcp: enable MSG_ZEROCOPY") Signed-off-by: Vincent Whitchurch Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/core/skbuff.c | 3 --- net/ipv4/tcp.c | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c996c09d095f..b2c807f67aba 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) WARN_ON_ONCE(!in_task()); - if (!sock_flag(sk, SOCK_ZEROCOPY)) - return NULL; - skb = sock_omalloc(sk, 0, GFP_KERNEL); if (!skb) return NULL; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b8af2fec5ad5..10c6246396cc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) flags = msg->msg_flags; - if (flags & MSG_ZEROCOPY && size) { + if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { if (sk->sk_state != TCP_ESTABLISHED) { err = -EINVAL; goto out_err; -- GitLab From 47b7360ce563e18c524ce92b55fb4da72b3b3578 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 8 Sep 2018 12:07:26 +0200 Subject: [PATCH 1205/1692] x86/apic/vector: Make error return value negative activate_managed() returns EINVAL instead of -EINVAL in case of error. While this is unlikely to happen, the positive return value would cause further malfunction at the call site. 
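To make the hazard concrete, here is a minimal userspace illustration of why a positive EINVAL is dangerous (hypothetical names, not the kernel code): kernel-style call sites test for failure with "ret < 0", so a positive error value is silently treated as success.

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-in for a helper that returns EINVAL instead of -EINVAL. */
	static int activate(int have_cpu)
	{
		if (!have_cpu)
			return EINVAL;	/* bug: should be -EINVAL */
		return 0;
	}

	int main(void)
	{
		int ret = activate(0);

		if (ret < 0)
			printf("activation failed: %d\n", ret);
		else
			printf("positive EINVAL (%d) treated as success\n", ret); /* this branch runs */
		return 0;
	}

With the fix, the error propagates as -EINVAL and the failure branch is taken as expected.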
Fixes: 2db1f959d9dc ("x86/vector: Handle managed interrupts proper") Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org --- arch/x86/kernel/apic/vector.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 9f148e3d45b4..7654febd5102 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd) if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) { /* Something in the core code broke! Survive gracefully */ pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq); - return EINVAL; + return -EINVAL; } ret = assign_managed_vector(irqd, vector_searchmask); -- GitLab From 9bc4f28af75a91aea0ae383f50b0a430c4509303 Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Sun, 2 Sep 2018 11:14:50 -0700 Subject: [PATCH 1206/1692] x86/mm: Use WRITE_ONCE() when setting PTEs When page-table entries are set, the compiler might optimize their assignment by using multiple instructions to set the PTE. This might turn into a security hazard if the user somehow manages to use the interim PTE. L1TF does not make our lives easier, making even an interim non-present PTE a security hazard. Using WRITE_ONCE() to set PTEs and friends should prevent this potential security hazard. I skimmed the differences in the binary with and without this patch. The differences are (obviously) greater when CONFIG_PARAVIRT=n as more code optimizations are possible. For better and worse, the impact on the binary with this patch is pretty small. Skimming the code did not cause anything to jump out as a security hazard, but it seems that at least move_soft_dirty_pte() caused set_pte_at() to use multiple writes. Signed-off-by: Nadav Amit Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Cc: Dave Hansen Cc: Andi Kleen Cc: Josh Poimboeuf Cc: Michal Hocko Cc: Vlastimil Babka Cc: Sean Christopherson Cc: Andy Lutomirski Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180902181451.80520-1-namit@vmware.com --- arch/x86/include/asm/pgtable.h | 2 +- arch/x86/include/asm/pgtable_64.h | 20 ++++++++++---------- arch/x86/mm/pgtable.c | 8 ++++---- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index e4ffa565a69f..690c0307afed 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, return xchg(pmdp, pmd); } else { pmd_t old = *pmdp; - *pmdp = pmd; + WRITE_ONCE(*pmdp, pmd); return old; } } diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index f773d5e6c8cc..ce2b59047cb8 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -55,15 +55,15 @@ struct mm_struct; void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte); void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) +static inline void native_set_pte(pte_t *ptep, pte_t pte) { - *ptep = native_make_pte(0); + WRITE_ONCE(*ptep, pte); } -static inline void native_set_pte(pte_t *ptep, pte_t pte) +static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) { - *ptep = pte; + native_set_pte(ptep, native_make_pte(0)); } static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) @@ 
-73,7 +73,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { - *pmdp = pmd; + WRITE_ONCE(*pmdp, pmd); } static inline void native_pmd_clear(pmd_t *pmd) @@ -109,7 +109,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) static inline void native_set_pud(pud_t *pudp, pud_t pud) { - *pudp = pud; + WRITE_ONCE(*pudp, pud); } static inline void native_pud_clear(pud_t *pud) @@ -137,13 +137,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) pgd_t pgd; if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { - *p4dp = p4d; + WRITE_ONCE(*p4dp, p4d); return; } pgd = native_make_pgd(native_p4d_val(p4d)); pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd); - *p4dp = native_make_p4d(native_pgd_val(pgd)); + WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd))); } static inline void native_p4d_clear(p4d_t *p4d) @@ -153,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d) static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { - *pgdp = pti_set_user_pgtbl(pgdp, pgd); + WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd)); } static inline void native_pgd_clear(pgd_t *pgd) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index e848a4811785..ae394552fb94 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -269,7 +269,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp) if (pgd_val(pgd) != 0) { pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); - *pgdp = native_make_pgd(0); + pgd_clear(pgdp); paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); pmd_free(mm, pmd); @@ -494,7 +494,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, int changed = !pte_same(*ptep, entry); if (changed && dirty) - *ptep = entry; + set_pte(ptep, entry); return changed; } @@ -509,7 +509,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, VM_BUG_ON(address & ~HPAGE_PMD_MASK); if (changed && dirty) { - *pmdp = entry; + set_pmd(pmdp, entry); /* * We had a write-protection fault here and changed the pmd * to to more permissive. No need to flush the TLB for that, @@ -529,7 +529,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, VM_BUG_ON(address & ~HPAGE_PUD_MASK); if (changed && dirty) { - *pudp = entry; + set_pud(pudp, entry); /* * We had a write-protection fault here and changed the pud * to to more permissive. No need to flush the TLB for that, -- GitLab From f0b0d88a825149ef3b06656886bc211c71dcb852 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 6 Sep 2018 16:37:24 -0700 Subject: [PATCH 1207/1692] kbuild: modules_install: warn when missing System.map file If there is no System.map file for "make modules_install", scripts/depmod.sh will silently exit with success, having done nothing. Since this is an unexpected situation, change it to report a Warning for the missing file. The behavior is not changed except for the Warning message. The (previous) silent success and new Warning can be reproduced by: $ make mrproper; make defconfig $ make modules; make modules_install and since System.map is produced by "make vmlinux", the steps above omit producing the System.map file. Reported-by: Masahiro Yamada Signed-off-by: Randy Dunlap Signed-off-by: Masahiro Yamada --- scripts/depmod.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/depmod.sh b/scripts/depmod.sh index e5f0aad75b96..e083bcae343f 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh @@ -11,6 +11,7 @@ DEPMOD=$1 KERNELRELEASE=$2 if ! 
test -r System.map ; then + echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2 exit 0 fi -- GitLab From bcfb84a996f6fa90b5e6e2954b2accb7a4711097 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 3 Sep 2018 13:15:58 +1000 Subject: [PATCH 1208/1692] fs/cifs: suppress a string overflow warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A powerpc build of cifs with gcc v8.2.0 produces this warning: fs/cifs/cifssmb.c: In function ‘CIFSSMBNegotiate’: fs/cifs/cifssmb.c:605:3: warning: ‘strncpy’ writing 16 bytes into a region of size 1 overflows the destination [-Wstringop-overflow=] strncpy(pSMB->DialectsArray+count, protocols[i].name, 16); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since we are already doing a strlen() on the source, change the strncpy to a memcpy(). Signed-off-by: Stephen Rothwell Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index dc2f4cf08fe9..5657b79dbc99 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -601,10 +601,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses) } count = 0; + /* + * We know that all the name entries in the protocols array + * are short (< 16 bytes anyway) and are NUL terminated. + */ for (i = 0; i < CIFS_NUM_PROT; i++) { - strncpy(pSMB->DialectsArray+count, protocols[i].name, 16); - count += strlen(protocols[i].name) + 1; - /* null at end of source and target buffers anyway */ + size_t len = strlen(protocols[i].name) + 1; + + memcpy(pSMB->DialectsArray+count, protocols[i].name, len); + count += len; } inc_rfc1001_len(pSMB, count); pSMB->ByteCount = cpu_to_le16(count); -- GitLab From 5890184d2b506f88886b7322d7d44464453bd3a6 Mon Sep 17 00:00:00 2001 From: Stefan Metzmacher Date: Fri, 7 Sep 2018 18:24:17 +0200 Subject: [PATCH 1209/1692] fs/cifs: require sha512 This got lost in commit 0fdfef9aa7ee68ddd508aef7c98630cfc054f8d6, which removed CONFIG_CIFS_SMB311. Signed-off-by: Stefan Metzmacher Fixes: 0fdfef9aa7ee68ddd ("smb3: simplify code by removing CONFIG_CIFS_SMB311") CC: Stable CC: linux-cifs@vger.kernel.org Signed-off-by: Steve French --- fs/cifs/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 35c83fe7dba0..abcd78e332fe 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -6,6 +6,7 @@ config CIFS select CRYPTO_MD4 select CRYPTO_MD5 select CRYPTO_SHA256 + select CRYPTO_SHA512 select CRYPTO_CMAC select CRYPTO_HMAC select CRYPTO_ARC4 -- GitLab From 772ed869f535b4ec2b134645c951ff22de4d3f79 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 9 Sep 2018 08:15:20 +0000 Subject: [PATCH 1210/1692] net: ena: fix surprise unplug NULL dereference kernel crash Starting with driver version 1.5.0, in case of a surprise device unplug, there is a race caused by invoking ena_destroy_device() from two different places. As a result, the readless register might be accessed after it was destroyed. Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index c673ac2df65b..170830b807fe 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3409,12 +3409,12 @@ static void ena_remove(struct pci_dev *pdev) netdev->rx_cpu_rmap = NULL; } #endif /* CONFIG_RFS_ACCEL */ - - unregister_netdev(netdev); del_timer_sync(&adapter->timer_service); cancel_work_sync(&adapter->reset_task); + unregister_netdev(netdev); + /* Reset the device only if the device is running. */ if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) ena_com_dev_reset(ena_dev, adapter->reset_reason); -- GitLab From ef5b0771d247379c90c8bf1332ff32f7f74bff7f Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 9 Sep 2018 08:15:21 +0000 Subject: [PATCH 1211/1692] net: ena: fix driver when PAGE_SIZE == 64kB The buffer length field in the ena rx descriptor is 16 bit, and the current driver passes a full page in each ena rx descriptor. When PAGE_SIZE equals 64kB or more, the buffer length field becomes zero. To solve this issue, limit the ena Rx descriptor to use 16kB even when allocating 64kB kernel pages. This change would not impact ena device functionality, as 16kB is still larger than maximum MTU. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 10 +++++----- drivers/net/ethernet/amazon/ena/ena_netdev.h | 11 +++++++++++ 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 170830b807fe..69e684fd2787 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring, return -ENOMEM; } - dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, + dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { u64_stats_update_begin(&rx_ring->syncp); @@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring, rx_info->page_offset = 0; ena_buf = &rx_info->ena_buf; ena_buf->paddr = dma; - ena_buf->len = PAGE_SIZE; + ena_buf->len = ENA_PAGE_SIZE; return 0; } @@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring, return; } - dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, + dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE, DMA_FROM_DEVICE); __free_page(page); @@ -916,10 +916,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, do { dma_unmap_page(rx_ring->dev, dma_unmap_addr(&rx_info->ena_buf, paddr), - PAGE_SIZE, DMA_FROM_DEVICE); + ENA_PAGE_SIZE, DMA_FROM_DEVICE); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, - rx_info->page_offset, len, PAGE_SIZE); + rx_info->page_offset, len, ENA_PAGE_SIZE); netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx skb updated. len %d. 
data_len %d\n", diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index f1972b5ab650..7c7ae56c52cf 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); int ena_get_sset_count(struct net_device *netdev, int sset); +/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the + * driver passas 0. + * Since the max packet size the ENA handles is ~9kB limit the buffer length to + * 16kB. + */ +#if PAGE_SIZE > SZ_16K +#define ENA_PAGE_SIZE SZ_16K +#else +#define ENA_PAGE_SIZE PAGE_SIZE +#endif + #endif /* !(ENA_H) */ -- GitLab From cfa324a514233b28a6934de619183eee941f02d7 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 9 Sep 2018 08:15:22 +0000 Subject: [PATCH 1212/1692] net: ena: fix device destruction to gracefully free resources When ena_destroy_device() is called from ena_suspend(), the device is still reachable from the driver. Therefore, the driver can send a command to the device to free all resources. However, in all other cases of calling ena_destroy_device(), the device is potentially in an error state and unreachable from the driver. In these cases the driver must not send commands to the device. The current implementation does not request resource freeing from the device even when possible. We add the graceful parameter to ena_destroy_device() to enable resource freeing when possible, and use it in ena_suspend(). Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 69e684fd2787..035d47d2179a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl); static int ena_rss_init_default(struct ena_adapter *adapter); static void check_for_admin_com_state(struct ena_adapter *adapter); -static void ena_destroy_device(struct ena_adapter *adapter); +static void ena_destroy_device(struct ena_adapter *adapter, bool graceful); static int ena_restore_device(struct ena_adapter *adapter); static void ena_tx_timeout(struct net_device *dev) @@ -1900,7 +1900,7 @@ static int ena_close(struct net_device *netdev) "Destroy failure, restarting device\n"); ena_dump_stats_to_dmesg(adapter); /* rtnl lock already obtained in dev_ioctl() layer */ - ena_destroy_device(adapter); + ena_destroy_device(adapter, false); ena_restore_device(adapter); } @@ -2550,7 +2550,7 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, return rc; } -static void ena_destroy_device(struct ena_adapter *adapter) +static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) { struct net_device *netdev = adapter->netdev; struct ena_com_dev *ena_dev = adapter->ena_dev; @@ -2563,7 +2563,8 @@ static void ena_destroy_device(struct ena_adapter *adapter) dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); adapter->dev_up_before_reset = dev_up; - ena_com_set_admin_running_state(ena_dev, false); + if (!graceful) + ena_com_set_admin_running_state(ena_dev, false); if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) ena_down(adapter); @@ -2665,7 +2666,7 @@ static void ena_fw_reset_device(struct work_struct *work) return; } rtnl_lock(); - 
ena_destroy_device(adapter); + ena_destroy_device(adapter, false); ena_restore_device(adapter); rtnl_unlock(); } @@ -3467,7 +3468,7 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state) "ignoring device reset request as the device is being suspended\n"); clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } - ena_destroy_device(adapter); + ena_destroy_device(adapter, true); rtnl_unlock(); return 0; } -- GitLab From fe870c77efdf8682252545cbd3d29800d8379efc Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 9 Sep 2018 08:15:23 +0000 Subject: [PATCH 1213/1692] net: ena: fix potential double ena_destroy_device() ena_destroy_device() can potentially be called twice. To avoid this, check that the device is running and only then proceed destroying it. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 035d47d2179a..a68c2a8d4da2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2556,6 +2556,9 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) struct ena_com_dev *ena_dev = adapter->ena_dev; bool dev_up; + if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) + return; + netif_carrier_off(netdev); del_timer_sync(&adapter->timer_service); @@ -2592,6 +2595,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) adapter->reset_reason = ENA_REGS_RESET_NORMAL; clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); } static int ena_restore_device(struct ena_adapter *adapter) @@ -2636,6 +2640,7 @@ static int ena_restore_device(struct ena_adapter *adapter) } } + set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); dev_err(&pdev->dev, "Device reset completed successfully\n"); -- GitLab From 944b28aa2982b4590d4d4dfc777cf85135dca2c0 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 9 Sep 2018 08:15:24 +0000 Subject: [PATCH 1214/1692] net: ena: fix missing lock during device destruction acquire the rtnl_lock during device destruction to avoid using partially destroyed device. ena_remove() shares almost the same logic as ena_destroy_device(), so use ena_destroy_device() and avoid duplications. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a68c2a8d4da2..b9ce2a6a87ed 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3421,24 +3421,18 @@ static void ena_remove(struct pci_dev *pdev) unregister_netdev(netdev); - /* Reset the device only if the device is running. */ + /* If the device is running then we want to make sure the device will be + * reset to make sure no more events will be issued by the device. 
+ */ if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) - ena_com_dev_reset(ena_dev, adapter->reset_reason); - - ena_free_mgmnt_irq(adapter); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); - ena_disable_msix(adapter); + rtnl_lock(); + ena_destroy_device(adapter, true); + rtnl_unlock(); free_netdev(netdev); - ena_com_mmio_reg_read_request_destroy(ena_dev); - - ena_com_abort_admin_commands(ena_dev); - - ena_com_wait_for_abort_completion(ena_dev); - - ena_com_admin_destroy(ena_dev); - ena_com_rss_destroy(ena_dev); ena_com_delete_debug_area(ena_dev); -- GitLab From 28abf4e9c9201eda5c4d29ea609d07e877b464b8 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 9 Sep 2018 08:15:25 +0000 Subject: [PATCH 1215/1692] net: ena: fix missing calls to READ_ONCE Add READ_ONCE calls where necessary (for example when iterating over a memory field that gets updated by the hardware). Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 17f12c18d225..c37deef3bcf1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -459,7 +459,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu cqe = &admin_queue->cq.entries[head_masked]; /* Go over all the completions */ - while ((cqe->acq_common_descriptor.flags & + while ((READ_ONCE(cqe->acq_common_descriptor.flags) & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { /* Do not read the rest of the completion entry before the * phase bit was validated @@ -637,7 +637,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) mmiowb(); for (i = 0; i < timeout; i++) { - if (read_resp->req_id == mmio_read->seq_num) + if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) break; udelay(1); @@ -1796,8 +1796,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) aenq_common = &aenq_e->aenq_common_desc; /* Go over all the events */ - while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == - phase) { + while ((READ_ONCE(aenq_common->flags) & + ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", aenq_common->group, aenq_common->syndrom, (u64)aenq_common->timestamp_low + -- GitLab From 37dff155dcf57f6c08bf1641c5ddf9abd45f2b1f Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Sun, 9 Sep 2018 08:15:26 +0000 Subject: [PATCH 1216/1692] net: ena: fix incorrect usage of memory barriers Added memory barriers where they were missing to support multiple architectures, and removed redundant ones. As part of removing the redundant memory barriers and improving performance, we moved to more relaxed versions of memory barriers, as well as to the more relaxed version of writel - writel_relaxed, while maintaining correctness. Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 16 +++++----- drivers/net/ethernet/amazon/ena/ena_eth_com.c | 6 ++++ drivers/net/ethernet/amazon/ena/ena_eth_com.h | 8 ++--- drivers/net/ethernet/amazon/ena/ena_netdev.c | 30 +++++++------------ 4 files changed, 26 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index c37deef3bcf1..7635c38e77dd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -464,7 +464,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu /* Do not read the rest of the completion entry before the * phase bit was validated */ - rmb(); + dma_rmb(); ena_com_handle_single_admin_completion(admin_queue, cqe); head_masked++; @@ -627,15 +627,8 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) mmio_read_reg |= mmio_read->seq_num & ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; - /* make sure read_resp->req_id get updated before the hw can write - * there - */ - wmb(); - - writel_relaxed(mmio_read_reg, - ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); + writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); - mmiowb(); for (i = 0; i < timeout; i++) { if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) break; @@ -1798,6 +1791,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) /* Go over all the events */ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { + /* Make sure the phase bit (ownership) is as expected before + * reading the rest of the descriptor. + */ + dma_rmb(); + pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", aenq_common->group, aenq_common->syndrom, (u64)aenq_common->timestamp_low + diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index ea149c134e15..1c682b76190f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( if (desc_phase != expected_phase) return NULL; + /* Make sure we read the rest of the descriptor after the phase bit + * has been read + */ + dma_rmb(); + return cdesc; } @@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) if (cdesc_phase != expected_phase) return -EAGAIN; + dma_rmb(); if (unlikely(cdesc->req_id >= io_cq->q_depth)) { pr_err("Invalid req id %d\n", cdesc->req_id); return -EINVAL; diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 6fdc753d9483..2f7657227cfe 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) return io_sq->q_depth - 1 - cnt; } -static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, - bool relaxed) +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) { u16 tail; @@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, pr_debug("write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail); - if (relaxed) - writel_relaxed(tail, io_sq->db_addr); - else - writel(tail, io_sq->db_addr); + writel(tail, io_sq->db_addr); return 0; } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c 
b/drivers/net/ethernet/amazon/ena/ena_netdev.c index b9ce2a6a87ed..29b5774dd32d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) rx_ring->qid, i, num); } - if (likely(i)) { - /* Add memory barrier to make sure the desc were written before - * issue a doorbell - */ - wmb(); - ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true); - mmiowb(); - } + /* ena_com_write_sq_doorbell issues a wmb() */ + if (likely(i)) + ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); rx_ring->next_to_use = next_to_use; @@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, tx_ring->ring_size); - /* This WMB is aimed to: - * 1 - perform smp barrier before reading next_to_completion - * 2 - make sure the desc were written before trigger DB - */ - wmb(); - /* stop the queue when no more space available, the packet can have up * to sgl_size + 2. one for the meta descriptor and one for header * (if the header is larger than tx_max_header_size). @@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) * stop the queue but meanwhile clean_tx_irq updates * next_to_completion and terminates. * The queue will remain stopped forever. - * To solve this issue this function perform rmb, check - * the wakeup condition and wake up the queue if needed. + * To solve this issue add a mb() to make sure that + * netif_tx_stop_queue() write is vissible before checking if + * there is additional space in the queue. */ - smp_rmb(); + smp_mb(); if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > ENA_TX_WAKEUP_THRESH) { @@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) } if (netif_xmit_stopped(txq) || !skb->xmit_more) { - /* trigger the dma engine */ - ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false); + /* trigger the dma engine. ena_com_write_sq_doorbell() + * has a mb + */ + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); u64_stats_update_begin(&tx_ring->syncp); tx_ring->tx_stats.doorbells++; u64_stats_update_end(&tx_ring->syncp); -- GitLab From 52ea992cfac357b73180d5c051dca43bc8d20c2a Mon Sep 17 00:00:00 2001 From: Vakul Garg Date: Thu, 6 Sep 2018 21:41:40 +0530 Subject: [PATCH 1217/1692] net/tls: Set count of SG entries if sk_alloc_sg returns -ENOSPC tls_sw_sendmsg() allocates plaintext and encrypted SG entries using function sk_alloc_sg(). In case the number of SG entries hit MAX_SKB_FRAGS, sk_alloc_sg() returns -ENOSPC and sets the variable for current SG index to '0'. This leads to calling of function tls_push_record() with 'sg_encrypted_num_elem = 0' and later causes kernel crash. To fix this, set the number of SG elements to the number of elements in plaintext/encrypted SG arrays in case sk_alloc_sg() returns -ENOSPC. Fixes: 3c4d7559159b ("tls: kernel TLS support") Signed-off-by: Vakul Garg Signed-off-by: David S. 
Miller --- net/tls/tls_sw.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 52fbe727d7c1..e28a6ff25d96 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len) &ctx->sg_encrypted_num_elem, &ctx->sg_encrypted_size, 0); + if (rc == -ENOSPC) + ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data); + return rc; } @@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len) &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, tls_ctx->pending_open_record_frags); + if (rc == -ENOSPC) + ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data); + return rc; } -- GitLab From 5d407b071dc369c26a38398326ee2be53651cfe4 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 10 Sep 2018 02:47:05 +0900 Subject: [PATCH 1218/1692] ip: frags: fix crash in ip_do_fragment() A kernel crash occurrs when defragmented packet is fragmented in ip_do_fragment(). In defragment routine, skb_orphan() is called and skb->ip_defrag_offset is set. but skb->sk and skb->ip_defrag_offset are same union member. so that frag->sk is not NULL. Hence crash occurrs in skb->sk check routine in ip_do_fragment() when defragmented packet is fragmented. test commands: %iptables -t nat -I POSTROUTING -j MASQUERADE %hping3 192.168.4.2 -s 1000 -p 2000 -d 60000 splat looks like: [ 261.069429] kernel BUG at net/ipv4/ip_output.c:636! [ 261.075753] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI [ 261.083854] CPU: 1 PID: 1349 Comm: hping3 Not tainted 4.19.0-rc2+ #3 [ 261.100977] RIP: 0010:ip_do_fragment+0x1613/0x2600 [ 261.106945] Code: e8 e2 38 e3 fe 4c 8b 44 24 18 48 8b 74 24 08 e9 92 f6 ff ff 80 3c 02 00 0f 85 da 07 00 00 48 8b b5 d0 00 00 00 e9 25 f6 ff ff <0f> 0b 0f 0b 44 8b 54 24 58 4c 8b 4c 24 18 4c 8b 5c 24 60 4c 8b 6c [ 261.127015] RSP: 0018:ffff8801031cf2c0 EFLAGS: 00010202 [ 261.134156] RAX: 1ffff1002297537b RBX: ffffed0020639e6e RCX: 0000000000000004 [ 261.142156] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff880114ba9bd8 [ 261.150157] RBP: ffff880114ba8a40 R08: ffffed0022975395 R09: ffffed0022975395 [ 261.158157] R10: 0000000000000001 R11: ffffed0022975394 R12: ffff880114ba9ca4 [ 261.166159] R13: 0000000000000010 R14: ffff880114ba9bc0 R15: dffffc0000000000 [ 261.174169] FS: 00007fbae2199700(0000) GS:ffff88011b400000(0000) knlGS:0000000000000000 [ 261.183012] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 261.189013] CR2: 00005579244fe000 CR3: 0000000119bf4000 CR4: 00000000001006e0 [ 261.198158] Call Trace: [ 261.199018] ? dst_output+0x180/0x180 [ 261.205011] ? save_trace+0x300/0x300 [ 261.209018] ? ip_copy_metadata+0xb00/0xb00 [ 261.213034] ? sched_clock_local+0xd4/0x140 [ 261.218158] ? kill_l4proto+0x120/0x120 [nf_conntrack] [ 261.223014] ? rt_cpu_seq_stop+0x10/0x10 [ 261.227014] ? find_held_lock+0x39/0x1c0 [ 261.233008] ip_finish_output+0x51d/0xb50 [ 261.237006] ? ip_fragment.constprop.56+0x220/0x220 [ 261.243011] ? nf_ct_l4proto_register_one+0x5b0/0x5b0 [nf_conntrack] [ 261.250152] ? rcu_is_watching+0x77/0x120 [ 261.255010] ? nf_nat_ipv4_out+0x1e/0x2b0 [nf_nat_ipv4] [ 261.261033] ? nf_hook_slow+0xb1/0x160 [ 261.265007] ip_output+0x1c7/0x710 [ 261.269005] ? ip_mc_output+0x13f0/0x13f0 [ 261.273002] ? __local_bh_enable_ip+0xe9/0x1b0 [ 261.278152] ? ip_fragment.constprop.56+0x220/0x220 [ 261.282996] ? nf_hook_slow+0xb1/0x160 [ 261.287007] raw_sendmsg+0x21f9/0x4420 [ 261.291008] ? dst_output+0x180/0x180 [ 261.297003] ? sched_clock_cpu+0x126/0x170 [ 261.301003] ? 
find_held_lock+0x39/0x1c0 [ 261.306155] ? stop_critical_timings+0x420/0x420 [ 261.311004] ? check_flags.part.36+0x450/0x450 [ 261.315005] ? _raw_spin_unlock_irq+0x29/0x40 [ 261.320995] ? _raw_spin_unlock_irq+0x29/0x40 [ 261.326142] ? cyc2ns_read_end+0x10/0x10 [ 261.330139] ? raw_bind+0x280/0x280 [ 261.334138] ? sched_clock_cpu+0x126/0x170 [ 261.338995] ? check_flags.part.36+0x450/0x450 [ 261.342991] ? __lock_acquire+0x4500/0x4500 [ 261.348994] ? inet_sendmsg+0x11c/0x500 [ 261.352989] ? dst_output+0x180/0x180 [ 261.357012] inet_sendmsg+0x11c/0x500 [ ... ] v2: - clear skb->sk at reassembly routine.(Eric Dumarzet) Fixes: fa0f527358bd ("ip: use rb trees for IP frag queue.") Suggested-by: Eric Dumazet Signed-off-by: Taehee Yoo Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/ip_fragment.c | 1 + net/ipv6/netfilter/nf_conntrack_reasm.c | 1 + 2 files changed, 2 insertions(+) diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 88281fbce88c..e7227128df2c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, nextp = &fp->next; fp->prev = NULL; memset(&fp->rbnode, 0, sizeof(fp->rbnode)); + fp->sk = NULL; head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 2a14d8b65924..8f68a518d9db 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; + fp->sk = NULL; } sub_frag_mem_limit(fq->q.net, head->truesize); -- GitLab From 11da3a7f84f19c26da6f86af878298694ede0804 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 9 Sep 2018 17:26:43 -0700 Subject: [PATCH 1219/1692] Linux 4.19-rc3 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 19948e556941..4d5c883a98e5 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = Merciless Moray # *DOCUMENTATION* -- GitLab From 493626f2d87a74e6dbea1686499ed6e7e600484e Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sun, 9 Sep 2018 22:25:12 +0900 Subject: [PATCH 1220/1692] ALSA: bebob: use address returned by kmalloc() instead of kernel stack for streaming DMA mapping When executing 'fw_run_transaction()' with 'TCODE_WRITE_BLOCK_REQUEST', an address of 'payload' argument is used for streaming DMA mapping by 'firewire_ohci' module if 'size' argument is larger than 8 byte. Although in this case the address should not be on kernel stack, current implementation of ALSA bebob driver uses data in kernel stack for a cue to boot M-Audio devices. This often brings unexpected result, especially for a case of CONFIG_VMAP_STACK=y. This commit fixes the bug. 
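The underlying constraint: payloads handed to fw_run_transaction() for block requests get streaming-DMA mapped by the controller driver, and kernel-stack memory is not a valid target for that mapping (with CONFIG_VMAP_STACK the stack is vmalloc'ed and may not even be physically contiguous). A minimal sketch of the rule, illustrative only; fill_payload() and send_block_request() are placeholder names, not real kernel APIs:

	/* Broken: 'buf' lives in the caller's stack frame, so DMA-mapping it is
	 * undefined behaviour, especially with CONFIG_VMAP_STACK=y.
	 */
	__le32 buf[3];
	fill_payload(buf);				/* hypothetical helper */
	send_block_request(dev, buf, sizeof(buf));	/* payload gets DMA-mapped */

	/* Correct: heap memory from kmalloc() is safe to map for streaming DMA. */
	__le32 *p = kmalloc_array(3, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	fill_payload(p);
	send_block_request(dev, p, 3 * sizeof(*p));
	kfree(p);
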
Reference: https://bugzilla.kernel.org/show_bug.cgi?id=201021 Reference: https://forum.manjaro.org/t/firewire-m-audio-410-driver-wont-load-firmware/51165 Fixes: a2b2a7798fb6('ALSA: bebob: Send a cue to load firmware for M-Audio Firewire series') Cc: # v3.16+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/bebob/bebob_maudio.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c index bd55620c6a47..0c5a4cbb99ba 100644 --- a/sound/firewire/bebob/bebob_maudio.c +++ b/sound/firewire/bebob/bebob_maudio.c @@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) struct fw_device *device = fw_parent_device(unit); int err, rcode; u64 date; - __le32 cues[3] = { - cpu_to_le32(MAUDIO_BOOTLOADER_CUE1), - cpu_to_le32(MAUDIO_BOOTLOADER_CUE2), - cpu_to_le32(MAUDIO_BOOTLOADER_CUE3) - }; + __le32 *cues; /* check date of software used to build */ err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE, &date, sizeof(u64)); if (err < 0) - goto end; + return err; /* * firmware version 5058 or later has date later than "20070401", but * 'date' is not null-terminated. @@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) if (date < 0x3230303730343031LL) { dev_err(&unit->device, "Use firmware version 5058 or later\n"); - err = -ENOSYS; - goto end; + return -ENXIO; } + cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL); + if (!cues) + return -ENOMEM; + + cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1); + cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2); + cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3); + rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST, device->node_id, device->generation, device->max_speed, BEBOB_ADDR_REG_REQ, - cues, sizeof(cues)); + cues, 3 * sizeof(*cues)); + kfree(cues); if (rcode != RCODE_COMPLETE) { dev_err(&unit->device, "Failed to send a cue to load firmware\n"); err = -EIO; } -end: + return err; } -- GitLab From 36f3a6e02c143a7e9e4e143e416371f67bc1fae6 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sun, 9 Sep 2018 22:25:52 +0900 Subject: [PATCH 1221/1692] ALSA: fireface: fix memory leak in ff400_switch_fetching_mode() An allocated memory forgets to be released. 
Fixes: 76fdb3a9e13 ('ALSA: fireface: add support for Fireface 400') Cc: # 4.12+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/fireface/ff-protocol-ff400.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c index ad7a0a32557d..64c3cb0fb926 100644 --- a/sound/firewire/fireface/ff-protocol-ff400.c +++ b/sound/firewire/fireface/ff-protocol-ff400.c @@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable) { __le32 *reg; int i; + int err; reg = kcalloc(18, sizeof(__le32), GFP_KERNEL); if (reg == NULL) @@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable) reg[i] = cpu_to_le32(0x00000001); } - return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, - FF400_FETCH_PCM_FRAMES, reg, - sizeof(__le32) * 18, 0); + err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, + FF400_FETCH_PCM_FRAMES, reg, + sizeof(__le32) * 18, 0); + kfree(reg); + return err; } static void ff400_dump_sync_status(struct snd_ff *ff, -- GitLab From 16160c1946b702dcfa95ef63389a56deb2f1c7cb Mon Sep 17 00:00:00 2001 From: Jacek Tomaka Date: Thu, 2 Aug 2018 09:38:30 +0800 Subject: [PATCH 1222/1692] perf/x86/intel: Add support/quirk for the MISPREDICT bit on Knights Landing CPUs Problem: perf did not show branch predicted/mispredicted bit in brstack. Output of perf -F brstack for profile collected Before: 0x4fdbcd/0x4fdc03/-/-/-/0 0x45f4c1/0x4fdba0/-/-/-/0 0x45f544/0x45f4bb/-/-/-/0 0x45f555/0x45f53c/-/-/-/0 0x7f66901cc24b/0x45f555/-/-/-/0 0x7f66901cc22e/0x7f66901cc23d/-/-/-/0 0x7f66901cc1ff/0x7f66901cc20f/-/-/-/0 0x7f66901cc1e8/0x7f66901cc1fc/-/-/-/0 After: 0x4fdbcd/0x4fdc03/P/-/-/0 0x45f4c1/0x4fdba0/P/-/-/0 0x45f544/0x45f4bb/P/-/-/0 0x45f555/0x45f53c/P/-/-/0 0x7f66901cc24b/0x45f555/P/-/-/0 0x7f66901cc22e/0x7f66901cc23d/P/-/-/0 0x7f66901cc1ff/0x7f66901cc20f/P/-/-/0 0x7f66901cc1e8/0x7f66901cc1fc/P/-/-/0 Cause: As mentioned in Software Development Manual vol 3, 17.4.8.1, IA32_PERF_CAPABILITIES[5:0] indicates the format of the address that is stored in the LBR stack. Knights Landing reports 1 (LBR_FORMAT_LIP) as its format. Despite that, registers containing FROM address of the branch, do have MISPREDICT bit but because of the format indicated in IA32_PERF_CAPABILITIES[5:0], LBR did not read MISPREDICT bit. Solution: Teach LBR about above Knights Landing quirk and make it read MISPREDICT bit. 
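As a rough illustration of what the quirk enables (a sketch, not the driver's code): once the format is handled as LBR_FORMAT_EIP_FLAGS, the top bit of each LBR "from" MSR is interpreted as the misprediction flag, following the kernel's LBR_FROM_FLAG_MISPRED definition; 'idx' below stands for the LBR entry index:

	u64 from;
	bool mispred;

	/* Read MSR_LASTBRANCH_<idx>_FROM_IP; x86_pmu.lbr_from is the base MSR. */
	rdmsrl(x86_pmu.lbr_from + idx, from);

	/* With a flags-carrying format, bit 63 is the mispredict bit. */
	mispred = !!(from & BIT_ULL(63));
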
Signed-off-by: Jacek Tomaka Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180802013830.10600-1-jacekt@dugeo.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/lbr.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index f3e006bed9a7..c88ed39582a1 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void) x86_pmu.lbr_sel_mask = LBR_SEL_MASK; x86_pmu.lbr_sel_map = snb_lbr_sel_map; + + /* Knights Landing does have MISPREDICT bit */ + if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP) + x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS; } -- GitLab From 09121255c784fd36ad6237a4e239c634b0209de0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 29 Aug 2018 14:13:13 +0200 Subject: [PATCH 1223/1692] perf/UAPI: Clearly mark __PERF_SAMPLE_CALLCHAIN_EARLY as internal use Vince noted that commit: 6cbc304f2f36 ("perf/x86/intel: Fix unwind errors from PEBS entries (mk-II)") 'leaked' __PERF_SAMPLE_CALLCHAIN_EARLY into the UAPI namespace. And while sys_perf_event_open() will error out if you try to use it, it is exposed. Clearly mark it for internal use only to avoid any confusion. Requested-by: Vince Weaver Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/uapi/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index eeb787b1c53c..f35eb72739c0 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -144,7 +144,7 @@ enum perf_event_sample_format { PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ - __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, + __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */ }; /* -- GitLab From e13e2366d8415e029fe96a62502955083e272cef Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 3 Sep 2018 16:07:08 +0200 Subject: [PATCH 1224/1692] locking/mutex: Fix mutex debug call and ww_mutex documentation The following commit: 08295b3b5bee ("Implement an algorithm choice for Wound-Wait mutexes") introduced a reference in the documentation to a function that was removed in an earlier commit. It also forgot to remove a call to debug_mutex_add_waiter() which is now unconditionally called by __mutex_add_waiter(). Fix those bugs. Signed-off-by: Thomas Hellstrom Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dri-devel@lists.freedesktop.org Fixes: 08295b3b5bee ("Implement an algorithm choice for Wound-Wait mutexes") Link: http://lkml.kernel.org/r/20180903140708.2401-1-thellstrom@vmware.com Signed-off-by: Ingo Molnar --- kernel/locking/mutex.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 1a81a1257b3f..3f8a35104285 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -389,7 +389,7 @@ static bool __ww_mutex_wound(struct mutex *lock, /* * wake_up_process() paired with set_current_state() * inserts sufficient barriers to make sure @owner either sees - * it's wounded in __ww_mutex_lock_check_stamp() or has a + * it's wounded in __ww_mutex_check_kill() or has a * wakeup pending to re-read the wounded state. 
*/ if (owner != current) @@ -946,7 +946,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, } debug_mutex_lock_common(lock, &waiter); - debug_mutex_add_waiter(lock, &waiter, current); lock_contended(&lock->dep_map, ip); -- GitLab From 5f0abea6ab6dd3104fc00c64a86d58b5d59a3818 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 6 Sep 2018 17:01:47 +0800 Subject: [PATCH 1225/1692] staging: erofs: rename superblock flags (MS_xyz -> SB_xyz) This patch follows commit 1751e8a6cb93 ("Rename superblock flags (MS_xyz -> SB_xyz)") and after commit ("vfs: Suppress MS_* flag defs within the kernel unless explicitly enabled"), there is no MS_RDONLY and MS_NOATIME at all. Reported-by: Stephen Rothwell Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Reviewed-by: David Howells Signed-off-by: Greg Kroah-Hartman --- drivers/staging/erofs/super.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c index 1aec509c805f..2df9768edac9 100644 --- a/drivers/staging/erofs/super.c +++ b/drivers/staging/erofs/super.c @@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb, goto err_sbread; sb->s_magic = EROFS_SUPER_MAGIC; - sb->s_flags |= MS_RDONLY | MS_NOATIME; + sb->s_flags |= SB_RDONLY | SB_NOATIME; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_time_gran = 1; @@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data) { BUG_ON(!sb_rdonly(sb)); - *flags |= MS_RDONLY; + *flags |= SB_RDONLY; return 0; } -- GitLab From e73e81975f2447e6f556100cada64a18ec631cbb Mon Sep 17 00:00:00 2001 From: Jiada Wang Date: Tue, 31 Jul 2018 21:12:22 +0900 Subject: [PATCH 1226/1692] sched/debug: Fix potential deadlock when writing to sched_features The following lockdep report can be triggered by writing to /sys/kernel/debug/sched_features: ====================================================== WARNING: possible circular locking dependency detected 4.18.0-rc6-00152-gcd3f77d74ac3-dirty #18 Not tainted ------------------------------------------------------ sh/3358 is trying to acquire lock: 000000004ad3989d (cpu_hotplug_lock.rw_sem){++++}, at: static_key_enable+0x14/0x30 but task is already holding lock: 00000000c1b31a88 (&sb->s_type->i_mutex_key#3){+.+.}, at: sched_feat_write+0x160/0x428 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #3 (&sb->s_type->i_mutex_key#3){+.+.}: lock_acquire+0xb8/0x148 down_write+0xac/0x140 start_creating+0x5c/0x168 debugfs_create_dir+0x18/0x220 opp_debug_register+0x8c/0x120 _add_opp_dev+0x104/0x1f8 dev_pm_opp_get_opp_table+0x174/0x340 _of_add_opp_table_v2+0x110/0x760 dev_pm_opp_of_add_table+0x5c/0x240 dev_pm_opp_of_cpumask_add_table+0x5c/0x100 cpufreq_init+0x160/0x430 cpufreq_online+0x1cc/0xe30 cpufreq_add_dev+0x78/0x198 subsys_interface_register+0x168/0x270 cpufreq_register_driver+0x1c8/0x278 dt_cpufreq_probe+0xdc/0x1b8 platform_drv_probe+0xb4/0x168 driver_probe_device+0x318/0x4b0 __device_attach_driver+0xfc/0x1f0 bus_for_each_drv+0xf8/0x180 __device_attach+0x164/0x200 device_initial_probe+0x10/0x18 bus_probe_device+0x110/0x178 device_add+0x6d8/0x908 platform_device_add+0x138/0x3d8 platform_device_register_full+0x1cc/0x1f8 cpufreq_dt_platdev_init+0x174/0x1bc do_one_initcall+0xb8/0x310 kernel_init_freeable+0x4b8/0x56c kernel_init+0x10/0x138 ret_from_fork+0x10/0x18 -> #2 (opp_table_lock){+.+.}: lock_acquire+0xb8/0x148 __mutex_lock+0x104/0xf50 mutex_lock_nested+0x1c/0x28 _of_add_opp_table_v2+0xb4/0x760 dev_pm_opp_of_add_table+0x5c/0x240 dev_pm_opp_of_cpumask_add_table+0x5c/0x100 cpufreq_init+0x160/0x430 cpufreq_online+0x1cc/0xe30 cpufreq_add_dev+0x78/0x198 subsys_interface_register+0x168/0x270 cpufreq_register_driver+0x1c8/0x278 dt_cpufreq_probe+0xdc/0x1b8 platform_drv_probe+0xb4/0x168 driver_probe_device+0x318/0x4b0 __device_attach_driver+0xfc/0x1f0 bus_for_each_drv+0xf8/0x180 __device_attach+0x164/0x200 device_initial_probe+0x10/0x18 bus_probe_device+0x110/0x178 device_add+0x6d8/0x908 platform_device_add+0x138/0x3d8 platform_device_register_full+0x1cc/0x1f8 cpufreq_dt_platdev_init+0x174/0x1bc do_one_initcall+0xb8/0x310 kernel_init_freeable+0x4b8/0x56c kernel_init+0x10/0x138 ret_from_fork+0x10/0x18 -> #1 (subsys mutex#6){+.+.}: lock_acquire+0xb8/0x148 __mutex_lock+0x104/0xf50 mutex_lock_nested+0x1c/0x28 subsys_interface_register+0xd8/0x270 cpufreq_register_driver+0x1c8/0x278 dt_cpufreq_probe+0xdc/0x1b8 platform_drv_probe+0xb4/0x168 driver_probe_device+0x318/0x4b0 __device_attach_driver+0xfc/0x1f0 bus_for_each_drv+0xf8/0x180 __device_attach+0x164/0x200 device_initial_probe+0x10/0x18 bus_probe_device+0x110/0x178 device_add+0x6d8/0x908 platform_device_add+0x138/0x3d8 platform_device_register_full+0x1cc/0x1f8 cpufreq_dt_platdev_init+0x174/0x1bc do_one_initcall+0xb8/0x310 kernel_init_freeable+0x4b8/0x56c kernel_init+0x10/0x138 ret_from_fork+0x10/0x18 -> #0 (cpu_hotplug_lock.rw_sem){++++}: __lock_acquire+0x203c/0x21d0 lock_acquire+0xb8/0x148 cpus_read_lock+0x58/0x1c8 static_key_enable+0x14/0x30 sched_feat_write+0x314/0x428 full_proxy_write+0xa0/0x138 __vfs_write+0xd8/0x388 vfs_write+0xdc/0x318 ksys_write+0xb4/0x138 sys_write+0xc/0x18 __sys_trace_return+0x0/0x4 other info that might help us debug this: Chain exists of: cpu_hotplug_lock.rw_sem --> opp_table_lock --> &sb->s_type->i_mutex_key#3 Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&sb->s_type->i_mutex_key#3); lock(opp_table_lock); lock(&sb->s_type->i_mutex_key#3); lock(cpu_hotplug_lock.rw_sem); *** DEADLOCK *** 2 locks held by sh/3358: #0: 00000000a8c4b363 (sb_writers#10){.+.+}, at: vfs_write+0x238/0x318 #1: 00000000c1b31a88 (&sb->s_type->i_mutex_key#3){+.+.}, at: sched_feat_write+0x160/0x428 stack backtrace: CPU: 5 PID: 3358 Comm: sh Not tainted 4.18.0-rc6-00152-gcd3f77d74ac3-dirty #18 Hardware name: Renesas H3ULCB Kingfisher board based on r8a7795 ES2.0+ (DT) Call trace: 
dump_backtrace+0x0/0x288 show_stack+0x14/0x20 dump_stack+0x13c/0x1ac print_circular_bug.isra.10+0x270/0x438 check_prev_add.constprop.16+0x4dc/0xb98 __lock_acquire+0x203c/0x21d0 lock_acquire+0xb8/0x148 cpus_read_lock+0x58/0x1c8 static_key_enable+0x14/0x30 sched_feat_write+0x314/0x428 full_proxy_write+0xa0/0x138 __vfs_write+0xd8/0x388 vfs_write+0xdc/0x318 ksys_write+0xb4/0x138 sys_write+0xc/0x18 __sys_trace_return+0x0/0x4 This is because when loading the cpufreq_dt module we first acquire cpu_hotplug_lock.rw_sem lock, then in cpufreq_init(), we are taking the &sb->s_type->i_mutex_key lock. But when writing to /sys/kernel/debug/sched_features, the cpu_hotplug_lock.rw_sem lock depends on the &sb->s_type->i_mutex_key lock. To fix this bug, reverse the lock acquisition order when writing to sched_features, this way cpu_hotplug_lock.rw_sem no longer depends on &sb->s_type->i_mutex_key. Tested-by: Dietmar Eggemann Signed-off-by: Jiada Wang Signed-off-by: Peter Zijlstra (Intel) Cc: Eugeniu Rosca Cc: George G. Davis Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180731121222.26195-1-jiada_wang@mentor.com Signed-off-by: Ingo Molnar --- kernel/sched/debug.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 60caf1fb94e0..6383aa6a60ca 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = { static void sched_feat_disable(int i) { - static_key_disable(&sched_feat_keys[i]); + static_key_disable_cpuslocked(&sched_feat_keys[i]); } static void sched_feat_enable(int i) { - static_key_enable(&sched_feat_keys[i]); + static_key_enable_cpuslocked(&sched_feat_keys[i]); } #else static void sched_feat_disable(int i) { }; @@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf, /* Ensure the static_key remains in a consistent state */ inode = file_inode(filp); + cpus_read_lock(); inode_lock(inode); ret = sched_feat_set(cmp); inode_unlock(inode); + cpus_read_unlock(); if (ret < 0) return ret; -- GitLab From e5e96fafd9028b1478b165db78c52d981c14f471 Mon Sep 17 00:00:00 2001 From: Srikar Dronamraju Date: Fri, 10 Aug 2018 22:30:18 +0530 Subject: [PATCH 1227/1692] sched/topology: Set correct NUMA topology type With the following commit: 051f3ca02e46 ("sched/topology: Introduce NUMA identity node sched domain") the scheduler introduced a new NUMA level. However this leads to the NUMA topology on 2 node systems to not be marked as NUMA_DIRECT anymore. After this commit, it gets reported as NUMA_BACKPLANE, because sched_domains_numa_level is now 2 on 2 node systems. Fix this by allowing setting systems that have up to 2 NUMA levels as NUMA_DIRECT. While here remove code that assumes that level can be 0. 
Signed-off-by: Srikar Dronamraju Signed-off-by: Peter Zijlstra (Intel) Cc: Andre Wild Cc: Heiko Carstens Cc: Linus Torvalds Cc: Mel Gorman Cc: Michael Ellerman Cc: Peter Zijlstra Cc: Rik van Riel Cc: Suravee Suthikulpanit Cc: Thomas Gleixner Cc: linuxppc-dev Fixes: 051f3ca02e46 "Introduce NUMA identity node sched domain" Link: http://lkml.kernel.org/r/1533920419-17410-1-git-send-email-srikar@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- kernel/sched/topology.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 56a0fed30c0a..505a41c42b96 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void) n = sched_max_numa_distance; - if (sched_domains_numa_levels <= 1) { + if (sched_domains_numa_levels <= 2) { sched_numa_topology_type = NUMA_DIRECT; return; } @@ -1380,9 +1380,6 @@ void sched_init_numa(void) break; } - if (!level) - return; - /* * 'level' contains the number of unique distances * -- GitLab From 12b04875d666e83d27511df25580de84505bc758 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 31 Aug 2018 17:22:55 +0200 Subject: [PATCH 1228/1692] sched/pelt: Fix update_blocked_averages() for RT and DL classes update_blocked_averages() is called to periodically decay the stalled load of idle CPUs and to sync all loads before running load balance. When the cfs rq is idle, it triggers a load balance during pick_next_task_fair() in order to potentially pull tasks and to use this newly idle CPU. This load balance happens while the previous task from another class has not yet been put and its utilization not yet updated. This may lead to wrongly accounting running time as idle time for the RT or DL classes. Test that no RT or DL task is running when updating their utilization in update_blocked_averages(). We still update the RT and DL utilization instead of simply skipping them to make sure that all metrics are synced when used during load balance. 
Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 371bf4273269 ("sched/rt: Add rt_rq utilization tracking") Fixes: 3727e0e16340 ("sched/dl: Add dl_rq utilization tracking") Link: http://lkml.kernel.org/r/1535728975-22799-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b39fb596f6c1..8cff8d55ee95 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7263,6 +7263,7 @@ static void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq, *pos; + const struct sched_class *curr_class; struct rq_flags rf; bool done = true; @@ -7299,8 +7300,10 @@ static void update_blocked_averages(int cpu) if (cfs_rq_has_blocked(cfs_rq)) done = false; } - update_rt_rq_load_avg(rq_clock_task(rq), rq, 0); - update_dl_rq_load_avg(rq_clock_task(rq), rq, 0); + + curr_class = rq->curr->sched_class; + update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class); + update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); /* Don't need periodic decay once load/util_avg are null */ if (others_have_blocked(rq)) @@ -7365,13 +7368,16 @@ static inline void update_blocked_averages(int cpu) { struct rq *rq = cpu_rq(cpu); struct cfs_rq *cfs_rq = &rq->cfs; + const struct sched_class *curr_class; struct rq_flags rf; rq_lock_irqsave(rq, &rf); update_rq_clock(rq); update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); - update_rt_rq_load_avg(rq_clock_task(rq), rq, 0); - update_dl_rq_load_avg(rq_clock_task(rq), rq, 0); + + curr_class = rq->curr->sched_class; + update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class); + update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); #ifdef CONFIG_NO_HZ_COMMON rq->last_blocked_load_update_tick = jiffies; -- GitLab From d0cdb3ce8834332d918fc9c8ff74f8a169ec9abe Mon Sep 17 00:00:00 2001 From: Steve Muckle Date: Fri, 31 Aug 2018 15:42:17 -0700 Subject: [PATCH 1229/1692] sched/fair: Fix vruntime_normalized() for remote non-migration wakeup When a task which previously ran on a given CPU is remotely queued to wake up on that same CPU, there is a period where the task's state is TASK_WAKING and its vruntime is not normalized. This is not accounted for in vruntime_normalized() which will cause an error in the task's vruntime if it is switched from the fair class during this time. For example if it is boosted to RT priority via rt_mutex_setprio(), rq->min_vruntime will not be subtracted from the task's vruntime but it will be added again when the task returns to the fair class. The task's vruntime will have been erroneously doubled and the effective priority of the task will be reduced. Note this will also lead to inflation of all vruntimes since the doubled vruntime value will become the rq's min_vruntime when other tasks leave the rq. This leads to repeated doubling of the vruntime and priority penalty. Fix this by recognizing a WAKING task's vruntime as normalized only if sched_remote_wakeup is true. This indicates a migration, in which case the vruntime would have been normalized in migrate_task_rq_fair(). Based on a similar patch from John Dias . 
Suggested-by: Peter Zijlstra Tested-by: Dietmar Eggemann Signed-off-by: Steve Muckle Signed-off-by: Peter Zijlstra (Intel) Cc: Chris Redpath Cc: John Dias Cc: Linus Torvalds Cc: Miguel de Dios Cc: Morten Rasmussen Cc: Patrick Bellasi Cc: Paul Turner Cc: Quentin Perret Cc: Thomas Gleixner Cc: Todd Kjos Cc: kernel-team@android.com Fixes: b5179ac70de8 ("sched/fair: Prepare to fix fairness problems on migration") Link: http://lkml.kernel.org/r/20180831224217.169476-1-smuckle@google.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8cff8d55ee95..c6b7d6daab20 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9644,7 +9644,8 @@ static inline bool vruntime_normalized(struct task_struct *p) * - A task which has been woken up by try_to_wake_up() and * waiting for actually being woken up by sched_ttwu_pending(). */ - if (!se->sum_exec_runtime || p->state == TASK_WAKING) + if (!se->sum_exec_runtime || + (p->state == TASK_WAKING && p->sched_remote_wakeup)) return true; return false; -- GitLab From 287cdaac5700c5b8970d739f73d742d863d3e2ca Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 4 Sep 2018 11:36:26 +0200 Subject: [PATCH 1230/1692] sched/fair: Fix scale_rt_capacity() for SMT Since commit: 523e979d3164 ("sched/core: Use PELT for scale_rt_capacity()") scale_rt_capacity() returns the remaining capacity and not a scale factor to apply on cpu_capacity_orig. arch_scale_cpu() is directly called by scale_rt_capacity() so we must take the sched_domain argument. Reported-by: Srikar Dronamraju Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Srikar Dronamraju Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 523e979d3164 ("sched/core: Use PELT for scale_rt_capacity()") Link: http://lkml.kernel.org/r/20180904093626.GA23936@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c6b7d6daab20..f12d004be6a1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7488,10 +7488,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd, return load_idx; } -static unsigned long scale_rt_capacity(int cpu) +static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu) { struct rq *rq = cpu_rq(cpu); - unsigned long max = arch_scale_cpu_capacity(NULL, cpu); + unsigned long max = arch_scale_cpu_capacity(sd, cpu); unsigned long used, free; unsigned long irq; @@ -7513,7 +7513,7 @@ static unsigned long scale_rt_capacity(int cpu) static void update_cpu_capacity(struct sched_domain *sd, int cpu) { - unsigned long capacity = scale_rt_capacity(cpu); + unsigned long capacity = scale_rt_capacity(sd, cpu); struct sched_group *sdg = sd->groups; cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu); -- GitLab From bb3485c8ace6475c269b1aa2da674490f455f412 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Fri, 7 Sep 2018 09:51:04 +0200 Subject: [PATCH 1231/1692] sched/fair: Fix load_balance redo for !imbalance It can happen that load_balance() finds a busiest group and then a busiest rq but the calculated imbalance is in fact 0. In such situation, detach_tasks() returns immediately and lets the flag LBF_ALL_PINNED set. The busiest CPU is then wrongly assumed to have pinned tasks and removed from the load balance mask. then, we redo a load balance without the busiest CPU. 
This creates wrong load balance situation and generates wrong task migration. If the calculated imbalance is 0, it's useless to try to find a busiest rq as no task will be migrated and we can return immediately. This situation can happen with heterogeneous system or smp system when RT tasks are decreasing the capacity of some CPUs. Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dietmar.eggemann@arm.com Cc: jhugo@codeaurora.org Link: http://lkml.kernel.org/r/1536306664-29827-1-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f12d004be6a1..fc9a484ef82b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8275,7 +8275,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) force_balance: /* Looks like there is an imbalance. Compute it */ calculate_imbalance(env, &sds); - return sds.busiest; + return env->imbalance ? sds.busiest : NULL; out_balanced: env->imbalance = 0; -- GitLab From da260fe12330be8b003c2ab07a112704163ea675 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 7 Sep 2018 12:35:21 +0200 Subject: [PATCH 1232/1692] jump_label: Fix typo in warning message There's no 'allocatote' - use the next best thing: 'allocate' :-) Signed-off-by: Borislav Petkov Signed-off-by: Peter Zijlstra (Intel) Cc: Jason Baron Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt (VMware) Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180907103521.31344-1-bp@alien8.de Signed-off-by: Ingo Molnar --- kernel/jump_label.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 01ebdf1f9f40..2e62503bea0d 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -678,7 +678,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, case MODULE_STATE_COMING: ret = jump_label_add_module(mod); if (ret) { - WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n"); + WARN(1, "Failed to allocate memory: jump_label may not work properly.\n"); jump_label_del_module(mod); } break; -- GitLab From 882a78a9f39f5535b209b4aa0a1741e35b8c67fb Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Sep 2018 12:53:17 -0700 Subject: [PATCH 1233/1692] sched/fair: Fix kernel-doc notation warning Fix kernel-doc warning for missing 'flags' parameter description: ../kernel/sched/fair.c:3371: warning: Function parameter or member 'flags' not described in 'attach_entity_load_avg' Signed-off-by: Randy Dunlap Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: ea14b57e8a18 ("sched/cpufreq: Provide migration hint") Link: http://lkml.kernel.org/r/cdda0d42-880d-4229-a9f7-5899c977a063@infradead.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fc9a484ef82b..f808ddf2a868 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3362,6 +3362,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) * attach_entity_load_avg - attach this entity to its cfs_rq load avg * @cfs_rq: cfs_rq to attach to * @se: sched_entity to attach + * @flags: migration hints * * Must call update_cfs_rq_load_avg() before this, since we rely on * cfs_rq->avg.last_update_time being current. 
-- GitLab From f8ff6b2d4a51f08ff53360aab633ba6d4f2d54b6 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 5 Sep 2018 17:40:09 +0200 Subject: [PATCH 1234/1692] staging/fbtft: Update TODO and mailing lists Motivated by the ksummit-discuss discussion. Cc: Shuah Khan Cc: Thomas Petazzoni Cc: Mauro Carvalho Chehab Cc: linux-fbdev@vger.kernel.org Signed-off-by: Daniel Vetter Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 2 ++ drivers/staging/fbtft/TODO | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index a726e22976bb..bb23faafecc4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5624,6 +5624,8 @@ F: lib/fault-inject.c FBTFT Framebuffer drivers M: Thomas Petazzoni +L: dri-devel@lists.freedesktop.org +L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/staging/fbtft/ diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO index 7e64c7e438f0..a9f4802bb6be 100644 --- a/drivers/staging/fbtft/TODO +++ b/drivers/staging/fbtft/TODO @@ -2,3 +2,7 @@ GPIO descriptor API in and look up GPIO lines from device tree, ACPI or board files, board files should use + +* convert all these over to drm_simple_display_pipe and submit for inclusion + into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new + drivers anymore. -- GitLab From 8c25741aaad8be6fbe51510e917c740e0059cf83 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 10 Sep 2018 11:43:29 +0200 Subject: [PATCH 1235/1692] ovl: fix oopses in ovl_fill_super() failure paths ovl_free_fs() dereferences ofs->workbasedir and ofs->upper_mnt in cases when those might not have been initialized yet. Fix the initialization order for these fields. Reported-by: syzbot+c75f181dc8429d2eb887@syzkaller.appspotmail.com Signed-off-by: Miklos Szeredi Cc: # v4.15 Fixes: 95e6d4177cb7 ("ovl: grab reference to workbasedir early") Fixes: a9075cdb467d ("ovl: factor out ovl_free_fs() helper") --- fs/overlayfs/super.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 2e0fc93c2c06..30adc9d408a0 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -982,16 +982,6 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) if (err) goto out; - err = -EBUSY; - if (ovl_inuse_trylock(upperpath->dentry)) { - ofs->upperdir_locked = true; - } else if (ofs->config.index) { - pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n"); - goto out; - } else { - pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); - } - upper_mnt = clone_private_mount(upperpath); err = PTR_ERR(upper_mnt); if (IS_ERR(upper_mnt)) { @@ -1002,6 +992,17 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath) /* Don't inherit atime flags */ upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); ofs->upper_mnt = upper_mnt; + + err = -EBUSY; + if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) { + ofs->upperdir_locked = true; + } else if (ofs->config.index) { + pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n"); + goto out; + } else { + pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); + } + err = 0; out: return err; @@ -1101,8 +1102,10 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path 
*upperpath) goto out; } + ofs->workbasedir = dget(workpath.dentry); + err = -EBUSY; - if (ovl_inuse_trylock(workpath.dentry)) { + if (ovl_inuse_trylock(ofs->workbasedir)) { ofs->workdir_locked = true; } else if (ofs->config.index) { pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n"); @@ -1111,7 +1114,6 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath) pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); } - ofs->workbasedir = dget(workpath.dentry); err = ovl_make_workdir(ofs, &workpath); if (err) goto out; -- GitLab From 2a665dba016d5493c7d826fec82b0cb643b30d42 Mon Sep 17 00:00:00 2001 From: Akshu Agrawal Date: Mon, 10 Sep 2018 13:36:30 +0530 Subject: [PATCH 1236/1692] ASoC: AMD: Ensure reset bit is cleared before configuring HW register descriptions says: "DMA Channel Reset...Software must confirm that this bit is cleared before reprogramming any of the channel configuration registers." There could be cases where dma stop errored out leaving dma channel in reset state. We need to ensure that before the start of another dma, channel is out of the reset state. Signed-off-by: Akshu Agrawal Signed-off-by: Mark Brown --- sound/soc/amd/acp-pcm-dma.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index e359938e3d7e..77b265bd0505 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio, acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data); } +static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num) +{ + u32 dma_ctrl; + int ret; + + /* clear the reset bit */ + dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num); + dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK; + acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num); + /* check the reset bit before programming configuration registers */ + ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4), + dma_ctrl, + !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK), + 100, ACP_DMA_RESET_TIME); + if (ret < 0) + pr_err("Failed to clear reset of channel : %d\n", ch_num); +} + /* * Initialize the DMA descriptor information for transfer between * system memory <-> ACP SRAM @@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio, config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, &dmadscr[i]); } + pre_config_reset(acp_mmio, ch); config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1, NUM_DSCRS_PER_CHANNEL, @@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size, config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, &dmadscr[i]); } + pre_config_reset(acp_mmio, ch); /* Configure the DMA channel with the above descriptore */ config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1, NUM_DSCRS_PER_CHANNEL, -- GitLab From 83e01228cb35823f7bd0e5a0584e24ed72a8af2b Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Mon, 13 Aug 2018 19:05:37 +0000 Subject: [PATCH 1237/1692] tools/lib/lockdep: Update Sasha Levin email to MSFT Signed-off-by: Sasha Levin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sasha Levin Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180813190527.16853-2-alexander.levin@microsoft.com Signed-off-by: Ingo 
Molnar --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index d870cb57c887..f999786cfa90 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8300,7 +8300,7 @@ F: include/linux/libata.h F: Documentation/devicetree/bindings/ata/ LIBLOCKDEP -M: Sasha Levin +M: Sasha Levin S: Maintained F: tools/lib/lockdep/ -- GitLab From 1064ea494bb00519c6e34f791dcf17436f70592d Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Mon, 13 Aug 2018 19:05:38 +0000 Subject: [PATCH 1238/1692] tools/lib/lockdep: Add empty nmi.h Required since: 88f1c87de11a8 ("locking/lockdep: Avoid triggering hardlockup from debug_show_all_locks()") Signed-off-by: Sasha Levin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sasha Levin Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180813190527.16853-3-alexander.levin@microsoft.com Signed-off-by: Ingo Molnar --- tools/include/linux/nmi.h | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tools/include/linux/nmi.h diff --git a/tools/include/linux/nmi.h b/tools/include/linux/nmi.h new file mode 100644 index 000000000000..e69de29bb2d1 -- GitLab From 16214312df6d5aaa5324864d032ce565e97f8890 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Mon, 13 Aug 2018 19:05:39 +0000 Subject: [PATCH 1239/1692] tools/lib/lockdep: Add dummy task_struct state member Commit: 8cc05c71ba5f ("locking/lockdep: Move sanity check to inside lockdep_print_held_locks()") added accesses to the task_struct's state member. Add dummy userspace declaration. Signed-off-by: Sasha Levin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sasha Levin Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180813190527.16853-4-alexander.levin@microsoft.com Signed-off-by: Ingo Molnar --- tools/include/linux/lockdep.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h index 6b0c36a58fcb..e56997288f2b 100644 --- a/tools/include/linux/lockdep.h +++ b/tools/include/linux/lockdep.h @@ -30,9 +30,12 @@ struct task_struct { struct held_lock held_locks[MAX_LOCK_DEPTH]; gfp_t lockdep_reclaim_gfp; int pid; + int state; char comm[17]; }; +#define TASK_RUNNING 0 + extern struct task_struct *__curr(void); #define current (__curr()) -- GitLab From dc5591a03f1d6dae6b11cdf1d74b023f7ac0fdbf Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 28 Aug 2018 21:33:15 +0100 Subject: [PATCH 1240/1692] locking/lockdep: Delete unnecessary #include Commit: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage") added the inclusion of . liblockdep doesn't have a stub version of that header so now fails to build. However, commit: bff1b208a5d1 ("tracing: Partial revert of "tracing: Centralize preemptirq tracepoints and unify their usage"") removed the use of functions declared in that header. So delete the #include. 
Signed-off-by: Ben Hutchings Cc: Joel Fernandes Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sasha Levin Cc: Steven Rostedt Cc: Thomas Gleixner Cc: Will Deacon Fixes: bff1b208a5d1 ("tracing: Partial revert of "tracing: Centralize ...") Fixes: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints ...") Link: http://lkml.kernel.org/r/20180828203315.GD18030@decadent.org.uk Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index e406c5fdb41e..dd13f865ad40 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -55,7 +55,6 @@ #include "lockdep_internals.h" -#include #define CREATE_TRACE_POINTS #include -- GitLab From 0b405c65ad459f5f4d3db1672246172bd19d946d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 24 Aug 2018 12:22:35 +0100 Subject: [PATCH 1241/1692] locking/ww_mutex: Fix spelling mistake "cylic" -> "cyclic" Trivial fix to spelling mistake in pr_err() error message Signed-off-by: Colin Ian King Acked-by: Will Deacon Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kernel-janitors@vger.kernel.org Link: http://lkml.kernel.org/r/20180824112235.8842-1-colin.king@canonical.com Signed-off-by: Ingo Molnar --- kernel/locking/test-ww_mutex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c index 5b915b370d5a..0be047dbd897 100644 --- a/kernel/locking/test-ww_mutex.c +++ b/kernel/locking/test-ww_mutex.c @@ -324,7 +324,7 @@ static int __test_cycle(unsigned int nthreads) if (!cycle->result) continue; - pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n", + pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n", n, nthreads, cycle->result); ret = -EINVAL; break; -- GitLab From 02e184476eff848273826c1d6617bb37e5bcc7ad Mon Sep 17 00:00:00 2001 From: Yabin Cui Date: Thu, 23 Aug 2018 15:59:35 -0700 Subject: [PATCH 1242/1692] perf/core: Force USER_DS when recording user stack data Perf can record user stack data in response to a synchronous request, such as a tracepoint firing. If this happens under set_fs(KERNEL_DS), then we end up reading user stack data using __copy_from_user_inatomic() under set_fs(KERNEL_DS). I think this conflicts with the intention of using set_fs(KERNEL_DS). And it is explicitly forbidden by hardware on ARM64 when both CONFIG_ARM64_UAO and CONFIG_ARM64_PAN are used. So fix this by forcing USER_DS when recording user stack data. Signed-off-by: Yabin Cui Acked-by: Peter Zijlstra (Intel) Cc: Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 88b0193d9418 ("perf/callchain: Force USER_DS when invoking perf_callchain_user()") Link: http://lkml.kernel.org/r/20180823225935.27035-1-yabinc@google.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/events/core.c b/kernel/events/core.c index abaed4f8bb7f..c80549bf82c6 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5943,6 +5943,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, unsigned long sp; unsigned int rem; u64 dyn_size; + mm_segment_t fs; /* * We dump: @@ -5960,7 +5961,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, /* Data. 
*/ sp = perf_user_stack_pointer(regs); + fs = get_fs(); + set_fs(USER_DS); rem = __output_copy_user(handle, (void *) sp, dump_size); + set_fs(fs); dyn_size = dump_size - rem; perf_output_skip(handle, rem); -- GitLab From 07e846bace717729fd20b5d99521a5f8c7d7a9cb Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 5 Aug 2018 20:34:05 -0700 Subject: [PATCH 1243/1692] x86/doc: Fix Documentation/x86/earlyprintk.txt Fix a few issues in Documentation/x86/earlyprintk.txt: - correct typos, punctuation, missing word, wrong word - change product name from Netchip to NetChip - expand where to add "earlyprintk=dbg" Signed-off-by: Randy Dunlap Cc: Eric W. Biederman Cc: Jason Wessel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Yinghai Lu Cc: linux-doc@vger.kernel.org Cc: linux-usb@vger.kernel.org Link: http://lkml.kernel.org/r/d0c40ac3-7659-6374-dbda-23d3d2577f30@infradead.org Signed-off-by: Ingo Molnar --- Documentation/x86/earlyprintk.txt | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/Documentation/x86/earlyprintk.txt b/Documentation/x86/earlyprintk.txt index 688e3eeed21d..46933e06c972 100644 --- a/Documentation/x86/earlyprintk.txt +++ b/Documentation/x86/earlyprintk.txt @@ -35,25 +35,25 @@ and two USB cables, connected like this: ( If your system does not list a debug port capability then you probably won't be able to use the USB debug key. ) - b.) You also need a Netchip USB debug cable/key: + b.) You also need a NetChip USB debug cable/key: http://www.plxtech.com/products/NET2000/NET20DC/default.asp - This is a small blue plastic connector with two USB connections, + This is a small blue plastic connector with two USB connections; it draws power from its USB connections. c.) You need a second client/console system with a high speed USB 2.0 port. - d.) The Netchip device must be plugged directly into the physical + d.) The NetChip device must be plugged directly into the physical debug port on the "host/target" system. You cannot use a USB hub in between the physical debug port and the "host/target" system. The EHCI debug controller is bound to a specific physical USB - port and the Netchip device will only work as an early printk + port and the NetChip device will only work as an early printk device in this port. The EHCI host controllers are electrically wired such that the EHCI debug controller is hooked up to the - first physical and there is no way to change this via software. + first physical port and there is no way to change this via software. You can find the physical port through experimentation by trying each physical port on the system and rebooting. Or you can try and use lsusb or look at the kernel info messages emitted by the @@ -65,9 +65,9 @@ and two USB cables, connected like this: to the hardware vendor, because there is no reason not to wire this port into one of the physically accessible ports. - e.) It is also important to note, that many versions of the Netchip + e.) It is also important to note, that many versions of the NetChip device require the "client/console" system to be plugged into the - right and side of the device (with the product logo facing up and + right hand side of the device (with the product logo facing up and readable left to right). The reason being is that the 5 volt power supply is taken from only one side of the device and it must be the side that does not get rebooted. 
@@ -81,13 +81,18 @@ and two USB cables, connected like this: CONFIG_EARLY_PRINTK_DBGP=y And you need to add the boot command line: "earlyprintk=dbgp". + (If you are using Grub, append it to the 'kernel' line in - /etc/grub.conf) + /etc/grub.conf. If you are using Grub2 on a BIOS firmware system, + append it to the 'linux' line in /boot/grub2/grub.cfg. If you are + using Grub2 on an EFI firmware system, append it to the 'linux' + or 'linuxefi' line in /boot/grub2/grub.cfg or + /boot/efi/EFI//grub.cfg.) On systems with more than one EHCI debug controller you must specify the correct EHCI debug controller number. The ordering comes from the PCI bus enumeration of the EHCI controllers. The - default with no number argument is "0" the first EHCI debug + default with no number argument is "0" or the first EHCI debug controller. To use the second EHCI debug controller, you would use the command line: "earlyprintk=dbgp1" @@ -111,7 +116,7 @@ and two USB cables, connected like this: see the raw output. c.) On Nvidia Southbridge based systems: the kernel will try to probe - and find out which port has debug device connected. + and find out which port has a debug device connected. 3. Testing that it works fine: -- GitLab From 21a268069203cc72d1b2990bc68386516fabc274 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 7 Sep 2018 16:29:54 +0200 Subject: [PATCH 1244/1692] mtd: rawnand: marvell: prevent harmless warnings Since the addition of WARN_ON() in nand_subop_get_data/addr_len() helpers, this driver will produce harmless warnings (mostly at probe) just because it always calls the nand_subop_get_data_len() helper in the parsing function (even on non-data instructions, where this value is meaningless and unneeded). Fix these warnings by deriving the length only when it is relevant. 
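The pattern behind the warning is worth making explicit: the subop helpers now WARN when queried for an instruction that carries no data, so the caller may only ask for a length on paths that already know they are handling a data transfer. A condensed sketch of that pattern, with made-up names (this is not the marvell_nand code, just the shape of the problem and the fix):

enum instr_type { INSTR_CMD, INSTR_ADDR, INSTR_DATA_IN, INSTR_DATA_OUT };

struct op_instr {
	enum instr_type type;
	unsigned int data_len;	/* only meaningful for DATA_IN/DATA_OUT */
};

/* Stand-in for a helper that warns when asked about a non-data instruction. */
static unsigned int sketch_get_data_len(const struct op_instr *instr)
{
	if (WARN_ON(instr->type != INSTR_DATA_IN &&
		    instr->type != INSTR_DATA_OUT))
		return 0;
	return instr->data_len;
}

static void sketch_parse_instr(const struct op_instr *instr)
{
	unsigned int len;	/* no longer derived up front for every instruction */

	switch (instr->type) {
	case INSTR_CMD:
	case INSTR_ADDR:
		/* a length is meaningless here, so never ask for one */
		break;
	case INSTR_DATA_IN:
	case INSTR_DATA_OUT:
		len = sketch_get_data_len(instr);	/* safe: data instruction */
		pr_debug("programming transfer of %u bytes\n", len);
		break;
	}
}

The hunks below apply exactly this idea: len is left unset until the data-in/data-out branches, which are the only places that use it.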
Fixes: 760c435e0f85 ("mtd: rawnand: make subop helpers return unsigned values") Signed-off-by: Miquel Raynal Signed-off-by: Boris Brezillon --- drivers/mtd/nand/raw/marvell_nand.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 7af4d6213ee5..bc2ef5209783 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, for (op_id = 0; op_id < subop->ninstrs; op_id++) { unsigned int offset, naddrs; const u8 *addrs; - int len = nand_subop_get_data_len(subop, op_id); + int len; instr = &subop->instrs[op_id]; @@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, nfc_op->ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | NDCB0_LEN_OVRD; + len = nand_subop_get_data_len(subop, op_id); nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); } nfc_op->data_delay_ns = instr->delay_ns; @@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, nfc_op->ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | NDCB0_LEN_OVRD; + len = nand_subop_get_data_len(subop, op_id); nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); } nfc_op->data_delay_ns = instr->delay_ns; -- GitLab From 90a3b7f8aba3011badacd6d8121e03aa24ac79d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Szymanski?= Date: Thu, 6 Sep 2018 11:16:00 +0200 Subject: [PATCH 1245/1692] ASoC: cs4265: fix MMTLR Data switch control MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The MMTLR bit is in the CS4265_SPDIF_CTL2 register at address 0x12 bit 0 and not at address 0x0 bit 1. Fix this. Signed-off-by: Sébastien Szymanski Signed-off-by: Mark Brown Cc: stable@vger.kernel.org --- sound/soc/codecs/cs4265.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c index 275677de669f..407554175282 100644 --- a/sound/soc/codecs/cs4265.c +++ b/sound/soc/codecs/cs4265.c @@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = { SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, 3, 1, 0), SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), - SOC_SINGLE("MMTLR Data Switch", 0, - 1, 1, 0), + SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2, + 0, 1, 0), SOC_ENUM("Mono Channel Select", spdif_mono_select_enum), SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24), }; -- GitLab From 49434c6c575d2008c0abbc93e615019f39e01252 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sat, 8 Sep 2018 08:12:21 +0200 Subject: [PATCH 1246/1692] ALSA: emu10k1: fix possible info leak to userspace on SNDRV_EMU10K1_IOCTL_INFO snd_emu10k1_fx8010_ioctl(SNDRV_EMU10K1_IOCTL_INFO) allocates memory using kmalloc() and partially fills it by calling snd_emu10k1_fx8010_info() before returning the resulting structure to userspace, leaving uninitialized holes. Let's just use kzalloc() here. 
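The leak itself is the classic pattern: a structure is allocated with kmalloc(), only some of its fields are written, and then the whole object is copied to userspace, so structure padding and any untouched fields carry whatever was previously on the heap. A generic sketch of why a zeroing allocator closes the hole (the record type and fill routine below are made up, not the emu10k1 ones):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Made-up record: note the implicit padding after 'flags' and the string
 * that the fill routine only partially writes. */
struct info_rec {
	u32	version;
	u8	flags;		/* compiler inserts padding bytes after this */
	u64	features;
	char	name[16];
};

static void fill_info(struct info_rec *info)
{
	info->version = 1;
	info->flags = 0x3;
	/* 'features' and the tail of 'name' intentionally left untouched */
	strscpy(info->name, "card0", sizeof(info->name));
}

static int info_to_user(void __user *argp)
{
	struct info_rec *info;
	int ret = 0;

	/* kzalloc() zeroes the whole allocation, so padding bytes and any
	 * field the fill routine skips read back as 0 instead of stale heap
	 * contents when the record is copied to userspace. */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	fill_info(info);
	if (copy_to_user(argp, info, sizeof(*info)))
		ret = -EFAULT;

	kfree(info);
	return ret;
}

A memset() after kmalloc() would work too, but kzalloc() keeps the fix to a single line, which is what the patch below does.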
BugLink: http://blog.infosectcbr.com.au/2018/09/linux-kernel-infoleaks.html Signed-off-by: Willy Tarreau Cc: Jann Horn Cc: Signed-off-by: Takashi Iwai --- sound/pci/emu10k1/emufx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c index 90713741c2dc..6ebe817801ea 100644 --- a/sound/pci/emu10k1/emufx.c +++ b/sound/pci/emu10k1/emufx.c @@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un emu->support_tlv = 1; return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp); case SNDRV_EMU10K1_IOCTL_INFO: - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; snd_emu10k1_fx8010_info(emu, info); -- GitLab From a318c2432218d3cd189ec8228b8a795666899c2a Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Fri, 31 Aug 2018 14:34:02 +0200 Subject: [PATCH 1247/1692] mfd: da9063: Fix DT probing with constraints Commit 1c892e38ce59 ("regulator: da9063: Handle less LDOs on DA9063L") reordered the da9063_regulator_info[] array, but not the DA9063_ID_* regulator ids and not the da9063_matches[] array, because ids are used as indices in the array initializer. This mismatch between regulator id and da9063_regulator_info[] array index causes the driver probe to fail because constraints from DT are not applied to the correct regulator: da9063 0-0058: Device detected (chip-ID: 0x61, var-ID: 0x50) DA9063_BMEM: Bringing 900000uV into 3300000-3300000uV DA9063_LDO9: Bringing 3300000uV into 2500000-2500000uV DA9063_LDO1: Bringing 900000uV into 3300000-3300000uV DA9063_LDO1: failed to apply 3300000-3300000uV constraint(-22) This patch reorders the DA9063_ID_* as apparently intended, and with them the entries in the da90630_matches[] array. Fixes: 1c892e38ce59 ("regulator: da9063: Handle less LDOs on DA9063L") Signed-off-by: Philipp Zabel Reviewed-by: Marek Vasut Reviewed-by: Geert Uytterhoeven Signed-off-by: Lee Jones --- include/linux/mfd/da9063/pdata.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h index 8a125701ef7b..50bed4f89c1a 100644 --- a/include/linux/mfd/da9063/pdata.h +++ b/include/linux/mfd/da9063/pdata.h @@ -21,7 +21,7 @@ /* * Regulator configuration */ -/* DA9063 regulator IDs */ +/* DA9063 and DA9063L regulator IDs */ enum { /* BUCKs */ DA9063_ID_BCORE1, @@ -37,18 +37,20 @@ enum { DA9063_ID_BMEM_BIO_MERGED, /* When two BUCKs are merged, they cannot be reused separately */ - /* LDOs */ + /* LDOs on both DA9063 and DA9063L */ + DA9063_ID_LDO3, + DA9063_ID_LDO7, + DA9063_ID_LDO8, + DA9063_ID_LDO9, + DA9063_ID_LDO11, + + /* DA9063-only LDOs */ DA9063_ID_LDO1, DA9063_ID_LDO2, - DA9063_ID_LDO3, DA9063_ID_LDO4, DA9063_ID_LDO5, DA9063_ID_LDO6, - DA9063_ID_LDO7, - DA9063_ID_LDO8, - DA9063_ID_LDO9, DA9063_ID_LDO10, - DA9063_ID_LDO11, }; /* Regulators platform data */ -- GitLab From 6e7f6b82c60afb46ff71c2127421c66207966d6d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 9 Sep 2018 15:39:14 +1000 Subject: [PATCH 1248/1692] tty: hvc: hvc_poll() fix read loop hang Commit ec97eaad1383 ("tty: hvc: hvc_poll() break hv read loop") causes the virtio console to hang at times (e.g., if you paste a bunch of characters to it. The reason is that get_chars must return 0 before we can be sure the driver will kick or poll input again, but this change only scheduled a poll if get_chars had returned a full count. 
Change this to poll on any > 0 count. Reported-by: Matteo Croce Reported-by: Jason Gunthorpe Tested-by: Matteo Croce Tested-by: Jason Gunthorpe Tested-by: Leon Romanovsky Signed-off-by: Nicholas Piggin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/hvc/hvc_console.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 5414c4a87bea..c917749708d2 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -717,10 +717,13 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep) #endif /* CONFIG_MAGIC_SYSRQ */ tty_insert_flip_char(&hp->port, buf[i], 0); } - if (n == count) - poll_mask |= HVC_POLL_READ; read_total = n; + /* + * Latency break, schedule another poll immediately. + */ + poll_mask |= HVC_POLL_READ; + out: /* Wakeup write queue if necessary */ if (hp->do_wakeup) { -- GitLab From 68b2fc714fb1e08385f9c810d84f06affd007350 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 9 Sep 2018 15:39:15 +1000 Subject: [PATCH 1249/1692] tty: hvc: hvc_poll() fix read loop batching Commit ec97eaad1383 ("tty: hvc: hvc_poll() break hv read loop") removes get_chars batching entirely, which slows down large console operations like paste -- virtio console "feels worse than a 9600 baud serial line," reports Matteo. This adds back batching in a more latency friendly way. If the caller can sleep then we try to fill the entire flip buffer, releasing the lock and scheduling between each iteration. If it can not sleep, then batches are limited to 128 bytes. Matteo confirms this fixes the performance problem. Latency testing the powerpc OPAL console with OpenBMC UART with a large paste shows about 0.25ms latency, which seems reasonable. 10ms latencies were typical for this case before the latency breaking work, so we still see most of the benefit. kopald-1204 0d.h. 5us : hvc_poll <-hvc_handle_interrupt kopald-1204 0d.h. 5us : __hvc_poll <-hvc_handle_interrupt kopald-1204 0d.h. 5us : _raw_spin_lock_irqsave <-__hvc_poll kopald-1204 0d.h. 5us : tty_port_tty_get <-__hvc_poll kopald-1204 0d.h. 6us : _raw_spin_lock_irqsave <-tty_port_tty_get kopald-1204 0d.h. 6us : _raw_spin_unlock_irqrestore <-tty_port_tty_get kopald-1204 0d.h. 6us : tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 7us : __tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 7us+: opal_get_chars <-__hvc_poll kopald-1204 0d.h. 36us : tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 36us : __tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 36us+: opal_get_chars <-__hvc_poll kopald-1204 0d.h. 65us : tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 65us : __tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 66us+: opal_get_chars <-__hvc_poll kopald-1204 0d.h. 94us : tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 95us : __tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 95us+: opal_get_chars <-__hvc_poll kopald-1204 0d.h. 124us : tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 124us : __tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 125us+: opal_get_chars <-__hvc_poll kopald-1204 0d.h. 154us : tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 154us : __tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 154us+: opal_get_chars <-__hvc_poll kopald-1204 0d.h. 183us : tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 184us : __tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 184us+: opal_get_chars <-__hvc_poll kopald-1204 0d.h. 
213us : tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 213us : __tty_buffer_request_room <-__hvc_poll kopald-1204 0d.h. 213us+: opal_get_chars <-__hvc_poll kopald-1204 0d.h. 242us : _raw_spin_unlock_irqrestore <-__hvc_poll kopald-1204 0d.h. 242us : tty_flip_buffer_push <-__hvc_poll kopald-1204 0d.h. 243us : queue_work_on <-tty_flip_buffer_push kopald-1204 0d.h. 243us : tty_kref_put <-__hvc_poll kopald-1204 0d.h. 243us : hvc_kick <-hvc_handle_interrupt kopald-1204 0d.h. 243us : wake_up_process <-hvc_kick kopald-1204 0d.h. 244us : try_to_wake_up <-hvc_kick kopald-1204 0d.h. 244us : _raw_spin_lock_irqsave <-try_to_wake_up kopald-1204 0d.h. 244us : _raw_spin_unlock_irqrestore <-try_to_wake_up Reported-by: Matteo Croce Tested-by: Matteo Croce Tested-by: Jason Gunthorpe Tested-by: Leon Romanovsky Signed-off-by: Nicholas Piggin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/hvc/hvc_console.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index c917749708d2..bacf9b73ec98 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -623,6 +623,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty) #define MAX_TIMEOUT (2000) static u32 timeout = MIN_TIMEOUT; +/* + * Maximum number of bytes to get from the console driver if hvc_poll is + * called from driver (and can't sleep). Any more than this and we break + * and start polling with khvcd. This value was derived from from an OpenBMC + * console with the OPAL driver that results in about 0.25ms interrupts off + * latency. + */ +#define HVC_ATOMIC_READ_MAX 128 + #define HVC_POLL_READ 0x00000001 #define HVC_POLL_WRITE 0x00000002 @@ -669,8 +678,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep) if (!hp->irq_requested) poll_mask |= HVC_POLL_READ; + read_again: /* Read data if any */ - count = tty_buffer_request_room(&hp->port, N_INBUF); /* If flip is full, just reschedule a later read */ @@ -717,7 +726,18 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep) #endif /* CONFIG_MAGIC_SYSRQ */ tty_insert_flip_char(&hp->port, buf[i], 0); } - read_total = n; + read_total += n; + + if (may_sleep) { + /* Keep going until the flip is full */ + spin_unlock_irqrestore(&hp->lock, flags); + cond_resched(); + spin_lock_irqsave(&hp->lock, flags); + goto read_again; + } else if (read_total < HVC_ATOMIC_READ_MAX) { + /* Break and defer if it's a large read in atomic */ + goto read_again; + } /* * Latency break, schedule another poll immediately. -- GitLab From 7f2bf7840b74a160f908db83bc8829f8de10629b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 9 Sep 2018 15:39:16 +1000 Subject: [PATCH 1250/1692] tty: hvc: hvc_write() fix break condition Commit 550ddadcc758 ("tty: hvc: hvc_write() may sleep") broke the termination condition in case the driver stops accepting characters. This can result in unnecessary polling of the busy driver. Restore it by testing the hvc_push return code. 
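In other words, the loop has to distinguish "nothing left to send" from "the backend accepted nothing": in the latter case continuing to loop only spins against a busy driver. A condensed sketch of the intended loop shape, with hypothetical staging/push helpers (the real code in the hunk below also takes hp->lock and goes through hvc_flush()):

/* Sketch only: sketch_stage_bytes() copies into the output buffer,
 * sketch_push() returns how many bytes the backend actually accepted. */
static int sketch_write(struct hvc_struct *hp, const unsigned char *buf,
			int count)
{
	int written = 0;

	while (count > 0) {
		int staged = sketch_stage_bytes(hp, buf, count);
		int pushed = sketch_push(hp);

		buf += staged;
		count -= staged;
		written += staged;

		/* Backend accepted nothing: stop instead of spinning, and
		 * let a later flush/poll push the remainder once it drains. */
		if (!pushed)
			break;
	}

	return written;
}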
Tested-by: Matteo Croce Tested-by: Jason Gunthorpe Tested-by: Leon Romanovsky Signed-off-by: Nicholas Piggin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/hvc/hvc_console.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index bacf9b73ec98..27284a2dcd2b 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count return -EIO; while (count > 0) { + int ret = 0; + spin_lock_irqsave(&hp->lock, flags); rsize = hp->outbuf_size - hp->n_outbuf; @@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count } if (hp->n_outbuf > 0) - hvc_push(hp); + ret = hvc_push(hp); spin_unlock_irqrestore(&hp->lock, flags); + if (!ret) + break; + if (count) { if (hp->n_outbuf > 0) hvc_flush(hp); -- GitLab From 383584157786e09fed6d9e87b2cd8784b6709216 Mon Sep 17 00:00:00 2001 From: "Ahmed S. Darwish" Date: Mon, 10 Sep 2018 15:28:37 +0000 Subject: [PATCH 1251/1692] staging: gasket: TODO: re-implement using UIO The gasket in-kernel framework, recently introduced under staging, re-implements what is already long-time provided by the UIO subsystem, with extra PCI BAR remapping and MSI conveniences. Before moving it out of staging, make sure we add the new bits to the UIO framework instead, then transform its signle client, the Apex driver, to a proper UIO driver (uio_driver.h). Link: https://lkml.kernel.org/r/20180828103817.GB1397@do-kernel Signed-off-by: Ahmed S. Darwish Signed-off-by: Greg Kroah-Hartman --- drivers/staging/gasket/TODO | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/staging/gasket/TODO b/drivers/staging/gasket/TODO index 6ff8e01b04cc..5b1865f8af2d 100644 --- a/drivers/staging/gasket/TODO +++ b/drivers/staging/gasket/TODO @@ -1,9 +1,22 @@ This is a list of things that need to be done to get this driver out of the staging directory. + +- Implement the gasket framework's functionality through UIO instead of + introducing a new user-space drivers framework that is quite similar. + + UIO provides the necessary bits to implement user-space drivers. Meanwhile + the gasket APIs adds some extra conveniences like PCI BAR mapping, and + MSI interrupts. Add these features to the UIO subsystem, then re-implement + the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h) + - Document sysfs files with Documentation/ABI/ entries. + - Use misc interface instead of major number for driver version description. + - Add descriptions of module_param's + - apex_get_status() should actually check status. + - "drivers" should never be dealing with "raw" sysfs calls or mess around with kobjects at all. The driver core should handle all of this for you automaically. There should not be a need for raw attribute macros. -- GitLab From 3ebb17446b954b7d39264564ec3f7522d502e785 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Fri, 7 Sep 2018 02:02:45 +0000 Subject: [PATCH 1252/1692] ethernet: renesas: convert to SPDX identifiers This patch updates license to use SPDX-License-Identifier instead of verbose license text. Signed-off-by: Kuninori Morimoto Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/Kconfig | 1 + drivers/net/ethernet/renesas/Makefile | 1 + drivers/net/ethernet/renesas/ravb_ptp.c | 6 +----- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index f3f7477043ce..bb0ebdfd4459 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # Renesas device configuration # diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile index a05102a7df02..f21ab8c02af0 100644 --- a/drivers/net/ethernet/renesas/Makefile +++ b/drivers/net/ethernet/renesas/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # Makefile for the Renesas device drivers. # diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index eede70ec37f8..0721b5c35d91 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* PTP 1588 clock using the Renesas Ethernet AVB * * Copyright (C) 2013-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. * Copyright (C) 2015-2016 Cogent Embedded, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include "ravb.h" -- GitLab From 92a6803149465e2339f8f7f8f6415d75be80073d Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 5 Sep 2018 13:00:05 +0300 Subject: [PATCH 1253/1692] drm/i915/bdw: Increase IPS disable timeout to 100ms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During IPS disabling the current 42ms timeout value leads to occasional timeouts, increase it to 100ms which seems to get rid of the problem. References: https://bugs.freedesktop.org/show_bug.cgi?id=107494 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107562 Reported-by: Diego Viola Tested-by: Diego Viola Cc: Diego Viola Cc: Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180905100005.7663-1-imre.deak@intel.com (cherry picked from commit acb3ef0ee40ea657280a4a11d9f60eb2937c0dca) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_display.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4a3c8ee9a973..d2951096bca0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5079,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) mutex_lock(&dev_priv->pcu_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); mutex_unlock(&dev_priv->pcu_lock); - /* wait for pcode to finish disabling IPS, which may take up to 42ms */ + /* + * Wait for PCODE to finish disabling IPS. The BSpec specified + * 42ms timeout value leads to occasional timeouts so use 100ms + * instead. 
+ */ if (intel_wait_for_register(dev_priv, IPS_CTL, IPS_ENABLE, 0, - 42)) + 100)) DRM_ERROR("Timed out waiting for IPS disable\n"); } else { I915_WRITE(IPS_CTL, 0); -- GitLab From 7c5cca3588545e7f255171e28e0dd6e384ebb91d Mon Sep 17 00:00:00 2001 From: Kristian Evensen Date: Sat, 8 Sep 2018 13:50:48 +0200 Subject: [PATCH 1254/1692] qmi_wwan: Support dynamic config on Quectel EP06 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Quectel EP06 (and EM06/EG06) supports dynamic configuration of USB interfaces, without the device changing VID/PID or configuration number. When the configuration is updated and interfaces are added/removed, the interface numbers change. This means that the current code for matching EP06 does not work. This patch removes the current EP06 interface number match, and replaces it with a match on class, subclass and protocol. Unfortunately, matching on those three alone is not enough, as the diag interface exports the same values as QMI. The other serial interfaces + adb export different values and do not match. The diag interface only has two endpoints, while the QMI interface has three. I have therefore added a check for number of interfaces, and we ignore the interface if the number of endpoints equals two. Signed-off-by: Kristian Evensen Acked-by: Bjørn Mork Acked-by: Dan Williams Signed-off-by: David S. Miller --- drivers/net/usb/qmi_wwan.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index cb0cc30c3d6a..e3270deecec2 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -967,6 +967,13 @@ static const struct usb_device_id products[] = { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Quectel EP06/EG06/EM06 */ + USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, + 0xff), + .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, + }, /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ @@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ - {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ @@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf) return false; } +static bool quectel_ep06_diag_detected(struct usb_interface *intf) +{ + struct usb_device *dev = interface_to_usbdev(intf); + struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc; + + if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c && + le16_to_cpu(dev->descriptor.idProduct) == 0x0306 && + intf_desc.bNumEndpoints == 2) + return true; + + return false; +} + static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod) { @@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf, return -ENODEV; } + /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so + * we need to match on class/subclass/protocol. These values are + * identical for the diagnostic- and QMI-interface, but bNumEndpoints is + * different. 
Ignore the current interface if the number of endpoints + * the number for the diag interface (two). + */ + if (quectel_ep06_diag_detected(intf)) + return -ENODEV; + return usbnet_probe(intf, id); } -- GitLab From 0a3b53305c8ff427bbc1d9d5bd78524007f19600 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 7 Sep 2018 15:29:12 +0800 Subject: [PATCH 1255/1692] usb: xhci: fix interrupt transfer error happened on MTK platforms The MTK xHCI controller use some reserved bytes in endpoint context for bandwidth scheduling, so need keep them in xhci_endpoint_copy(); The issue is introduced by: commit f5249461b504 ("xhci: Clear the host side toggle manually when endpoint is soft reset") It resets endpoints and will drop bandwidth scheduling parameters used by interrupt or isochronous endpoints on MTK xHCI controller. Fixes: f5249461b504 ("xhci: Clear the host side toggle manually when endpoint is soft reset") Cc: stable@vger.kernel.org Signed-off-by: Chunfeng Yun Tested-by: Sean Wang Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-mem.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index ef350c33dc4a..b1f27aa38b10 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci, in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; in_ep_ctx->deq = out_ep_ctx->deq; in_ep_ctx->tx_info = out_ep_ctx->tx_info; + if (xhci->quirks & XHCI_MTK_HOST) { + in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0]; + in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1]; + } } /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. -- GitLab From fa827966090e2a6fc07b437d0d2ffae748ec6e28 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 20 Aug 2018 12:10:26 +0900 Subject: [PATCH 1256/1692] usb: Change usb_of_get_companion_dev() place to usb/common Since renesas_usb3 udc driver calls usb_of_get_companion_dev() which is on usb/core/of.c, build error like below happens if we disable CONFIG_USB because the usb/core/ needs CONFIG_USB: ERROR: "usb_of_get_companion_dev" [drivers/usb/gadget/udc/renesas_usb3.ko] undefined! According to the usb/gadget/Kconfig, "NOTE: Gadget support ** DOES NOT ** depend on host-side CONFIG_USB !!". So, to fix the issue, this patch changes the usb_of_get_companion_dev() place from usb/core/of.c to usb/common/common.c to be called by both host and gadget. Reported-by: John Garry Fixes: 39facfa01c9f ("usb: gadget: udc: renesas_usb3: Add register of usb role switch") Signed-off-by: Yoshihiro Shimoda Acked-by: Arnd Bergmann Signed-off-by: Greg Kroah-Hartman --- drivers/usb/common/common.c | 25 +++++++++++++++++++++++++ drivers/usb/core/of.c | 26 -------------------------- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c index 50a2362ed3ea..48277bbc15e4 100644 --- a/drivers/usb/common/common.c +++ b/drivers/usb/common/common.c @@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np, } EXPORT_SYMBOL_GPL(of_usb_update_otg_caps); +/** + * usb_of_get_companion_dev - Find the companion device + * @dev: the device pointer to find a companion + * + * Find the companion device from platform bus. + * + * Takes a reference to the returned struct device which needs to be dropped + * after use. + * + * Return: On success, a pointer to the companion device, %NULL on failure. 
+ */ +struct device *usb_of_get_companion_dev(struct device *dev) +{ + struct device_node *node; + struct platform_device *pdev = NULL; + + node = of_parse_phandle(dev->of_node, "companion", 0); + if (node) + pdev = of_find_device_by_node(node); + + of_node_put(node); + + return pdev ? &pdev->dev : NULL; +} +EXPORT_SYMBOL_GPL(usb_of_get_companion_dev); #endif MODULE_LICENSE("GPL"); diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c index fd77442c2d12..651708d8c908 100644 --- a/drivers/usb/core/of.c +++ b/drivers/usb/core/of.c @@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum) return NULL; } EXPORT_SYMBOL_GPL(usb_of_get_interface_node); - -/** - * usb_of_get_companion_dev - Find the companion device - * @dev: the device pointer to find a companion - * - * Find the companion device from platform bus. - * - * Takes a reference to the returned struct device which needs to be dropped - * after use. - * - * Return: On success, a pointer to the companion device, %NULL on failure. - */ -struct device *usb_of_get_companion_dev(struct device *dev) -{ - struct device_node *node; - struct platform_device *pdev = NULL; - - node = of_parse_phandle(dev->of_node, "companion", 0); - if (node) - pdev = of_find_device_by_node(node); - - of_node_put(node); - - return pdev ? &pdev->dev : NULL; -} -EXPORT_SYMBOL_GPL(usb_of_get_companion_dev); -- GitLab From df3aa13c7bbb307e172c37f193f9a7aa058d4739 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Wed, 5 Sep 2018 17:56:46 +0200 Subject: [PATCH 1257/1692] Revert "cdc-acm: implement put_char() and flush_chars()" This reverts commit a81cf9799ad7299b03a4dff020d9685f9ac5f3e0. The patch causes a regression, which I cannot find the reason for. So let's revert for now, as a revert hurts only performance. Original report: I was trying to resolve the problem with Oliver but we don't get any conclusion for 5 months, so I am now sending this to mail list and cdc_acm authors. I am using simple request-response protocol to obtain the boiller parameters in constant intervals. A simple one transaction is: 1. opening the /dev/ttyACM0 2. sending the following 10-bytes request to the device: unsigned char req[] = {0x02, 0xfe, 0x01, 0x05, 0x08, 0x02, 0x01, 0x69, 0xab, 0x03}; 3. reading response (frame of 74 bytes length). 4. closing the descriptor I am doing this transaction with 5 seconds intervals. Before the bad commit everything was working correctly: I've got a requests and a responses in a timely manner. After the bad commit more time I am using the kernel module, more problems I have. The graph [2] is showing the problem. As you can see after module load all seems fine but after about 30 minutes I've got a plenty of EAGAINs when doing read()'s and trying to read back the data. When I rmmod and insmod the cdc_acm module again, then the situation is starting over again: running ok shortly after load, and more time it is running, more EAGAINs I have when calling read(). As a bonus I can see the problem on the device itself: The device is configured as you can see here on this screen [3]. It has two transmision LEDs: TX and RX. Blink duration is set for 100ms. This is a recording before the bad commit when all is working fine: [4] And this is with the bad commit: [5] As you can see the TX led is blinking wrongly long (indicating transmission?) and I have problems doing read() calls (EAGAIN). 
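The reported transaction is small enough to capture as a standalone test program. A minimal userspace reproducer along the lines of the report (the port setup is an assumption, since the report gives no termios details, and blocking reads are used here for simplicity even though the reported EAGAINs suggest the original program read in non-blocking mode):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static const unsigned char req[] = {
		0x02, 0xfe, 0x01, 0x05, 0x08, 0x02, 0x01, 0x69, 0xab, 0x03
	};
	unsigned char resp[74];		/* expected 74-byte response frame */
	size_t got = 0;
	int fd = open("/dev/ttyACM0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (write(fd, req, sizeof(req)) != (ssize_t)sizeof(req)) {
		perror("write");
		goto out;
	}

	/* With the reverted commit in place, these reads eventually start
	 * failing (EAGAIN in the reporter's non-blocking setup). */
	while (got < sizeof(resp)) {
		ssize_t n = read(fd, resp + got, sizeof(resp) - got);
		if (n <= 0) {
			perror("read");
			break;
		}
		got += n;
	}
out:
	close(fd);
	return got == sizeof(resp) ? 0 : 1;
}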
Reported-by: Mariusz Bialonczyk Signed-off-by: Oliver Neukum Fixes: a81cf9799ad7 ("cdc-acm: implement put_char() and flush_chars()") Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-acm.c | 73 ------------------------------------- drivers/usb/class/cdc-acm.h | 1 - 2 files changed, 74 deletions(-) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 27346d69f393..f9b40a9dc4d3 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -780,20 +780,9 @@ static int acm_tty_write(struct tty_struct *tty, } if (acm->susp_count) { - if (acm->putbuffer) { - /* now to preserve order */ - usb_anchor_urb(acm->putbuffer->urb, &acm->delayed); - acm->putbuffer = NULL; - } usb_anchor_urb(wb->urb, &acm->delayed); spin_unlock_irqrestore(&acm->write_lock, flags); return count; - } else { - if (acm->putbuffer) { - /* at this point there is no good way to handle errors */ - acm_start_wb(acm, acm->putbuffer); - acm->putbuffer = NULL; - } } stat = acm_start_wb(acm, wb); @@ -804,66 +793,6 @@ static int acm_tty_write(struct tty_struct *tty, return count; } -static void acm_tty_flush_chars(struct tty_struct *tty) -{ - struct acm *acm = tty->driver_data; - struct acm_wb *cur; - int err; - unsigned long flags; - - spin_lock_irqsave(&acm->write_lock, flags); - - cur = acm->putbuffer; - if (!cur) /* nothing to do */ - goto out; - - acm->putbuffer = NULL; - err = usb_autopm_get_interface_async(acm->control); - if (err < 0) { - cur->use = 0; - acm->putbuffer = cur; - goto out; - } - - if (acm->susp_count) - usb_anchor_urb(cur->urb, &acm->delayed); - else - acm_start_wb(acm, cur); -out: - spin_unlock_irqrestore(&acm->write_lock, flags); - return; -} - -static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch) -{ - struct acm *acm = tty->driver_data; - struct acm_wb *cur; - int wbn; - unsigned long flags; - -overflow: - cur = acm->putbuffer; - if (!cur) { - spin_lock_irqsave(&acm->write_lock, flags); - wbn = acm_wb_alloc(acm); - if (wbn >= 0) { - cur = &acm->wb[wbn]; - acm->putbuffer = cur; - } - spin_unlock_irqrestore(&acm->write_lock, flags); - if (!cur) - return 0; - } - - if (cur->len == acm->writesize) { - acm_tty_flush_chars(tty); - goto overflow; - } - - cur->buf[cur->len++] = ch; - return 1; -} - static int acm_tty_write_room(struct tty_struct *tty) { struct acm *acm = tty->driver_data; @@ -1987,8 +1916,6 @@ static const struct tty_operations acm_ops = { .cleanup = acm_tty_cleanup, .hangup = acm_tty_hangup, .write = acm_tty_write, - .put_char = acm_tty_put_char, - .flush_chars = acm_tty_flush_chars, .write_room = acm_tty_write_room, .ioctl = acm_tty_ioctl, .throttle = acm_tty_throttle, diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index eacc116e83da..ca06b20d7af9 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -96,7 +96,6 @@ struct acm { unsigned long read_urbs_free; struct urb *read_urbs[ACM_NR]; struct acm_rb read_buffers[ACM_NR]; - struct acm_wb *putbuffer; /* for acm_tty_put_char() */ int rx_buflimit; spinlock_t read_lock; u8 *notification_buffer; /* to reassemble fragmented notifications */ -- GitLab From 658d8cbd07dae22ccecf49399e18c609c4e85c53 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Wed, 25 Jul 2018 14:29:07 +0200 Subject: [PATCH 1258/1692] drm/vc4: Fix the "no scaling" case on multi-planar YUV formats When there's no scaling requested ->is_unity should be true no matter the format. 
Also, when no scaling is requested and we have a multi-planar YUV format, we should leave ->y_scaling[0] to VC4_SCALING_NONE and only set ->x_scaling[0] to VC4_SCALING_PPF. Doing this fixes an hardly visible artifact (seen when using modetest and a rather big overlay plane in YUV420). Fixes: fc04023fafec ("drm/vc4: Add support for YUV planes.") Cc: Signed-off-by: Boris Brezillon Reviewed-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20180725122907.13702-1-boris.brezillon@bootlin.com Signed-off-by: Sean Paul --- drivers/gpu/drm/vc4/vc4_plane.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index cfb50fedfa2b..a3275fa66b7b 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], vc4_state->crtc_h); + vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && + vc4_state->y_scaling[0] == VC4_SCALING_NONE); + if (num_planes > 1) { vc4_state->is_yuv = true; @@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_get_scaling_mode(vc4_state->src_h[1], vc4_state->crtc_h); - /* YUV conversion requires that scaling be enabled, - * even on a plane that's otherwise 1:1. Choose TPZ - * for simplicity. + /* YUV conversion requires that horizontal scaling be enabled, + * even on a plane that's otherwise 1:1. Looks like only PPF + * works in that case, so let's pick that one. */ - if (vc4_state->x_scaling[0] == VC4_SCALING_NONE) - vc4_state->x_scaling[0] = VC4_SCALING_TPZ; - if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) - vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + if (vc4_state->is_unity) + vc4_state->x_scaling[0] = VC4_SCALING_PPF; } else { vc4_state->x_scaling[1] = VC4_SCALING_NONE; vc4_state->y_scaling[1] = VC4_SCALING_NONE; } - vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && - vc4_state->y_scaling[0] == VC4_SCALING_NONE && - vc4_state->x_scaling[1] == VC4_SCALING_NONE && - vc4_state->y_scaling[1] == VC4_SCALING_NONE); - /* No configuring scaling on the cursor plane, since it gets non-vblank-synced updates, and scaling requires requires LBM changes which have to be vblank-synced. @@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane, vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); } - if (!vc4_state->is_unity) { + if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || + vc4_state->x_scaling[1] != VC4_SCALING_NONE || + vc4_state->y_scaling[0] != VC4_SCALING_NONE || + vc4_state->y_scaling[1] != VC4_SCALING_NONE) { /* LBM Base Address. */ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || vc4_state->y_scaling[1] != VC4_SCALING_NONE) { -- GitLab From 7eb33224572636248d5b6cfa1a6b2472207be5c4 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Sat, 4 Aug 2018 18:49:27 +0800 Subject: [PATCH 1259/1692] drm/pl111: Make sure of_device_id tables are NULL terminated We prefer to of_device_id tables are NULL terminated. So make vexpress_muxfpga_match is NULL terminated. 
Signed-off-by: zhong jiang Signed-off-by: Linus Walleij Link: https://patchwork.freedesktop.org/patch/msgid/1533379767-15629-1-git-send-email-zhongjiang@huawei.com Signed-off-by: Sean Paul --- drivers/gpu/drm/pl111/pl111_vexpress.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c index a534b225e31b..5fa0441bb6df 100644 --- a/drivers/gpu/drm/pl111/pl111_vexpress.c +++ b/drivers/gpu/drm/pl111/pl111_vexpress.c @@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev) } static const struct of_device_id vexpress_muxfpga_match[] = { - { .compatible = "arm,vexpress-muxfpga", } + { .compatible = "arm,vexpress-muxfpga", }, + {} }; static struct platform_driver vexpress_muxfpga_driver = { -- GitLab From 3510e7a7f91088159bfc67e8abdc9f9e77d28870 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 27 Aug 2018 16:39:50 +0800 Subject: [PATCH 1260/1692] drm/sun4i: Remove R40 display pipeline compatibles Two patches from the R40 display pipeline support series weren't applied with the rest of the series. When they did get applied, the -rc6 deadline for drm-misc-next had past, so they didn't get into 4.19-rc1 with the rest of the series. However, the two patches are crucial in the parsing of the R40's display pipeline graph in the device tree. Without them, the driver crashes because it can't follow the odd graph structure. This patch removes the R40 compatibles from the sun4i-drm driver, effectively disabling DRM support for the R40 for one release cycle. This will prevent the driver from crashing upon probing. The compatibles should be reinstated for the next release. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20180827083950.602-1-wens@csie.org Signed-off-by: Sean Paul --- drivers/gpu/drm/sun4i/sun4i_drv.c | 1 - drivers/gpu/drm/sun4i/sun8i_mixer.c | 24 ------------------------ drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 1 - 3 files changed, 26 deletions(-) diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index dd19d674055c..8b0cd08034e0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = { { .compatible = "allwinner,sun8i-a33-display-engine" }, { .compatible = "allwinner,sun8i-a83t-display-engine" }, { .compatible = "allwinner,sun8i-h3-display-engine" }, - { .compatible = "allwinner,sun8i-r40-display-engine" }, { .compatible = "allwinner,sun8i-v3s-display-engine" }, { .compatible = "allwinner,sun9i-a80-display-engine" }, { } diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index fc3713608f78..cb65b0ed53fd 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = { .vi_num = 1, }; -static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = { - .ccsc = 0, - .mod_rate = 297000000, - .scaler_mask = 0xf, - .ui_num = 3, - .vi_num = 1, -}; - -static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = { - .ccsc = 1, - .mod_rate = 297000000, - .scaler_mask = 0x3, - .ui_num = 1, - .vi_num = 1, -}; - static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { .vi_num = 2, .ui_num = 1, @@ -582,14 +566,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = { .compatible = "allwinner,sun8i-h3-de2-mixer-0", .data = 
&sun8i_h3_mixer0_cfg, }, - { - .compatible = "allwinner,sun8i-r40-de2-mixer-0", - .data = &sun8i_r40_mixer0_cfg, - }, - { - .compatible = "allwinner,sun8i-r40-de2-mixer-1", - .data = &sun8i_r40_mixer1_cfg, - }, { .compatible = "allwinner,sun8i-v3s-de2-mixer", .data = &sun8i_v3s_mixer_cfg, diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index 55fe398d8290..d5240b777a8f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c @@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev) /* sun4i_drv uses this list to check if a device node is a TCON TOP */ const struct of_device_id sun8i_tcon_top_of_table[] = { - { .compatible = "allwinner,sun8i-r40-tcon-top" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table); -- GitLab From fcb74da1eb8edd3a4ef9b9724f88ed709d684227 Mon Sep 17 00:00:00 2001 From: Emil Lundmark Date: Mon, 28 May 2018 16:27:11 +0200 Subject: [PATCH 1261/1692] drm: udl: Destroy framebuffer only if it was initialized MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes a NULL pointer dereference that can happen if the UDL driver is unloaded before the framebuffer is initialized. This can happen e.g. if the USB device is unplugged right after it was plugged in. As explained by Stéphane Marchesin: It happens when fbdev is disabled (which is the case for Chrome OS). Even though intialization of the fbdev part is optional (it's done in udlfb_create which is the callback for fb_probe()), the teardown isn't optional (udl_driver_unload -> udl_fbdev_cleanup -> udl_fbdev_destroy). Note that udl_fbdev_cleanup *tries* to be conditional (you can see it does if (!udl->fbdev)) but that doesn't work, because udl->fbdev is always set during udl_fbdev_init. Cc: stable@vger.kernel.org Suggested-by: Sean Paul Reviewed-by: Sean Paul Acked-by: Daniel Vetter Signed-off-by: Emil Lundmark Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20180528142711.142466-1-lndmrk@chromium.org Signed-off-by: Sean Paul --- drivers/gpu/drm/udl/udl_fb.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index dbb62f6eb48a..dd9ffded223b 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev, { drm_fb_helper_unregister_fbi(&ufbdev->helper); drm_fb_helper_fini(&ufbdev->helper); - drm_framebuffer_unregister_private(&ufbdev->ufb.base); - drm_framebuffer_cleanup(&ufbdev->ufb.base); - drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); + if (ufbdev->ufb.obj) { + drm_framebuffer_unregister_private(&ufbdev->ufb.base); + drm_framebuffer_cleanup(&ufbdev->ufb.base); + drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); + } } int udl_fbdev_init(struct drm_device *dev) -- GitLab From affab51082174f60ef71ced8ab5fbe71f00e9ae3 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Mon, 10 Sep 2018 13:01:52 -0500 Subject: [PATCH 1262/1692] platform/x86: dell-smbios-wmi: Correct a memory leak ACPI buffers were being allocated but never freed. 
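The underlying rule: when a WMI/ACPI method is evaluated with an output buffer set up as ACPI_ALLOCATE_BUFFER, ACPICA allocates output.pointer on the caller's behalf and the caller owns it, so it must be freed once the result has been consumed; skipping that step leaks one allocation per call. A condensed sketch of the pattern (generic names and method id, not the dell-smbios-wmi code):

static int read_u32_from_wmi(struct wmi_device *wdev, u32 method_id, u32 *val)
{
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret = -EIO;

	/* ACPICA allocates out.pointer for us; we own it from here on. */
	status = wmidev_evaluate_method(wdev, 0, method_id, NULL, &out);
	if (ACPI_FAILURE(status))
		return -EIO;

	obj = out.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER) {
		*val = (u32)obj->integer.value;
		ret = 0;
	}

	kfree(out.pointer);	/* the step the leaking callers were missing */
	return ret;
}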
Reported-by: Pinzhen Xu Signed-off-by: Mario Limonciello Cc: stable@vger.kernel.org Signed-off-by: Darren Hart (VMware) --- drivers/platform/x86/dell-smbios-wmi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index 88afe5651d24..cf2229ece9ff 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c @@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev) dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", priv->buf->std.output[0], priv->buf->std.output[1], priv->buf->std.output[2], priv->buf->std.output[3]); + kfree(output.pointer); return 0; } -- GitLab From ff0e9f26288d2daee4950f42b37a3d3d30d36ec1 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Mon, 10 Sep 2018 13:01:53 -0500 Subject: [PATCH 1263/1692] platform/x86: alienware-wmi: Correct a memory leak An ACPI buffer that was allocated was not being freed after use. Signed-off-by: Mario Limonciello Cc: stable@vger.kernel.org Signed-off-by: Darren Hart (VMware) --- drivers/platform/x86/alienware-wmi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index d975462a4c57..f10af5c383c5 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args, if (obj && obj->type == ACPI_TYPE_INTEGER) *out_data = (u32) obj->integer.value; } + kfree(output.pointer); return status; } -- GitLab From 3ab91828166895600efd9cdc3a0eb32001f7204a Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Mon, 10 Sep 2018 16:50:09 +0100 Subject: [PATCH 1264/1692] dm thin metadata: try to avoid ever aborting transactions Committing a transaction can consume some metadata of it's own, we now reserve a small amount of metadata to cover this. Free metadata reported by the kernel will not include this reserve. If any of the reserve has been used after a commit we enter a new internal state PM_OUT_OF_METADATA_SPACE. This is reported as PM_READ_ONLY, so no userland changes are needed. If the metadata device is resized the pool will move back to PM_WRITE. These changes mean we never need to abort and rollback a transaction due to running out of metadata space. This is particularly important because there have been a handful of reports of data corruption against DM thin-provisioning that can all be attributed to the thin-pool having ran out of metadata space. Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer --- drivers/md/dm-thin-metadata.c | 36 ++++++++++++++++- drivers/md/dm-thin.c | 73 +++++++++++++++++++++++++++++++---- 2 files changed, 100 insertions(+), 9 deletions(-) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 72142021b5c9..74f6770c70b1 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -188,6 +188,12 @@ struct dm_pool_metadata { unsigned long flags; sector_t data_block_size; + /* + * We reserve a section of the metadata for commit overhead. + * All reported space does *not* include this. + */ + dm_block_t metadata_reserve; + /* * Set if a transaction has to be aborted but the attempt to roll back * to the previous (good) transaction failed. 
The only pool metadata @@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) return dm_tm_commit(pmd->tm, sblock); } +static void __set_metadata_reserve(struct dm_pool_metadata *pmd) +{ + int r; + dm_block_t total; + dm_block_t max_blocks = 4096; /* 16M */ + + r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total); + if (r) { + DMERR("could not get size of metadata device"); + pmd->metadata_reserve = max_blocks; + } else { + sector_div(total, 10); + pmd->metadata_reserve = min(max_blocks, total); + } +} + struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, sector_t data_block_size, bool format_device) @@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, return ERR_PTR(r); } + __set_metadata_reserve(pmd); + return pmd; } @@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd, down_read(&pmd->root_lock); if (!pmd->fail_io) r = dm_sm_get_nr_free(pmd->metadata_sm, result); + + if (!r) { + if (*result < pmd->metadata_reserve) + *result = 0; + else + *result -= pmd->metadata_reserve; + } up_read(&pmd->root_lock); return r; @@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou int r = -EINVAL; down_write(&pmd->root_lock); - if (!pmd->fail_io) + if (!pmd->fail_io) { r = __resize_space_map(pmd->metadata_sm, new_count); + if (!r) + __set_metadata_reserve(pmd); + } up_write(&pmd->root_lock); return r; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 7bd60a150f8f..aaf1ad481ee8 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -200,7 +200,13 @@ struct dm_thin_new_mapping; enum pool_mode { PM_WRITE, /* metadata may be changed */ PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ + + /* + * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY. 
+ */ + PM_OUT_OF_METADATA_SPACE, PM_READ_ONLY, /* metadata may not be changed */ + PM_FAIL, /* all I/O fails */ }; @@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); static void requeue_bios(struct pool *pool); -static void check_for_space(struct pool *pool) +static bool is_read_only_pool_mode(enum pool_mode mode) +{ + return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY); +} + +static bool is_read_only(struct pool *pool) +{ + return is_read_only_pool_mode(get_pool_mode(pool)); +} + +static void check_for_metadata_space(struct pool *pool) +{ + int r; + const char *ooms_reason = NULL; + dm_block_t nr_free; + + r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free); + if (r) + ooms_reason = "Could not get free metadata blocks"; + else if (!nr_free) + ooms_reason = "No free metadata blocks"; + + if (ooms_reason && !is_read_only(pool)) { + DMERR("%s", ooms_reason); + set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE); + } +} + +static void check_for_data_space(struct pool *pool) { int r; dm_block_t nr_free; @@ -1397,14 +1431,16 @@ static int commit(struct pool *pool) { int r; - if (get_pool_mode(pool) >= PM_READ_ONLY) + if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) return -EINVAL; r = dm_pool_commit_metadata(pool->pmd); if (r) metadata_operation_failed(pool, "dm_pool_commit_metadata", r); - else - check_for_space(pool); + else { + check_for_metadata_space(pool); + check_for_data_space(pool); + } return r; } @@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) return r; } + r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks); + if (r) { + metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r); + return r; + } + + if (!free_blocks) { + /* Let's commit before we use up the metadata reserve. */ + r = commit(pool); + if (r) + return r; + } + return 0; } @@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool) case PM_OUT_OF_DATA_SPACE: return pool->pf.error_if_no_space ? 
BLK_STS_NOSPC : 0; + case PM_OUT_OF_METADATA_SPACE: case PM_READ_ONLY: case PM_FAIL: return BLK_STS_IOERR; @@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) error_retry_list(pool); break; + case PM_OUT_OF_METADATA_SPACE: case PM_READ_ONLY: - if (old_mode != new_mode) + if (!is_read_only_pool_mode(old_mode)) notify_of_pool_mode_change(pool, "read-only"); dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_read_only; @@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) DMINFO("%s: growing the metadata device from %llu to %llu blocks", dm_device_name(pool->pool_md), sb_metadata_dev_size, metadata_dev_size); + + if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE) + set_pool_mode(pool, PM_WRITE); + r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); if (r) { metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); @@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv, struct pool_c *pt = ti->private; struct pool *pool = pt->pool; - if (get_pool_mode(pool) >= PM_READ_ONLY) { + if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) { DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", dm_device_name(pool->pool_md)); return -EOPNOTSUPP; @@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type, dm_block_t nr_blocks_data; dm_block_t nr_blocks_metadata; dm_block_t held_root; + enum pool_mode mode; char buf[BDEVNAME_SIZE]; char buf2[BDEVNAME_SIZE]; struct pool_c *pt = ti->private; @@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type, else DMEMIT("- "); - if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) + mode = get_pool_mode(pool); + if (mode == PM_OUT_OF_DATA_SPACE) DMEMIT("out_of_data_space "); - else if (pool->pf.mode == PM_READ_ONLY) + else if (is_read_only_pool_mode(mode)) DMEMIT("ro "); else DMEMIT("rw "); -- GitLab From f94e63801ab2791ed64c409d0f751f6a0c953ead Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 24 Aug 2018 23:22:08 +0200 Subject: [PATCH 1265/1692] netfilter: conntrack: reset tcp maxwin on re-register Doug Smythies says: Sometimes it is desirable to temporarily disable, or clear, the iptables rule set on a computer being controlled via a secure shell session (SSH). While unwise on an internet facing computer, I also do it often on non-internet accessible computers while testing. Recently, this has become problematic, with the SSH session being dropped upon re-load of the rule set. The problem is that when all rules are deleted, conntrack hooks get unregistered. In case the rules are re-added later, its possible that tcp window has moved far enough so that all packets are considered invalid (out of window) until entry expires (which can take forever, default established timeout is 5 days). Fix this by clearing maxwin of existing tcp connections on register. v2: don't touch entries on hook removal. v3: remove obsolete expiry check. 
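The mechanism of the fix, condensed: on hook registration the code walks every existing conntrack entry with nf_ct_iterate_cleanup_net() and zeroes the learned TCP window state, so established flows are re-validated instead of being dropped as out-of-window until they expire. Note that the "cleanup" iterator is used purely as a walker here: entries are only deleted when the callback returns non-zero, and this callback always returns 0. A stripped-down version of the callback (the full hunk below also checks the address family being re-registered):

static int tcp_window_fixup(struct nf_conn *ct, void *data)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP &&
	    ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
		/* forget the tracked window so it gets re-learned */
		ct->proto.tcp.seen[0].td_maxwin = 0;
		ct->proto.tcp.seen[1].td_maxwin = 0;
	}
	return 0;	/* returning non-zero would delete the entry */
}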
Reported-by: Doug Smythies Fixes: 4d3a57f23dec59 ("netfilter: conntrack: do not enable connection tracking unless needed") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_proto.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 9f14b0df6960..51c5d7eec0a3 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = { }; #endif +static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto) +{ + u8 nfproto = (unsigned long)_nfproto; + + if (nf_ct_l3num(ct) != nfproto) + return 0; + + if (nf_ct_protonum(ct) == IPPROTO_TCP && + ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) { + ct->proto.tcp.seen[0].td_maxwin = 0; + ct->proto.tcp.seen[1].td_maxwin = 0; + } + + return 0; +} + static int nf_ct_netns_do_get(struct net *net, u8 nfproto) { struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); + bool fixup_needed = false; int err = 0; mutex_lock(&nf_ct_proto_mutex); @@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) ARRAY_SIZE(ipv4_conntrack_ops)); if (err) cnet->users4 = 0; + else + fixup_needed = true; break; #if IS_ENABLED(CONFIG_IPV6) case NFPROTO_IPV6: @@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) ARRAY_SIZE(ipv6_conntrack_ops)); if (err) cnet->users6 = 0; + else + fixup_needed = true; break; #endif default: @@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto) } out_unlock: mutex_unlock(&nf_ct_proto_mutex); + + if (fixup_needed) + nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup, + (void *)(unsigned long)nfproto, 0, 0); + return err; } -- GitLab From a874752a10da113f513980e28f562d946d3f829d Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 31 Aug 2018 12:36:01 +0200 Subject: [PATCH 1266/1692] netfilter: conntrack: timeout interface depend on CONFIG_NF_CONNTRACK_TIMEOUT Now that cttimeout support for nft_ct is in place, these should depend on CONFIG_NF_CONNTRACK_TIMEOUT otherwise we can crash when dumping the policy if this option is not enabled. [ 71.600121] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 [...] [ 71.600141] CPU: 3 PID: 7612 Comm: nft Not tainted 4.18.0+ #246 [...] [ 71.600188] Call Trace: [ 71.600201] ? 
nft_ct_timeout_obj_dump+0xc6/0xf0 [nft_ct] Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_proto_dccp.c | 12 ++++++------ net/netfilter/nf_conntrack_proto_generic.c | 8 ++++---- net/netfilter/nf_conntrack_proto_gre.c | 8 ++++---- net/netfilter/nf_conntrack_proto_icmp.c | 8 ++++---- net/netfilter/nf_conntrack_proto_icmpv6.c | 8 ++++---- net/netfilter/nf_conntrack_proto_sctp.c | 14 +++++++------- net/netfilter/nf_conntrack_proto_tcp.c | 12 ++++++------ net/netfilter/nf_conntrack_proto_udp.c | 20 ++++++++++---------- 8 files changed, 45 insertions(+), 45 deletions(-) diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index b81f70039828..f3f91ed2c21a 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) } #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include #include @@ -728,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = { [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, }; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_SYSCTL /* template, data assigned later */ @@ -863,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = dccp_timeout_nlattr_to_obj, .obj_to_nlattr = dccp_timeout_obj_to_nlattr, @@ -871,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, .nla_policy = dccp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = dccp_init_net, .get_net_proto = dccp_get_net_proto, }; @@ -896,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = dccp_timeout_nlattr_to_obj, .obj_to_nlattr = dccp_timeout_obj_to_nlattr, @@ -904,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, .nla_policy = dccp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = dccp_init_net, .get_net_proto = dccp_get_net_proto, }; diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index ac4a0b296dcd..1df3244ecd07 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c @@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, return ret; } -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include #include @@ -113,7 +113,7 @@ static const struct nla_policy generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, }; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table generic_sysctl_table[] = { @@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto 
nf_conntrack_l4proto_generic = .pkt_to_tuple = generic_pkt_to_tuple, .packet = generic_packet, .new = generic_new, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = generic_timeout_nlattr_to_obj, .obj_to_nlattr = generic_timeout_obj_to_nlattr, @@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = .obj_size = sizeof(unsigned int), .nla_policy = generic_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = generic_init_net, .get_net_proto = generic_get_net_proto, }; diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index d1632252bf5b..650eb4fba2c5 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct) nf_ct_gre_keymap_destroy(master); } -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include #include @@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = { [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 }, [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 }, }; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ static int gre_init_net(struct net *net, u_int16_t proto) { @@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = gre_timeout_nlattr_to_obj, .obj_to_nlattr = gre_timeout_obj_to_nlattr, @@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { .obj_size = sizeof(unsigned int) * GRE_CT_MAX, .nla_policy = gre_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .net_id = &proto_gre_net_id, .init_net = gre_init_net, }; diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c index 036670b38282..43c7e1a217b9 100644 --- a/net/netfilter/nf_conntrack_proto_icmp.c +++ b/net/netfilter/nf_conntrack_proto_icmp.c @@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void) } #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include #include @@ -313,7 +313,7 @@ static const struct nla_policy icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, }; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table icmp_sysctl_table[] = { @@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = .nlattr_to_tuple = icmp_nlattr_to_tuple, .nla_policy = icmp_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = icmp_timeout_nlattr_to_obj, .obj_to_nlattr = icmp_timeout_obj_to_nlattr, @@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = .obj_size = sizeof(unsigned int), .nla_policy = icmp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = icmp_init_net, .get_net_proto = icmp_get_net_proto, }; diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c index 
bed07b998a10..97e40f77d678 100644 --- a/net/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/netfilter/nf_conntrack_proto_icmpv6.c @@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void) } #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include #include @@ -314,7 +314,7 @@ static const struct nla_policy icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = { [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 }, }; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table icmpv6_sysctl_table[] = { @@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = .nlattr_to_tuple = icmpv6_nlattr_to_tuple, .nla_policy = icmpv6_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj, .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr, @@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = .obj_size = sizeof(unsigned int), .nla_policy = icmpv6_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = icmpv6_init_net, .get_net_proto = icmpv6_get_net_proto, }; diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 5eddfd32b852..e4d738d34cd0 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) } #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include #include @@ -646,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = { [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 }, [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 }, }; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_SYSCTL @@ -780,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = sctp_timeout_nlattr_to_obj, .obj_to_nlattr = sctp_timeout_obj_to_nlattr, @@ -788,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, .nla_policy = sctp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = sctp_init_net, .get_net_proto = sctp_get_net_proto, }; @@ -813,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#endif +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = sctp_timeout_nlattr_to_obj, .obj_to_nlattr = sctp_timeout_obj_to_nlattr, @@ -821,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, .nla_policy = sctp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ -#endif +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = sctp_init_net, .get_net_proto = sctp_get_net_proto, }; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c 
b/net/netfilter/nf_conntrack_proto_tcp.c index 3e2dc56a96c3..b4bdf9eda7b7 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void) } #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include #include @@ -1394,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = { [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 }, [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 }, }; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table tcp_sysctl_table[] = { @@ -1558,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = .nlattr_size = TCP_NLATTR_SIZE, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = tcp_timeout_nlattr_to_obj, .obj_to_nlattr = tcp_timeout_obj_to_nlattr, @@ -1567,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = TCP_CONNTRACK_TIMEOUT_MAX, .nla_policy = tcp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = tcp_init_net, .get_net_proto = tcp_get_net_proto, }; @@ -1593,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = .nlattr_tuple_size = tcp_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = tcp_timeout_nlattr_to_obj, .obj_to_nlattr = tcp_timeout_obj_to_nlattr, @@ -1602,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = TCP_CONNTRACK_TIMEOUT_MAX, .nla_policy = tcp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = tcp_init_net, .get_net_proto = tcp_get_net_proto, }; diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 9272a2c525a8..3065fb8ef91b 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, return NF_ACCEPT; } -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include #include @@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = { [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 }, [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 }, }; -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table udp_sysctl_table[] = { @@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = udp_timeout_nlattr_to_obj, .obj_to_nlattr = udp_timeout_obj_to_nlattr, @@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, .nla_policy = udp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = udp_init_net, .get_net_proto = udp_get_net_proto, }; @@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto 
nf_conntrack_l4proto_udplite4 = .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = udp_timeout_nlattr_to_obj, .obj_to_nlattr = udp_timeout_obj_to_nlattr, @@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, .nla_policy = udp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = udp_init_net, .get_net_proto = udp_get_net_proto, }; @@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = udp_timeout_nlattr_to_obj, .obj_to_nlattr = udp_timeout_obj_to_nlattr, @@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, .nla_policy = udp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = udp_init_net, .get_net_proto = udp_get_net_proto, }; @@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = udp_timeout_nlattr_to_obj, .obj_to_nlattr = udp_timeout_obj_to_nlattr, @@ -387,7 +387,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, .nla_policy = udp_timeout_nla_policy, }, -#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ +#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ .init_net = udp_init_net, .get_net_proto = udp_get_net_proto, }; -- GitLab From 99e25d071fca91eb90ffa2f51240547a69137bde Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 3 Sep 2018 13:53:22 +0200 Subject: [PATCH 1267/1692] netfilter: cttimeout: ctnl_timeout_find_get() returns incorrect pointer to type Compiler did not catch incorrect typing in the rcu hook assignment. % nfct add timeout test-tcp inet tcp established 100 close 10 close_wait 10 % iptables -I OUTPUT -t raw -p tcp -j CT --timeout test-tcp dmesg - xt_CT: Timeout policy `test-tcp' can only be used by L3 protocol number 25000 The CT target bails out with incorrect layer 3 protocol number. 
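The root cause is a plain C type mix-up rather than anything netfilter specific: struct ctnl_timeout embeds a struct nf_ct_timeout as a member, and the lookup helper handed callers a pointer to the containing object where a pointer to the embedded member was expected, so every field was read from the wrong offset (hence the nonsense L3 protocol number). A standalone illustration of the bug class, using made-up layouts rather than the real kernel structs:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct timeout_policy {      /* stand-in for struct nf_ct_timeout */
        uint16_t l3num;
        uint8_t  l4num;
    };

    struct timeout_object {      /* stand-in for struct ctnl_timeout  */
        int refcnt;
        char name[32];
        struct timeout_policy timeout;
    };

    int main(void)
    {
        struct timeout_object obj = {
            .refcnt  = 1,
            .name    = "test-tcp",
            .timeout = { .l3num = 2 /* AF_INET */, .l4num = 6 /* IPPROTO_TCP */ },
        };
        uint16_t bogus;

        /* What the buggy code effectively did: treat &obj as if it were the
         * embedded policy, so reading ->l3num picks up bytes of refcnt. */
        memcpy(&bogus, &obj, sizeof(bogus));

        printf("l3num via container pointer: %u\n", bogus);            /* garbage */
        printf("l3num via &obj.timeout:      %u\n", obj.timeout.l3num); /* 2 */
        return 0;
    }

Returning &matching->timeout, as the fix below does, hands callers a pointer whose type and offset actually match what they dereference.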
Fixes: 6c1fd7dc489d ("netfilter: cttimeout: decouple timeout policy from nfnetlink_cttimeout object") Reported-by: Harsha Sharma Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink_cttimeout.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index d46a236cdf31..a30f8ba4b89a 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -489,8 +489,8 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl, return err; } -static struct ctnl_timeout * -ctnl_timeout_find_get(struct net *net, const char *name) +static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net, + const char *name) { struct ctnl_timeout *timeout, *matching = NULL; @@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name) break; } err: - return matching; + return matching ? &matching->timeout : NULL; } static void ctnl_timeout_put(struct nf_ct_timeout *t) -- GitLab From ad18d7bf68a3da860ebb62a59c449804a6d237b4 Mon Sep 17 00:00:00 2001 From: Michal 'vorner' Vaner Date: Tue, 4 Sep 2018 13:25:44 +0200 Subject: [PATCH 1268/1692] netfilter: nfnetlink_queue: Solve the NFQUEUE/conntrack clash for NF_REPEAT NF_REPEAT places the packet at the beginning of the iptables chain instead of accepting or rejecting it right away. The packet however will reach the end of the chain and continue to the end of iptables eventually, so it needs the same handling as NF_ACCEPT and NF_DROP. Fixes: 368982cd7d1b ("netfilter: nfnetlink_queue: resolve clash for unconfirmed conntracks") Signed-off-by: Michal 'vorner' Vaner Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink_queue.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index ea4ba551abb2..d33094f4ec41 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict) int err; if (verdict == NF_ACCEPT || + verdict == NF_REPEAT || verdict == NF_STOP) { rcu_read_lock(); ct_hook = rcu_dereference(nf_ct_hook); -- GitLab From 1286df269f498165061e0cf8092ca212545dbb5a Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Wed, 5 Sep 2018 11:41:31 -0700 Subject: [PATCH 1269/1692] netfilter: xt_hashlimit: use s->file instead of s->private After switching to the new procfs API, it is supposed to retrieve the private pointer from PDE_DATA(file_inode(s->file)), s->private is no longer referred. 
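The conversion below is mechanical; the underlying rule is that once a proc file is created with proc_create_seq_private(), the seq_file's ->private member belongs to the seq iterator machinery, so data registered with procfs has to be fetched through the inode behind seq_file->file. A minimal sketch of that access pattern in kernel context (hashlimit_seq_pde_data() is a hypothetical helper name, not something added by this patch):

    #include <linux/fs.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* Returns the pointer registered at proc_create_seq_private() time. */
    static inline void *hashlimit_seq_pde_data(struct seq_file *s)
    {
        return PDE_DATA(file_inode(s->file));   /* not s->private any more */
    }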
Fixes: 1cd671827290 ("netfilter/x_tables: switch to proc_create_seq_private") Reported-by: Sami Farin Signed-off-by: Cong Wang Acked-by: Christoph Hellwig Tested-by: Sami Farin Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_hashlimit.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9b16402f29af..3e7d259e5d8d 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { static void *dl_seq_start(struct seq_file *s, loff_t *pos) __acquires(htable->lock) { - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); unsigned int *bucket; spin_lock_bh(&htable->lock); @@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos) static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) { - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); unsigned int *bucket = v; *pos = ++(*bucket); @@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) static void dl_seq_stop(struct seq_file *s, void *v) __releases(htable->lock) { - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); unsigned int *bucket = v; if (!IS_ERR(bucket)) @@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family, static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, struct seq_file *s) { - struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); spin_lock(&ent->lock); /* recalculate to show accurate numbers */ @@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, struct seq_file *s) { - struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); spin_lock(&ent->lock); /* recalculate to show accurate numbers */ @@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, struct seq_file *s) { - struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file)); spin_lock(&ent->lock); /* recalculate to show accurate numbers */ @@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, static int dl_seq_show_v2(struct seq_file *s, void *v) { - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); unsigned int *bucket = (unsigned int *)v; struct dsthash_ent *ent; @@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v) static int dl_seq_show_v1(struct seq_file *s, void *v) { - struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); unsigned int *bucket = v; struct dsthash_ent *ent; @@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v) static int dl_seq_show(struct seq_file *s, void *v) { - struct 
xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); + struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file)); unsigned int *bucket = v; struct dsthash_ent *ent; -- GitLab From 200f351e27f014fcbf69b544b0b4b72aeaf45fd3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 20 Jul 2018 20:17:35 -0700 Subject: [PATCH 1270/1692] arch/hexagon: fix kernel/dma.c build warning Fix build warning in arch/hexagon/kernel/dma.c by casting a void * to unsigned long to match the function parameter type. ../arch/hexagon/kernel/dma.c: In function 'arch_dma_alloc': ../arch/hexagon/kernel/dma.c:51:5: warning: passing argument 2 of 'gen_pool_add' makes integer from pointer without a cast [enabled by default] ../include/linux/genalloc.h:112:19: note: expected 'long unsigned int' but argument is of type 'void *' Signed-off-by: Randy Dunlap Cc: Yoshinori Sato Cc: Rich Felker Cc: linux-sh@vger.kernel.org Patch-mainline: linux-kernel @ 07/20/2018, 20:17 [rkuo@codeaurora.org: fixed architecture name] Signed-off-by: Richard Kuo --- arch/hexagon/kernel/dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index 77459df34e2e..7ebe7ad19d15 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -60,7 +60,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, panic("Can't create %s() memory pool!", __func__); else gen_pool_add(coherent_pool, - pfn_to_virt(max_low_pfn), + (unsigned long)pfn_to_virt(max_low_pfn), hexagon_coherent_pool_size, -1); } -- GitLab From 5c41aaad409c097cf1ef74f2c649fed994744ef5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 22 Jul 2018 16:03:58 -0700 Subject: [PATCH 1271/1692] hexagon: modify ffs() and fls() to return int Building drivers/mtd/nand/raw/nandsim.c on arch/hexagon/ produces a printk format build warning. This is due to hexagon's ffs() being coded as returning long instead of int. Fix the printk format warning by changing all of hexagon's ffs() and fls() functions to return int instead of long. The variables that they return are already int instead of long. This return type matches the return type in . ../drivers/mtd/nand/raw/nandsim.c: In function 'init_nandsim': ../drivers/mtd/nand/raw/nandsim.c:760:2: warning: format '%u' expects argument of type 'unsigned int', but argument 2 has type 'long int' [-Wformat] There are no ffs() or fls() allmodconfig build errors after making this change. Signed-off-by: Randy Dunlap Cc: Richard Kuo Cc: linux-hexagon@vger.kernel.org Cc: Geert Uytterhoeven Patch-mainline: linux-kernel @ 07/22/2018, 16:03 Signed-off-by: Richard Kuo --- arch/hexagon/include/asm/bitops.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index 5e4a59b3ec1b..2691a1857d20 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -211,7 +211,7 @@ static inline long ffz(int x) * This is defined the same way as ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static inline long fls(int x) +static inline int fls(int x) { int r; @@ -232,7 +232,7 @@ static inline long fls(int x) * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). 
*/ -static inline long ffs(int x) +static inline int ffs(int x) { int r; -- GitLab From d5bf26539494d16dfabbbea0854a47d202ea15c0 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 29 Aug 2018 14:38:50 +0800 Subject: [PATCH 1272/1692] drm/amd/powerplay: added vega20 overdrive support V3 Added vega20 overdrive support based on existing OD sysfs APIs. However, the OD logics are simplified on vega20. So, the behavior will be a little different and works only on some limited levels. V2: fix typo fix commit description revise error logs add support for clock OD V3: separate clock from voltage OD settings Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 45 +++ .../gpu/drm/amd/include/kgd_pp_interface.h | 2 + .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 289 +++++++++++++++++- 3 files changed, 335 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index b7b16cb5ff0f..396c826100e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -474,6 +474,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * in each power level within a power state. The pp_od_clk_voltage is used for * this. * + * < For Vega10 and previous ASICs > + * * Reading the file will display: * * - a list of engine clock levels and voltages labeled OD_SCLK @@ -491,6 +493,44 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * "c" (commit) to the file to commit your changes. If you want to reset to the * default power levels, write "r" (reset) to the file to reset them. * + * + * < For Vega20 > + * + * Reading the file will display: + * + * - minimum and maximum engine clock labeled OD_SCLK + * + * - maximum memory clock labeled OD_MCLK + * + * - three points labeled OD_VDDC_CURVE. + * They can be used to calibrate the sclk voltage curve. + * + * - a list of valid ranges for sclk, mclk, and voltage curve points + * labeled OD_RANGE + * + * To manually adjust these settings: + * + * - First select manual using power_dpm_force_performance_level + * + * - For clock frequency setting, enter a new value by writing a + * string that contains "s/m index clock" to the file. The index + * should be 0 if to set minimum clock. And 1 if to set maximum + * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. + * "m 1 800" will update maximum mclk to be 800Mhz. + * + * For sclk voltage curve, enter the new values by writing a + * string that contains "vc point clock voff" to the file. The + * points are indexed by 0, 1 and 2. E.g., "vc 0 300 10" will + * update point1 with clock set as 300Mhz and voltage increased + * by 10mV. "vc 2 1000 -10" will update point3 with clock set + * as 1000Mhz and voltage drop by 10mV. 
+ * + * - When you have edited all of the states as needed, write "c" (commit) + * to the file to commit your changes + * + * - If you want to reset to the default power levels, write "r" (reset) + * to the file to reset them + * */ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, @@ -520,6 +560,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, type = PP_OD_RESTORE_DEFAULT_TABLE; else if (*buf == 'c') type = PP_OD_COMMIT_DPM_TABLE; + else if (!strncmp(buf, "vc", 2)) + type = PP_OD_EDIT_VDDC_CURVE; else return -EINVAL; @@ -527,6 +569,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, tmp_str = buf_cpy; + if (type == PP_OD_EDIT_VDDC_CURVE) + tmp_str++; while (isspace(*++tmp_str)); while (tmp_str[0]) { @@ -570,6 +614,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (adev->powerplay.pp_funcs->print_clock_levels) { size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); return size; } else { diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 6a41b81c7325..448dee481a38 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -94,6 +94,7 @@ enum pp_clock_type { PP_PCIE, OD_SCLK, OD_MCLK, + OD_VDDC_CURVE, OD_RANGE, }; @@ -141,6 +142,7 @@ enum { enum PP_OD_DPM_TABLE_COMMAND { PP_OD_EDIT_SCLK_VDDC_TABLE, PP_OD_EDIT_MCLK_VDDC_TABLE, + PP_OD_EDIT_VDDC_CURVE, PP_OD_RESTORE_DEFAULT_TABLE, PP_OD_COMMIT_DPM_TABLE }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index fb32b28afa66..3efd59e984a3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -2325,11 +2325,207 @@ static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, return 0; } +static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_od8_single_setting *od8_settings = + data->od8_settings.od8_settings_array; + OverDriveTable_t *od_table = + &(data->smc_state_table.overdrive_table); + struct pp_clock_levels_with_latency clocks; + int32_t input_index, input_clk, input_vol, i; + int ret; + + PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", + return -EINVAL); + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) { + pr_info("Sclk min/max frequency overdrive not supported\n"); + return -EOPNOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + pr_info("invalid number of input parameters %d\n", + size); + return -EINVAL; + } + + input_index = input[i]; + input_clk = input[i + 1]; + + if (input_index != 0 && input_index != 1) { + pr_info("Invalid index %d\n", input_index); + pr_info("Support min/max sclk frequency setting only which index by 0/1\n"); + return -EINVAL; + } + + if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value || + input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) { + pr_info("clock freq %d is not within allowed range [%d - %d]\n", + input_clk, + 
od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, + od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); + return -EINVAL; + } + + if (input_index == 0) + od_table->GfxclkFmin = input_clk; + else + od_table->GfxclkFmax = input_clk; + } + + break; + + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { + pr_info("Mclk max frequency overdrive not supported\n"); + return -EOPNOTSUPP; + } + + ret = vega20_get_memclocks(hwmgr, &clocks); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get memory clk levels failed!", + return ret); + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + pr_info("invalid number of input parameters %d\n", + size); + return -EINVAL; + } + + input_index = input[i]; + input_clk = input[i + 1]; + + if (input_index != 1) { + pr_info("Invalid index %d\n", input_index); + pr_info("Support max Mclk frequency setting only which index by 1\n"); + return -EINVAL; + } + + if (input_clk < clocks.data[0].clocks_in_khz / 100 || + input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { + pr_info("clock freq %d is not within allowed range [%d - %d]\n", + input_clk, + clocks.data[0].clocks_in_khz / 100, + od8_settings[OD8_SETTING_UCLK_FMAX].max_value); + return -EINVAL; + } + + od_table->UclkFmax = input_clk; + } + + break; + + case PP_OD_EDIT_VDDC_CURVE: + if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) { + pr_info("Voltage curve calibrate not supported\n"); + return -EOPNOTSUPP; + } + + for (i = 0; i < size; i += 3) { + if (i + 3 > size) { + pr_info("invalid number of input parameters %d\n", + size); + return -EINVAL; + } + + input_index = input[i]; + input_clk = input[i + 1]; + input_vol = input[i + 2]; + + if (input_index > 2) { + pr_info("Setting for point %d is not supported\n", + input_index + 1); + pr_info("Three supported points index by 0, 1, 2\n"); + return -EINVAL; + } + + if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value || + input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) { + pr_info("clock freq %d is not within allowed range [%d - %d]\n", + input_clk, + od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, + od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); + return -EINVAL; + } + + /* TODO: suppose voltage1/2/3 has the same min/max value */ + if (input_vol < od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value || + input_vol > od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value) { + pr_info("clock voltage offset %d is not within allowed range [%d - %d]\n", + input_vol, + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); + return -EINVAL; + } + + switch (input_index) { + case 0: + od_table->GfxclkFreq1 = input_clk; + od_table->GfxclkOffsetVolt1 = input_vol; + break; + case 1: + od_table->GfxclkFreq2 = input_clk; + od_table->GfxclkOffsetVolt2 = input_vol; + break; + case 2: + od_table->GfxclkFreq3 = input_clk; + od_table->GfxclkOffsetVolt3 = input_vol; + break; + } + } + break; + + case PP_OD_RESTORE_DEFAULT_TABLE: + ret = vega20_copy_table_from_smc(hwmgr, + (uint8_t *)od_table, + TABLE_OVERDRIVE); + PP_ASSERT_WITH_CODE(!ret, + "Failed to export overdrive table!", + return ret); + break; + + case PP_OD_COMMIT_DPM_TABLE: + ret = vega20_copy_table_to_smc(hwmgr, + 
(uint8_t *)od_table, + TABLE_OVERDRIVE); + PP_ASSERT_WITH_CODE(!ret, + "Failed to import overdrive table!", + return ret); + + break; + + default: + return -EINVAL; + } + + return 0; +} + static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { - int i, now, size = 0; + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_od8_single_setting *od8_settings = + data->od8_settings.od8_settings_array; + OverDriveTable_t *od_table = + &(data->smc_state_table.overdrive_table); struct pp_clock_levels_with_latency clocks; + int i, now, size = 0; int ret = 0; switch (type) { @@ -2370,6 +2566,95 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case PP_PCIE: break; + case OD_SCLK: + if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { + size = sprintf(buf, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", + od_table->GfxclkFmin); + size += sprintf(buf + size, "1: %10uMhz\n", + od_table->GfxclkFmax); + } + break; + + case OD_MCLK: + if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { + size = sprintf(buf, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "1: %10uMhz\n", + od_table->UclkFmax); + } + + break; + + case OD_VDDC_CURVE: + if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { + size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE"); + size += sprintf(buf + size, "0: %10uMhz %10dmV\n", + od_table->GfxclkFreq1, + od_table->GfxclkOffsetVolt1); + size += sprintf(buf + size, "1: %10uMhz %10dmV\n", + od_table->GfxclkFreq2, + od_table->GfxclkOffsetVolt2); + size += sprintf(buf + size, "2: %10uMhz %10dmV\n", + od_table->GfxclkFreq3, + od_table->GfxclkOffsetVolt3); + } + + break; + + case OD_RANGE: + size = sprintf(buf, "%s:\n", "OD_RANGE"); + + if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", + od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, + od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); + } + + if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { + ret = vega20_get_memclocks(hwmgr, &clocks); + PP_ASSERT_WITH_CODE(!ret, + "Fail to get memory clk levels!", + return ret); + + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", + clocks.data[0].clocks_in_khz / 100, + od8_settings[OD8_SETTING_UCLK_FMAX].max_value); + } + + if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id && + od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", + od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, + od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOFF[0]: %7dmV %11dmV\n", + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", + od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, + 
od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOFF[1]: %7dmV %11dmV\n", + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", + od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, + od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOFF[2]: %7dmV %11dmV\n", + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, + od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); + } + + break; default: break; } @@ -2977,6 +3262,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { vega20_get_mclk_od, .set_mclk_od = vega20_set_mclk_od, + .odn_edit_dpm_table = + vega20_odn_edit_dpm_table, /* for sysfs to retrive/set gfxclk/memclk */ .force_clock_level = vega20_force_clock_level, -- GitLab From 9a412063f0940d23a5ef393c2607ca9ae9f8f0b7 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 30 Aug 2018 12:38:45 +0800 Subject: [PATCH 1273/1692] drm/amd/powerplay: correct data type to support under voltage For under voltage, negative value will be applied to voltage offset. Update the data type to cover this case. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 0a39a4c564d2..59e621ef33ac 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -569,11 +569,11 @@ typedef struct { uint16_t GfxclkFmin; uint16_t GfxclkFmax; uint16_t GfxclkFreq1; - uint16_t GfxclkOffsetVolt1; + int16_t GfxclkOffsetVolt1; uint16_t GfxclkFreq2; - uint16_t GfxclkOffsetVolt2; + int16_t GfxclkOffsetVolt2; uint16_t GfxclkFreq3; - uint16_t GfxclkOffsetVolt3; + int16_t GfxclkOffsetVolt3; uint16_t UclkFmax; int16_t OverDrivePct; uint16_t FanMaximumRpm; -- GitLab From c460f8a6f5918c2a8a2354a60b03a71310b943aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 30 Aug 2018 10:31:52 +0200 Subject: [PATCH 1274/1692] drm/amdgpu: move size calculations to the front of the file again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit amdgpu_vm_bo_* functions should come much later. 
Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 90 +++++++++++++------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d59222fb5931..a9275a99d793 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -133,51 +133,6 @@ struct amdgpu_prt_cb { struct dma_fence_cb cb; }; -/** - * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm - * - * @base: base structure for tracking BO usage in a VM - * @vm: vm to which bo is to be added - * @bo: amdgpu buffer object - * - * Initialize a bo_va_base structure and add it to the appropriate lists - * - */ -static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, - struct amdgpu_vm *vm, - struct amdgpu_bo *bo) -{ - base->vm = vm; - base->bo = bo; - INIT_LIST_HEAD(&base->bo_list); - INIT_LIST_HEAD(&base->vm_status); - - if (!bo) - return; - list_add_tail(&base->bo_list, &bo->va); - - if (bo->tbo.resv != vm->root.base.bo->tbo.resv) - return; - - vm->bulk_moveable = false; - if (bo->tbo.type == ttm_bo_type_kernel) - list_move(&base->vm_status, &vm->relocated); - else - list_move(&base->vm_status, &vm->idle); - - if (bo->preferred_domains & - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) - return; - - /* - * we checked all the prerequisites, but it looks like this per vm bo - * is currently evicted. add the bo to the evicted list to make sure it - * is validated on next vm use to avoid fault. - * */ - list_move_tail(&base->vm_status, &vm->evicted); - base->moved = true; -} - /** * amdgpu_vm_level_shift - return the addr shift for each level * @@ -249,6 +204,51 @@ static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level) return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8); } +/** + * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm + * + * @base: base structure for tracking BO usage in a VM + * @vm: vm to which bo is to be added + * @bo: amdgpu buffer object + * + * Initialize a bo_va_base structure and add it to the appropriate lists + * + */ +static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo) +{ + base->vm = vm; + base->bo = bo; + INIT_LIST_HEAD(&base->bo_list); + INIT_LIST_HEAD(&base->vm_status); + + if (!bo) + return; + list_add_tail(&base->bo_list, &bo->va); + + if (bo->tbo.resv != vm->root.base.bo->tbo.resv) + return; + + vm->bulk_moveable = false; + if (bo->tbo.type == ttm_bo_type_kernel) + list_move(&base->vm_status, &vm->relocated); + else + list_move(&base->vm_status, &vm->idle); + + if (bo->preferred_domains & + amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) + return; + + /* + * we checked all the prerequisites, but it looks like this per vm bo + * is currently evicted. add the bo to the evicted list to make sure it + * is validated on next vm use to avoid fault. 
+ * */ + list_move_tail(&base->vm_status, &vm->evicted); + base->moved = true; +} + /** * amdgpu_vm_get_pd_bo - add the VM PD to a validation list * -- GitLab From c12a2ee5d002e39a387001cdb5065b560568b4f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sat, 1 Sep 2018 13:25:31 +0200 Subject: [PATCH 1275/1692] drm/amdgpu: separate per VM BOs from normal in the moved state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allows us to avoid taking the spinlock in more places. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 67 +++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 ++- 2 files changed, 38 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a9275a99d793..65977e7c94dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -342,9 +342,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, break; if (bo->tbo.type != ttm_bo_type_kernel) { - spin_lock(&vm->moved_lock); list_move(&bo_base->vm_status, &vm->moved); - spin_unlock(&vm->moved_lock); } else { if (vm->use_cpu_for_update) r = amdgpu_bo_kmap(bo, NULL); @@ -1734,10 +1732,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, amdgpu_asic_flush_hdp(adev, NULL); } - spin_lock(&vm->moved_lock); - list_del_init(&bo_va->base.vm_status); - spin_unlock(&vm->moved_lock); - /* If the BO is not in its preferred location add it back to * the evicted list so that it gets validated again on the * next command submission. @@ -1746,9 +1740,13 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, uint32_t mem_type = bo->tbo.mem.mem_type; if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) - list_add_tail(&bo_va->base.vm_status, &vm->evicted); + list_move_tail(&bo_va->base.vm_status, &vm->evicted); else - list_add(&bo_va->base.vm_status, &vm->idle); + list_move(&bo_va->base.vm_status, &vm->idle); + } else { + spin_lock(&vm->invalidated_lock); + list_del_init(&bo_va->base.vm_status); + spin_unlock(&vm->invalidated_lock); } list_splice_init(&bo_va->invalids, &bo_va->valids); @@ -1974,40 +1972,40 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_bo_va *bo_va, *tmp; - struct list_head moved; + struct reservation_object *resv; bool clear; int r; - INIT_LIST_HEAD(&moved); - spin_lock(&vm->moved_lock); - list_splice_init(&vm->moved, &moved); - spin_unlock(&vm->moved_lock); + list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { + /* Per VM BOs never need to bo cleared in the page tables */ + r = amdgpu_vm_bo_update(adev, bo_va, false); + if (r) + return r; + } - list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) { - struct reservation_object *resv = bo_va->base.bo->tbo.resv; + spin_lock(&vm->invalidated_lock); + while (!list_empty(&vm->invalidated)) { + bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, + base.vm_status); + resv = bo_va->base.bo->tbo.resv; + spin_unlock(&vm->invalidated_lock); - /* Per VM BOs never need to bo cleared in the page tables */ - if (resv == vm->root.base.bo->tbo.resv) - clear = false; /* Try to reserve the BO to avoid clearing its ptes */ - else if (!amdgpu_vm_debug && reservation_object_trylock(resv)) + if (!amdgpu_vm_debug && reservation_object_trylock(resv)) clear = false; /* Somebody else is using the BO right 
now */ else clear = true; r = amdgpu_vm_bo_update(adev, bo_va, clear); - if (r) { - spin_lock(&vm->moved_lock); - list_splice(&moved, &vm->moved); - spin_unlock(&vm->moved_lock); + if (r) return r; - } - if (!clear && resv != vm->root.base.bo->tbo.resv) + if (!clear) reservation_object_unlock(resv); - + spin_lock(&vm->invalidated_lock); } + spin_unlock(&vm->invalidated_lock); return 0; } @@ -2072,9 +2070,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv && !bo_va->base.moved) { - spin_lock(&vm->moved_lock); list_move(&bo_va->base.vm_status, &vm->moved); - spin_unlock(&vm->moved_lock); } trace_amdgpu_vm_bo_map(bo_va, mapping); } @@ -2430,9 +2426,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_del(&bo_va->base.bo_list); - spin_lock(&vm->moved_lock); + spin_lock(&vm->invalidated_lock); list_del(&bo_va->base.vm_status); - spin_unlock(&vm->moved_lock); + spin_unlock(&vm->invalidated_lock); list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); @@ -2489,10 +2485,12 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, if (bo->tbo.type == ttm_bo_type_kernel) { list_move(&bo_base->vm_status, &vm->relocated); - } else { - spin_lock(&bo_base->vm->moved_lock); + } else if (bo->tbo.resv == vm->root.base.bo->tbo.resv) { list_move(&bo_base->vm_status, &vm->moved); - spin_unlock(&bo_base->vm->moved_lock); + } else { + spin_lock(&vm->invalidated_lock); + list_move(&bo_base->vm_status, &vm->invalidated); + spin_unlock(&vm->invalidated_lock); } } } @@ -2637,9 +2635,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->reserved_vmid[i] = NULL; INIT_LIST_HEAD(&vm->evicted); INIT_LIST_HEAD(&vm->relocated); - spin_lock_init(&vm->moved_lock); INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->idle); + INIT_LIST_HEAD(&vm->invalidated); + spin_lock_init(&vm->invalidated_lock); INIT_LIST_HEAD(&vm->freed); /* create scheduler entity for page table updates */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 62116fa44718..6ea162ca296a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -201,13 +201,16 @@ struct amdgpu_vm { /* PT BOs which relocated and their parent need an update */ struct list_head relocated; - /* BOs moved, but not yet updated in the PT */ + /* per VM BOs moved, but not yet updated in the PT */ struct list_head moved; - spinlock_t moved_lock; /* All BOs of this VM not currently in the state machine */ struct list_head idle; + /* regular invalidated BOs, but not yet updated in the PT */ + struct list_head invalidated; + spinlock_t invalidated_lock; + /* BO mappings freed, but not yet updated in the PT */ struct list_head freed; -- GitLab From 0a53b69cce846b42adf03ccee49ae0a37a731c20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 3 Sep 2018 10:51:51 +0200 Subject: [PATCH 1276/1692] drm/amdgpu: fix amdgpu_mn_unlock() in the CS error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid unlocking a lock we never locked. 
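The underlying rule is the usual goto-unwind discipline: each error label must release only what was acquired before the corresponding jump, so a failure path that never took the notifier lock must branch past the unlock. A standalone sketch of the idiom with generic names (not the amdgpu code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool reserve(void)         { return true;  }
    static bool submit(void)          { return false; }  /* pretend this fails */
    static void lock_notifier(void)   { puts("lock");   }
    static void unlock_notifier(void) { puts("unlock"); }

    static int do_commit(void)
    {
        int ret = -1;

        if (!reserve())
            goto out;            /* nothing held yet: plain return path   */

        lock_notifier();
        if (!submit())
            goto out_unlock;     /* undo only what this path has acquired */

        ret = 0;

    out_unlock:
        unlock_notifier();
    out:
        return ret;
    }

    int main(void)
    {
        printf("ret=%d\n", do_commit());
        return 0;
    }

That is what the hunk below restores: amdgpu_mn_unlock() moves out of the shared error_unlock tail and into the error_abort path, which is reached with the lock held.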
Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 349dcc37ee64..04a2733b5ccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1247,10 +1247,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, error_abort: dma_fence_put(&job->base.s_fence->finished); job->base.s_fence = NULL; + amdgpu_mn_unlock(p->mn); error_unlock: amdgpu_job_free(job); - amdgpu_mn_unlock(p->mn); return r; } -- GitLab From bcdc9fd634d1f0949774690e9e79ffdfc5d094c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 30 Aug 2018 10:27:15 +0200 Subject: [PATCH 1277/1692] drm/amdgpu: improve VM state machine documentation v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we have a lot of FAQ on the VM state machine try to improve the documentation by adding functions for each state move. v2: fix typo in amdgpu_vm_bo_invalidated, use amdgpu_vm_bo_relocated in one more place as well. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 141 +++++++++++++++++++------ 1 file changed, 109 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 65977e7c94dc..1f79a0ddc78a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -204,6 +204,95 @@ static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level) return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8); } +/** + * amdgpu_vm_bo_evicted - vm_bo is evicted + * + * @vm_bo: vm_bo which is evicted + * + * State for PDs/PTs and per VM BOs which are not at the location they should + * be. + */ +static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo) +{ + struct amdgpu_vm *vm = vm_bo->vm; + struct amdgpu_bo *bo = vm_bo->bo; + + vm_bo->moved = true; + if (bo->tbo.type == ttm_bo_type_kernel) + list_move(&vm_bo->vm_status, &vm->evicted); + else + list_move_tail(&vm_bo->vm_status, &vm->evicted); +} + +/** + * amdgpu_vm_bo_relocated - vm_bo is reloacted + * + * @vm_bo: vm_bo which is relocated + * + * State for PDs/PTs which needs to update their parent PD. + */ +static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) +{ + list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); +} + +/** + * amdgpu_vm_bo_moved - vm_bo is moved + * + * @vm_bo: vm_bo which is moved + * + * State for per VM BOs which are moved, but that change is not yet reflected + * in the page tables. + */ +static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) +{ + list_move(&vm_bo->vm_status, &vm_bo->vm->moved); +} + +/** + * amdgpu_vm_bo_idle - vm_bo is idle + * + * @vm_bo: vm_bo which is now idle + * + * State for PDs/PTs and per VM BOs which have gone through the state machine + * and are now idle. + */ +static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) +{ + list_move(&vm_bo->vm_status, &vm_bo->vm->idle); + vm_bo->moved = false; +} + +/** + * amdgpu_vm_bo_invalidated - vm_bo is invalidated + * + * @vm_bo: vm_bo which is now invalidated + * + * State for normal BOs which are invalidated and that change not yet reflected + * in the PTs. 
+ */ +static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) +{ + spin_lock(&vm_bo->vm->invalidated_lock); + list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); + spin_unlock(&vm_bo->vm->invalidated_lock); +} + +/** + * amdgpu_vm_bo_done - vm_bo is done + * + * @vm_bo: vm_bo which is now done + * + * State for normal BOs which are invalidated and that change has been updated + * in the PTs. + */ +static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) +{ + spin_lock(&vm_bo->vm->invalidated_lock); + list_del_init(&vm_bo->vm_status); + spin_unlock(&vm_bo->vm->invalidated_lock); +} + /** * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm * @@ -232,9 +321,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, vm->bulk_moveable = false; if (bo->tbo.type == ttm_bo_type_kernel) - list_move(&base->vm_status, &vm->relocated); + amdgpu_vm_bo_relocated(base); else - list_move(&base->vm_status, &vm->idle); + amdgpu_vm_bo_idle(base); if (bo->preferred_domains & amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) @@ -245,8 +334,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * is currently evicted. add the bo to the evicted list to make sure it * is validated on next vm use to avoid fault. * */ - list_move_tail(&base->vm_status, &vm->evicted); - base->moved = true; + amdgpu_vm_bo_evicted(base); } /** @@ -342,7 +430,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, break; if (bo->tbo.type != ttm_bo_type_kernel) { - list_move(&bo_base->vm_status, &vm->moved); + amdgpu_vm_bo_moved(bo_base); } else { if (vm->use_cpu_for_update) r = amdgpu_bo_kmap(bo, NULL); @@ -350,7 +438,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_ttm_alloc_gart(&bo->tbo); if (r) break; - list_move(&bo_base->vm_status, &vm->relocated); + amdgpu_vm_bo_relocated(bo_base); } } @@ -1066,7 +1154,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev, continue; if (!entry->base.moved) - list_move(&entry->base.vm_status, &vm->relocated); + amdgpu_vm_bo_relocated(&entry->base); amdgpu_vm_invalidate_level(adev, vm, entry, level + 1); } } @@ -1121,8 +1209,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, bo_base = list_first_entry(&vm->relocated, struct amdgpu_vm_bo_base, vm_status); - bo_base->moved = false; - list_move(&bo_base->vm_status, &vm->idle); + amdgpu_vm_bo_idle(bo_base); bo = bo_base->bo->parent; if (!bo) @@ -1241,7 +1328,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, if (entry->huge) { /* Add the entry to the relocated list to update it. 
*/ entry->huge = false; - list_move(&entry->base.vm_status, &p->vm->relocated); + amdgpu_vm_bo_relocated(&entry->base); } return; } @@ -1740,13 +1827,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, uint32_t mem_type = bo->tbo.mem.mem_type; if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) - list_move_tail(&bo_va->base.vm_status, &vm->evicted); + amdgpu_vm_bo_evicted(&bo_va->base); else - list_move(&bo_va->base.vm_status, &vm->idle); + amdgpu_vm_bo_idle(&bo_va->base); } else { - spin_lock(&vm->invalidated_lock); - list_del_init(&bo_va->base.vm_status); - spin_unlock(&vm->invalidated_lock); + amdgpu_vm_bo_done(&bo_va->base); } list_splice_init(&bo_va->invalids, &bo_va->valids); @@ -2468,30 +2553,22 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, list_for_each_entry(bo_base, &bo->va, bo_list) { struct amdgpu_vm *vm = bo_base->vm; - bool was_moved = bo_base->moved; - bo_base->moved = true; if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { - if (bo->tbo.type == ttm_bo_type_kernel) - list_move(&bo_base->vm_status, &vm->evicted); - else - list_move_tail(&bo_base->vm_status, - &vm->evicted); + amdgpu_vm_bo_evicted(bo_base); continue; } - if (was_moved) + if (bo_base->moved) continue; + bo_base->moved = true; - if (bo->tbo.type == ttm_bo_type_kernel) { - list_move(&bo_base->vm_status, &vm->relocated); - } else if (bo->tbo.resv == vm->root.base.bo->tbo.resv) { - list_move(&bo_base->vm_status, &vm->moved); - } else { - spin_lock(&vm->invalidated_lock); - list_move(&bo_base->vm_status, &vm->invalidated); - spin_unlock(&vm->invalidated_lock); - } + if (bo->tbo.type == ttm_bo_type_kernel) + amdgpu_vm_bo_relocated(bo_base); + else if (bo->tbo.resv == vm->root.base.bo->tbo.resv) + amdgpu_vm_bo_moved(bo_base); + else + amdgpu_vm_bo_invalidated(bo_base); } } -- GitLab From ad9a5b78f585e9a9bd5ad06dfaf1269659a99f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 27 Aug 2018 18:22:31 +0200 Subject: [PATCH 1278/1692] drm/amdgpu: correctly sign extend 48bit addresses v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Correct sign extend the GMC addresses to 48bit. v2: sign extending turned out easier than thought. 
v3: clean up the defines and move them into amdgpu_gmc.h as well Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 26 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 8 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 13 ----------- 9 files changed, 44 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 8bee9a0a1dec..db9872f83d03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -135,7 +135,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe, .gpuvm_size = min(adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT, - AMDGPU_VA_HOLE_START), + AMDGPU_GMC_HOLE_START), .drm_render_minor = adev->ddev->render->index }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 04a2733b5ccc..135d9d8c9506 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -835,7 +835,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) continue; - va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK; + va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK; r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); if (r) { DRM_ERROR("IB va_start is invalid\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 71792d820ae0..d30a0838851b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -572,16 +572,16 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (args->va_address >= AMDGPU_VA_HOLE_START && - args->va_address < AMDGPU_VA_HOLE_END) { + if (args->va_address >= AMDGPU_GMC_HOLE_START && + args->va_address < AMDGPU_GMC_HOLE_END) { dev_dbg(&dev->pdev->dev, "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n", - args->va_address, AMDGPU_VA_HOLE_START, - AMDGPU_VA_HOLE_END); + args->va_address, AMDGPU_GMC_HOLE_START, + AMDGPU_GMC_HOLE_END); return -EINVAL; } - args->va_address &= AMDGPU_VA_HOLE_MASK; + args->va_address &= AMDGPU_GMC_HOLE_MASK; if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 588a62f7aebc..d84ef1634eb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -30,6 +30,19 @@ #include "amdgpu_irq.h" +/* VA hole for 48bit addresses on Vega10 */ +#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL +#define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL + +/* + * Hardware is programmed as if the hole doesn't exists with start and end + * address values. + * + * This mask is used to remove the upper 16bits of the VA and so come up with + * the linear addr value. 
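+ *
+ * For example, masking the sign extended address 0xffff800000000000ULL with
+ * AMDGPU_GMC_HOLE_MASK yields the 48bit linear address 0x0000800000000000ULL.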
+ */ +#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL + struct firmware; /* @@ -133,6 +146,19 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc) return (gmc->real_vram_size == gmc->visible_vram_size); } +/** + * amdgpu_gmc_sign_extend - sign extend the given gmc address + * + * @addr: address to extend + */ +static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr) +{ + if (addr >= AMDGPU_GMC_HOLE_START) + addr |= AMDGPU_GMC_HOLE_END; + + return addr; +} + void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags); uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index ad7978bab5fc..86e8772b6852 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -655,11 +655,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_max = - min(vm_size, AMDGPU_VA_HOLE_START); + min(vm_size, AMDGPU_GMC_HOLE_START); - if (vm_size > AMDGPU_VA_HOLE_START) { - dev_info.high_va_offset = AMDGPU_VA_HOLE_END; - dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size; + if (vm_size > AMDGPU_GMC_HOLE_START) { + dev_info.high_va_offset = AMDGPU_GMC_HOLE_END; + dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size; } dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b5f20b42439e..0cbf651a88a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1368,7 +1368,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)); - return bo->tbo.offset; + return amdgpu_gmc_sign_extend(bo->tbo.offset); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 38856365580d..f2f358aa0597 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -28,9 +28,7 @@ uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT; addr -= AMDGPU_VA_RESERVED_SIZE; - - if (addr >= AMDGPU_VA_HOLE_START) - addr |= AMDGPU_VA_HOLE_END; + addr = amdgpu_gmc_sign_extend(addr); return addr; } @@ -73,7 +71,7 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) { int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va **bo_va) { - uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK; + uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK; struct ww_acquire_ctx ticket; struct list_head list; struct amdgpu_bo_list_entry pd; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1f79a0ddc78a..3163351508cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -492,7 +492,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (level == adev->vm_manager.root_level) { ats_entries = amdgpu_vm_level_shift(adev, level); ats_entries += AMDGPU_GPU_PAGE_SHIFT; - ats_entries = AMDGPU_VA_HOLE_START >> ats_entries; + ats_entries = AMDGPU_GMC_HOLE_START >> 
ats_entries; ats_entries = min(ats_entries, entries); entries -= ats_entries; } else { @@ -722,7 +722,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, eaddr = saddr + size - 1; if (vm->pte_support_ats) - ats = saddr < AMDGPU_VA_HOLE_START; + ats = saddr < AMDGPU_GMC_HOLE_START; saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; @@ -2016,7 +2016,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); - if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START) + if (vm->pte_support_ats && + mapping->start < AMDGPU_GMC_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 6ea162ca296a..e275ee7c1bc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -101,19 +101,6 @@ struct amdgpu_bo_list_entry; /* hardcode that limit for now */ #define AMDGPU_VA_RESERVED_SIZE (1ULL << 20) -/* VA hole for 48bit addresses on Vega10 */ -#define AMDGPU_VA_HOLE_START 0x0000800000000000ULL -#define AMDGPU_VA_HOLE_END 0xffff800000000000ULL - -/* - * Hardware is programmed as if the hole doesn't exists with start and end - * address values. - * - * This mask is used to remove the upper 16bits of the VA and so come up with - * the linear addr value. - */ -#define AMDGPU_VA_HOLE_MASK 0x0000ffffffffffffULL - /* max vmids dedicated for process */ #define AMDGPU_VM_MAX_RESERVED_VMID 1 -- GitLab From d76364fc7fde36b60c592c504f0f0ed636f1d2f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 24 Aug 2018 12:08:06 +0200 Subject: [PATCH 1279/1692] drm/amdgpu: add amdgpu_gmc_agp_location v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Helper to figure out the location of the AGP BAR. v2: fix a couple of bugs v3: correctly add one to vram_end Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 43 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 5 +++ 2 files changed, 48 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index c6bcc4715373..86887c1496f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -143,3 +143,46 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", mc->gart_size >> 20, mc->gart_start, mc->gart_end); } + +/** + * amdgpu_gmc_agp_location - try to find AGP location + * @adev: amdgpu device structure holding all necessary informations + * @mc: memory controller structure holding memory informations + * + * Function will place try to find a place for the AGP BAR in the MC address + * space. + * + * AGP BAR will be assigned the largest available hole in the address space. + * Should be called after VRAM and GART locations are setup. 
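+ *
+ * The resulting aperture start is aligned to a 16GB boundary.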
+ */ +void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) +{ + const uint64_t sixteen_gb = 1ULL << 34; + const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1); + u64 size_af, size_bf; + + if (mc->vram_start > mc->gart_start) { + size_bf = (mc->vram_start & sixteen_gb_mask) - + ALIGN(mc->gart_end + 1, sixteen_gb); + size_af = mc->mc_mask + 1 - ALIGN(mc->vram_end + 1, sixteen_gb); + } else { + size_bf = mc->vram_start & sixteen_gb_mask; + size_af = (mc->gart_start & sixteen_gb_mask) - + ALIGN(mc->vram_end + 1, sixteen_gb); + } + + if (size_bf > size_af) { + mc->agp_start = mc->vram_start > mc->gart_start ? + mc->gart_end + 1 : 0; + mc->agp_size = size_bf; + } else { + mc->agp_start = (mc->vram_start > mc->gart_start ? + mc->vram_end : mc->gart_end) + 1, + mc->agp_size = size_af; + } + + mc->agp_start = ALIGN(mc->agp_start, sixteen_gb); + mc->agp_end = mc->agp_start + mc->agp_size - 1; + dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n", + mc->agp_size >> 20, mc->agp_start, mc->agp_end); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index d84ef1634eb2..baedd6d6266d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -94,6 +94,9 @@ struct amdgpu_gmc { * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; + u64 agp_size; + u64 agp_start; + u64 agp_end; u64 gart_size; u64 gart_start; u64 gart_end; @@ -166,5 +169,7 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, u64 base); void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); +void amdgpu_gmc_agp_location(struct amdgpu_device *adev, + struct amdgpu_gmc *mc); #endif -- GitLab From 485fc361d38aad265239547a2d0a02517576f309 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 27 Aug 2018 18:19:48 +0200 Subject: [PATCH 1280/1692] drm/amdgpu: use the AGP aperture for system memory access v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Start to use the old AGP aperture for system memory access. v2: Move that to amdgpu_ttm_alloc_gart Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 23 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 58 ++++++++++++++----------- 3 files changed, 57 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 86887c1496f8..6acdeebabfc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -79,6 +79,29 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo) return pd_addr; } +/** + * amdgpu_gmc_agp_addr - return the address in the AGP address space + * + * @tbo: TTM BO which needs the address, must be in GTT domain + * + * Tries to figure out how to access the BO through the AGP aperture. Returns + * AMDGPU_BO_INVALID_OFFSET if that is not possible. 
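+ *
+ * Only single page, uncached BOs can use this path: the returned address is
+ * simply the AGP aperture start plus the DMA address of the BO's one page.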
+ */ +uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + struct ttm_dma_tt *ttm; + + if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached) + return AMDGPU_BO_INVALID_OFFSET; + + ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm); + if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) + return AMDGPU_BO_INVALID_OFFSET; + + return adev->gmc.agp_start + ttm->dma_address[0]; +} + /** * amdgpu_gmc_vram_location - try to find VRAM location * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index baedd6d6266d..17ffc35d1366 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -165,6 +165,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr) void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags); uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo); +uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo); void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, u64 base); void amdgpu_gmc_gart_location(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d9f3201c9e5c..8a158ee922f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1081,41 +1081,49 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) struct ttm_mem_reg tmp; struct ttm_placement placement; struct ttm_place placements; - uint64_t flags; + uint64_t addr, flags; int r; if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) return 0; - /* allocate GART space */ - tmp = bo->mem; - tmp.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; - placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | - TTM_PL_FLAG_TT; + addr = amdgpu_gmc_agp_addr(bo); + if (addr != AMDGPU_BO_INVALID_OFFSET) { + bo->mem.start = addr >> PAGE_SHIFT; + } else { - r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); - if (unlikely(r)) - return r; + /* allocate GART space */ + tmp = bo->mem; + tmp.mm_node = NULL; + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; + placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | + TTM_PL_FLAG_TT; + + r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); + if (unlikely(r)) + return r; - /* compute PTE flags for this buffer object */ - flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); + /* compute PTE flags for this buffer object */ + flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); - /* Bind pages */ - gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - adev->gmc.gart_start; - r = amdgpu_ttm_gart_bind(adev, bo, flags); - if (unlikely(r)) { - ttm_bo_mem_put(bo, &tmp); - return r; + /* Bind pages */ + gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - + adev->gmc.gart_start; + r = amdgpu_ttm_gart_bind(adev, bo, flags); + if (unlikely(r)) { + ttm_bo_mem_put(bo, &tmp); + return r; + } + + ttm_bo_mem_put(bo, &bo->mem); + bo->mem = tmp; } - ttm_bo_mem_put(bo, &bo->mem); - bo->mem = tmp; bo->offset = (bo->mem.start << PAGE_SHIFT) + bo->bdev->man[bo->mem.mem_type].gpu_offset; -- GitLab From 
3d5fe658b5b3f3e0d5605a540d6c1a67d9b15735 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 29 Aug 2018 14:52:50 +0200 Subject: [PATCH 1281/1692] drm/amdgpu: manually map the shadow BOs again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise we won't be able to use the AGP aperture. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 +---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 +++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 0cbf651a88a6..de990bdcdd6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -163,10 +163,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; - if (flags & AMDGPU_GEM_CREATE_SHADOW) - places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT; - else - places[c].lpfn = 0; + places[c].lpfn = 0; places[c].flags = TTM_PL_FLAG_TT; if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) places[c].flags |= TTM_PL_FLAG_WC | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3163351508cf..ea5e277ae038 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -438,6 +438,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_ttm_alloc_gart(&bo->tbo); if (r) break; + if (bo->shadow) { + r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo); + if (r) + break; + } amdgpu_vm_bo_relocated(bo_base); } } -- GitLab From 43c40a02c1b1bbedbf7c1c10392cf885a0bb3f46 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Wed, 18 Jul 2018 20:30:09 -0400 Subject: [PATCH 1282/1692] drm/amd/display: dc 3.1.64 Signed-off-by: Tony Cheng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 2faff1b8821d..dee0f28e683d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.63" +#define DC_VER "3.1.64" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- GitLab From a87fa9938749bcdcdec4376e9e7d1ccb39ebbd8a Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Mon, 20 Aug 2018 13:32:07 -0400 Subject: [PATCH 1283/1692] drm/amd/display: Build stream update and plane updates in dm [Why] We currently lock modeset by setting a boolean in dm. We want to lock Based on what DC tells us. [How] Build stream_updates and plane_update based on what changed. 
Then we call check_update_surfaces_for_stream() to get the update type We lock only if update_type is not fast Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 116 +++++++++++++++++- 1 file changed, 115 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d878b124dd20..32f634eedcc3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5070,6 +5070,100 @@ static int dm_update_planes_state(struct dc *dc, return ret; } +enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state) +{ + + + int i, j, num_plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state; + struct drm_crtc *new_plane_crtc, *old_plane_crtc; + struct drm_plane *plane; + + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state, *old_crtc_state; + struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state; + struct dc_stream_status *status = NULL; + + struct dc_surface_update *updates = kzalloc(MAX_SURFACES * sizeof(struct dc_surface_update), GFP_KERNEL); + struct dc_plane_state *surface = kzalloc(MAX_SURFACES * sizeof(struct dc_plane_state), GFP_KERNEL); + struct dc_stream_update stream_update; + enum surface_update_type update_type = UPDATE_TYPE_FAST; + + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); + old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); + num_plane = 0; + + if (new_dm_crtc_state->stream) { + + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { + new_plane_crtc = new_plane_state->crtc; + old_plane_crtc = old_plane_state->crtc; + new_dm_plane_state = to_dm_plane_state(new_plane_state); + old_dm_plane_state = to_dm_plane_state(old_plane_state); + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + continue; + + if (!state->allow_modeset) + continue; + + if (crtc == new_plane_crtc) { + updates[num_plane].surface = &surface[num_plane]; + + if (new_crtc_state->mode_changed) { + updates[num_plane].surface->src_rect = + new_dm_plane_state->dc_state->src_rect; + updates[num_plane].surface->dst_rect = + new_dm_plane_state->dc_state->dst_rect; + updates[num_plane].surface->rotation = + new_dm_plane_state->dc_state->rotation; + updates[num_plane].surface->in_transfer_func = + new_dm_plane_state->dc_state->in_transfer_func; + stream_update.dst = new_dm_crtc_state->stream->dst; + stream_update.src = new_dm_crtc_state->stream->src; + } + + if (new_crtc_state->color_mgmt_changed) { + updates[num_plane].gamma = + new_dm_plane_state->dc_state->gamma_correction; + updates[num_plane].in_transfer_func = + new_dm_plane_state->dc_state->in_transfer_func; + stream_update.gamut_remap = + &new_dm_crtc_state->stream->gamut_remap_matrix; + stream_update.out_transfer_func = + new_dm_crtc_state->stream->out_transfer_func; + } + + num_plane++; + } + } + + if (num_plane > 0) { + status = dc_stream_get_status(new_dm_crtc_state->stream); + update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, + &stream_update, status); + + if (update_type > UPDATE_TYPE_MED) { + update_type = UPDATE_TYPE_FULL; + goto ret; + } + } + + } else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) { + 
update_type = UPDATE_TYPE_FULL; + goto ret; + } + } + +ret: + kfree(updates); + kfree(surface); + + return update_type; +} static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) @@ -5081,6 +5175,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_connector_state *old_con_state, *new_con_state; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; + enum surface_update_type update_type = UPDATE_TYPE_FAST; + enum surface_update_type overall_update_type = UPDATE_TYPE_FAST; + int ret, i; /* @@ -5166,6 +5263,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) continue; + overall_update_type = UPDATE_TYPE_FULL; lock_and_validation_needed = true; } @@ -5178,8 +5276,24 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * will wait for completion of any outstanding flip using DRMs * synchronization events. */ + update_type = dm_determine_update_type_for_commit(dc, state); + + if (overall_update_type < update_type) + overall_update_type = update_type; + + /* + * lock_and_validation_needed was an old way to determine if we need to set + * the global lock. Leaving it in to check if we broke any corner cases + * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED + * lock_and_validation_needed false = UPDATE_TYPE_FAST + */ + if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST) + WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL"); + else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST) + WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST"); + - if (lock_and_validation_needed) { + if (overall_update_type > UPDATE_TYPE_FAST) { ret = do_aquire_global_lock(dev, state); if (ret) -- GitLab From 182388fcc4c820b34038f474c1a47e9700cd3c09 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Thu, 24 May 2018 15:50:27 -0400 Subject: [PATCH 1284/1692] drm/amd/display: Add DP YCbCr 4:2:0 support [Why] For supporting DP YCbCr 4:2:0 output. [How] Update mod_build_vsc_infopacket to support Pixel Encoding/Colorimetry Format indication for VSC SDP rev5. 
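The header bytes set below (HB0..HB3 = 00h, 07h, 05h, 13h) identify the packet as a rev5 VSC SDP, and the new data bytes DB16..DB18 carry the pixel encoding, colorimetry, color depth/range and content type. As a short sketch, for an 8 bpc YCbCr 4:2:0 stream using BT.709 the values from the tables added below pack as:

	uint8_t db16 = (0x3 << 4) | 0x1; /* YCbCr420, ITU-R BT.709 -> 0x31 */
	uint8_t db17 = 0x1 | 0x80;       /* 8 bpc, bit 7 set: YCbCr is always limited range -> 0x81 */
	uint8_t db18 = 0x0;              /* content type: not defined */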
Signed-off-by: Eric Bernstein Reviewed-by: Dmytro Laktyushkin Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../display/modules/info_packet/info_packet.c | 189 +++++++++++++++++- 1 file changed, 188 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 24b6cc1dfc64..52378fc69079 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -26,20 +26,38 @@ #include "mod_info_packet.h" #include "core_types.h" +enum ColorimetryRGBDP { + ColorimetryRGB_DP_sRGB = 0, + ColorimetryRGB_DP_AdobeRGB = 3, + ColorimetryRGB_DP_P3 = 4, + ColorimetryRGB_DP_CustomColorProfile = 5, + ColorimetryRGB_DP_ITU_R_BT2020RGB = 6, +}; +enum ColorimetryYCCDP { + ColorimetryYCC_DP_ITU601 = 0, + ColorimetryYCC_DP_ITU709 = 1, + ColorimetryYCC_DP_AdobeYCC = 5, + ColorimetryYCC_DP_ITU2020YCC = 6, + ColorimetryYCC_DP_ITU2020YCbCr = 7, +}; + static void mod_build_vsc_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet) { unsigned int vscPacketRevision = 0; unsigned int i; + unsigned int pixelEncoding = 0; + unsigned int colorimetryFormat = 0; if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) vscPacketRevision = 1; - /*VSC packet set to 2 when DP revision >= 1.2*/ if (stream->psr_version != 0) vscPacketRevision = 2; + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + vscPacketRevision = 5; /* VSC packet not needed based on the features * supported by this DP display @@ -81,6 +99,175 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream, info_packet->valid = true; } + + /* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication. + * Added in DP1.3, a DP Source device is allowed to indicate the pixel encoding/colorimetry + * format to the DP Sink device with VSC SDP only when the DP Sink device supports it + * (i.e., VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED bit in the DPRX_FEATURE_ENUMERATION_LIST + * register (DPCD Address 02210h, bit 3) is set to 1). + * (Requires VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED bit set to 1 in DPCD 02210h. This + * DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4 + * (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate + * the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and + * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don’t care”).) + */ + if (vscPacketRevision == 0x5) { + /* Secondary-data Packet ID = 0 */ + info_packet->hb0 = 0x00; + /* 07h - Packet Type Value indicating Video Stream Configuration packet */ + info_packet->hb1 = 0x07; + /* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication. */ + info_packet->hb2 = 0x05; + /* 13h = VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/Colorimetry Format indication (HB2 = 05h). 
*/ + info_packet->hb3 = 0x13; + + info_packet->valid = true; + + /* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs + * Data Bytes DB 18~16 + * Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding) + * ---------------------------------------------------------------------------------------------------- + * 0x0 = sRGB | 0 = RGB + * 0x1 = RGB Wide Gamut Fixed Point + * 0x2 = RGB Wide Gamut Floating Point + * 0x3 = AdobeRGB + * 0x4 = DCI-P3 + * 0x5 = CustomColorProfile + * (others reserved) + * ---------------------------------------------------------------------------------------------------- + * 0x0 = ITU-R BT.601 | 1 = YCbCr444 + * 0x1 = ITU-R BT.709 + * 0x2 = xvYCC601 + * 0x3 = xvYCC709 + * 0x4 = sYCC601 + * 0x5 = AdobeYCC601 + * 0x6 = ITU-R BT.2020 Y'cC'bcC'rc + * 0x7 = ITU-R BT.2020 Y'C'bC'r + * (others reserved) + * ---------------------------------------------------------------------------------------------------- + * 0x0 = ITU-R BT.601 | 2 = YCbCr422 + * 0x1 = ITU-R BT.709 + * 0x2 = xvYCC601 + * 0x3 = xvYCC709 + * 0x4 = sYCC601 + * 0x5 = AdobeYCC601 + * 0x6 = ITU-R BT.2020 Y'cC'bcC'rc + * 0x7 = ITU-R BT.2020 Y'C'bC'r + * (others reserved) + * ---------------------------------------------------------------------------------------------------- + * 0x0 = ITU-R BT.601 | 3 = YCbCr420 + * 0x1 = ITU-R BT.709 + * 0x2 = xvYCC601 + * 0x3 = xvYCC709 + * 0x4 = sYCC601 + * 0x5 = AdobeYCC601 + * 0x6 = ITU-R BT.2020 Y'cC'bcC'rc + * 0x7 = ITU-R BT.2020 Y'C'bC'r + * (others reserved) + * ---------------------------------------------------------------------------------------------------- + * 0x0 =DICOM Part14 Grayscale | 4 = Yonly + * Display Function + * (others reserved) + */ + + /* Set Pixel Encoding */ + switch (stream->timing.pixel_encoding) { + case PIXEL_ENCODING_RGB: + pixelEncoding = 0x0; /* RGB = 0h */ + break; + case PIXEL_ENCODING_YCBCR444: + pixelEncoding = 0x1; /* YCbCr444 = 1h */ + break; + case PIXEL_ENCODING_YCBCR422: + pixelEncoding = 0x2; /* YCbCr422 = 2h */ + break; + case PIXEL_ENCODING_YCBCR420: + pixelEncoding = 0x3; /* YCbCr420 = 3h */ + break; + default: + pixelEncoding = 0x0; /* default RGB = 0h */ + break; + } + + /* Set Colorimetry format based on pixel encoding */ + switch (stream->timing.pixel_encoding) { + case PIXEL_ENCODING_RGB: + if ((stream->output_color_space == COLOR_SPACE_SRGB) || + (stream->output_color_space == COLOR_SPACE_SRGB_LIMITED)) + colorimetryFormat = ColorimetryRGB_DP_sRGB; + else if (stream->output_color_space == COLOR_SPACE_ADOBERGB) + colorimetryFormat = ColorimetryRGB_DP_AdobeRGB; + else if ((stream->output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) || + (stream->output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)) + colorimetryFormat = ColorimetryRGB_DP_ITU_R_BT2020RGB; + break; + + case PIXEL_ENCODING_YCBCR444: + case PIXEL_ENCODING_YCBCR422: + case PIXEL_ENCODING_YCBCR420: + /* Note: xvYCC probably not supported correctly here on DP since colorspace translation + * loses distinction between BT601 vs xvYCC601 in translation + */ + if (stream->output_color_space == COLOR_SPACE_YCBCR601) + colorimetryFormat = ColorimetryYCC_DP_ITU601; + else if (stream->output_color_space == COLOR_SPACE_YCBCR709) + colorimetryFormat = ColorimetryYCC_DP_ITU709; + else if (stream->output_color_space == COLOR_SPACE_ADOBERGB) + colorimetryFormat = ColorimetryYCC_DP_AdobeYCC; + else if (stream->output_color_space == COLOR_SPACE_2020_YCBCR) + colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr; + break; + + default: + 
colorimetryFormat = ColorimetryRGB_DP_sRGB; + break; + } + + info_packet->sb[16] = (pixelEncoding << 4) | colorimetryFormat; + + /* Set color depth */ + switch (stream->timing.display_color_depth) { + case COLOR_DEPTH_666: + /* NOTE: This is actually not valid for YCbCr pixel encoding to have 6 bpc + * as of DP1.4 spec, but value of 0 probably reserved here for potential future use. + */ + info_packet->sb[17] = 0; + break; + case COLOR_DEPTH_888: + info_packet->sb[17] = 1; + break; + case COLOR_DEPTH_101010: + info_packet->sb[17] = 2; + break; + case COLOR_DEPTH_121212: + info_packet->sb[17] = 3; + break; + /*case COLOR_DEPTH_141414: -- NO SUCH FORMAT IN DP SPEC */ + case COLOR_DEPTH_161616: + info_packet->sb[17] = 4; + break; + default: + info_packet->sb[17] = 0; + break; + } + + /* all YCbCr are always limited range */ + if ((stream->output_color_space == COLOR_SPACE_SRGB_LIMITED) || + (stream->output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) || + (pixelEncoding != 0x0)) { + info_packet->sb[17] |= 0x80; /* DB17 bit 7 set to 1 for CEA timing. */ + } + + /* Content Type (Bits 2:0) + * 0 = Not defined. + * 1 = Graphics. + * 2 = Photo. + * 3 = Video. + * 4 = Game. + */ + info_packet->sb[18] = 0; + } + } void mod_build_infopackets(struct info_packet_inputs *inputs, -- GitLab From 550db288129591c4b2f669d724f12e43a380c286 Mon Sep 17 00:00:00 2001 From: Gary Kattan Date: Mon, 20 Aug 2018 15:12:14 -0700 Subject: [PATCH 1285/1692] drm/amd/display: Fix DAL217 tests modify DTN logs for other tests [Why]Update Code to get DTN golden log check to pass for tests run after DAL217 tests. [How]Change how dcn10_log_hw_state function prints HW state info (CM_GAMUT_REMAP_Cx_Cx registers) when GAMUT REMAP is in bypass mode. Signed-off-by: Gary Kattan Reviewed-by: Dmytro Laktyushkin Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 5f2054a1d563..dcb3c5530236 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -116,12 +116,14 @@ void dpp_read_state(struct dpp *dpp_base, REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); - s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); - s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); - s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); - s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); - s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); - s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); + if (s->gamut_remap_mode) { + s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); + s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); + s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); + s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); + s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); + s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); + } } /* Program gamut remap in bypass mode */ -- GitLab From 61ea4c6f70ffd18eed7fc0d3fb678245f499c756 Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Mon, 13 Aug 2018 15:11:44 -0400 Subject: [PATCH 1286/1692] drm/amd/display: Add driver-side parsing for CM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although 4 unique register values exist for gamma modes, two are 
actually the same (the two RAMs) It’s not possible for caller to understand this HW specific behavior, so some parsing is necessary in driver Signed-off-by: Jun Lei Reviewed-by: Wesley Chalmers Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../dc/dcn10/dcn10_hw_sequencer_debug.c | 29 ++++++++++++++++--- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 9288b00e49b4..9c218252004f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -314,14 +314,35 @@ static unsigned int dcn10_get_cm_states(struct dc *dc, char *pBuf, unsigned int struct dpp *dpp = pool->dpps[i]; struct dcn_dpp_state s = {0}; + + + dpp->funcs->dpp_read_state(dpp, &s); if (s.is_enabled) { - chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%x,%x," - "%08x,%08x,%08x,%08x,%08x,%08x" + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x," + "%s,%s,%s," + "%x,%08x,%08x,%08x,%08x,%08x,%08x" "\n", - dpp->inst, s.igam_input_format, s.igam_lut_mode, s.dgam_lut_mode, - s.rgam_lut_mode, s.gamut_remap_mode, s.gamut_remap_c11_c12, + dpp->inst, s.igam_input_format, + (s.igam_lut_mode == 0) ? "BypassFixed" : + ((s.igam_lut_mode == 1) ? "BypassFloat" : + ((s.igam_lut_mode == 2) ? "RAM" : + ((s.igam_lut_mode == 3) ? "RAM" : + "Unknown"))), + (s.dgam_lut_mode == 0) ? "Bypass" : + ((s.dgam_lut_mode == 1) ? "sRGB" : + ((s.dgam_lut_mode == 2) ? "Ycc" : + ((s.dgam_lut_mode == 3) ? "RAM" : + ((s.dgam_lut_mode == 4) ? "RAM" : + "Unknown")))), + (s.rgam_lut_mode == 0) ? "Bypass" : + ((s.rgam_lut_mode == 1) ? "sRGB" : + ((s.rgam_lut_mode == 2) ? "Ycc" : + ((s.rgam_lut_mode == 3) ? "RAM" : + ((s.rgam_lut_mode == 4) ? 
"RAM" : + "Unknown")))), + s.gamut_remap_mode, s.gamut_remap_c11_c12, s.gamut_remap_c13_c14, s.gamut_remap_c21_c22, s.gamut_remap_c23_c24, s.gamut_remap_c31_c32, s.gamut_remap_c33_c34); -- GitLab From 5a8132b9f6063b36369b2afd85112ff37d56e183 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 14 Aug 2018 16:12:54 -0400 Subject: [PATCH 1287/1692] drm/amd/display: remove dead dc vbios code Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/bios/bios_parser.c | 1177 ----------------- .../drm/amd/display/dc/bios/bios_parser2.c | 312 +---- .../gpu/drm/amd/display/dc/dc_bios_types.h | 64 - 3 files changed, 39 insertions(+), 1514 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index bfa5816cfc92..0e1dc1b1a48d 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -52,24 +52,13 @@ #define DC_LOGGER \ bp->base.ctx->logger -/* GUID to validate external display connection info table (aka OPM module) */ -static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = { - 0x91, 0x6E, 0x57, 0x09, - 0x3F, 0x6D, 0xD2, 0x11, - 0x39, 0x8E, 0x00, 0xA0, - 0xC9, 0x69, 0x72, 0x3B}; - #define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table) static void get_atom_data_table_revision( ATOM_COMMON_TABLE_HEADER *atom_data_tbl, struct atom_data_revision *tbl_revision); -static uint32_t get_dst_number_from_object(struct bios_parser *bp, - ATOM_OBJECT *object); static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object, uint16_t **id_list); -static uint32_t get_dest_obj_list(struct bios_parser *bp, - ATOM_OBJECT *object, uint16_t **id_list); static ATOM_OBJECT *get_bios_object(struct bios_parser *bp, struct graphics_object_id id); static enum bp_result get_gpio_i2c_info(struct bios_parser *bp, @@ -163,29 +152,6 @@ static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb) le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset)); } -static struct graphics_object_id bios_parser_get_encoder_id( - struct dc_bios *dcb, - uint32_t i) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - struct graphics_object_id object_id = dal_graphics_object_id_init( - 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN); - - uint32_t encoder_table_offset = bp->object_info_tbl_offset - + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset); - - ATOM_OBJECT_TABLE *tbl = - GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset); - - if (tbl && tbl->ucNumberOfObjects > i) { - const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID); - - object_id = object_id_from_bios_object_id(id); - } - - return object_id; -} - static struct graphics_object_id bios_parser_get_connector_id( struct dc_bios *dcb, uint8_t i) @@ -217,15 +183,6 @@ static struct graphics_object_id bios_parser_get_connector_id( return object_id; } -static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb, - struct graphics_object_id id) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - ATOM_OBJECT *object = get_bios_object(bp, id); - - return get_dst_number_from_object(bp, object); -} - static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb, struct graphics_object_id object_id, uint32_t index, struct graphics_object_id *src_object_id) @@ -255,30 +212,6 @@ static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb, return BP_RESULT_OK; } -static 
enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb, - struct graphics_object_id object_id, uint32_t index, - struct graphics_object_id *dest_object_id) -{ - uint32_t number; - uint16_t *id = NULL; - ATOM_OBJECT *object; - struct bios_parser *bp = BP_FROM_DCB(dcb); - - if (!dest_object_id) - return BP_RESULT_BADINPUT; - - object = get_bios_object(bp, object_id); - - number = get_dest_obj_list(bp, object, &id); - - if (number <= index || !id) - return BP_RESULT_BADINPUT; - - *dest_object_id = object_id_from_bios_object_id(id[index]); - - return BP_RESULT_OK; -} - static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb, struct graphics_object_id id, struct graphics_object_i2c_info *info) @@ -325,196 +258,6 @@ static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb, return BP_RESULT_NORECORD; } -static enum bp_result get_voltage_ddc_info_v1(uint8_t *i2c_line, - ATOM_COMMON_TABLE_HEADER *header, - uint8_t *address) -{ - enum bp_result result = BP_RESULT_NORECORD; - ATOM_VOLTAGE_OBJECT_INFO *info = - (ATOM_VOLTAGE_OBJECT_INFO *) address; - - uint8_t *voltage_current_object = (uint8_t *) &info->asVoltageObj[0]; - - while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) { - ATOM_VOLTAGE_OBJECT *object = - (ATOM_VOLTAGE_OBJECT *) voltage_current_object; - - if ((object->ucVoltageType == SET_VOLTAGE_INIT_MODE) && - (object->ucVoltageType & - VOLTAGE_CONTROLLED_BY_I2C_MASK)) { - - *i2c_line = object->asControl.ucVoltageControlI2cLine - ^ 0x90; - result = BP_RESULT_OK; - break; - } - - voltage_current_object += object->ucSize; - } - return result; -} - -static enum bp_result get_voltage_ddc_info_v3(uint8_t *i2c_line, - uint32_t index, - ATOM_COMMON_TABLE_HEADER *header, - uint8_t *address) -{ - enum bp_result result = BP_RESULT_NORECORD; - ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info = - (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *) address; - - uint8_t *voltage_current_object = - (uint8_t *) (&(info->asVoltageObj[0])); - - while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) { - ATOM_I2C_VOLTAGE_OBJECT_V3 *object = - (ATOM_I2C_VOLTAGE_OBJECT_V3 *) voltage_current_object; - - if (object->sHeader.ucVoltageMode == - ATOM_INIT_VOLTAGE_REGULATOR) { - if (object->sHeader.ucVoltageType == index) { - *i2c_line = object->ucVoltageControlI2cLine - ^ 0x90; - result = BP_RESULT_OK; - break; - } - } - - voltage_current_object += le16_to_cpu(object->sHeader.usSize); - } - return result; -} - -static enum bp_result bios_parser_get_thermal_ddc_info( - struct dc_bios *dcb, - uint32_t i2c_channel_id, - struct graphics_object_i2c_info *info) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - ATOM_I2C_ID_CONFIG_ACCESS *config; - ATOM_I2C_RECORD record; - - if (!info) - return BP_RESULT_BADINPUT; - - config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_channel_id; - - record.sucI2cId.bfHW_Capable = config->sbfAccess.bfHW_Capable; - record.sucI2cId.bfI2C_LineMux = config->sbfAccess.bfI2C_LineMux; - record.sucI2cId.bfHW_EngineID = config->sbfAccess.bfHW_EngineID; - - return get_gpio_i2c_info(bp, &record, info); -} - -static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb, - uint32_t index, - struct graphics_object_i2c_info *info) -{ - uint8_t i2c_line = 0; - enum bp_result result = BP_RESULT_NORECORD; - uint8_t *voltage_info_address; - ATOM_COMMON_TABLE_HEADER *header; - struct atom_data_revision revision = {0}; - struct bios_parser *bp = BP_FROM_DCB(dcb); - - if (!DATA_TABLES(VoltageObjectInfo)) - return result; - - voltage_info_address = 
bios_get_image(&bp->base, DATA_TABLES(VoltageObjectInfo), sizeof(ATOM_COMMON_TABLE_HEADER)); - - header = (ATOM_COMMON_TABLE_HEADER *) voltage_info_address; - - get_atom_data_table_revision(header, &revision); - - switch (revision.major) { - case 1: - case 2: - result = get_voltage_ddc_info_v1(&i2c_line, header, - voltage_info_address); - break; - case 3: - if (revision.minor != 1) - break; - result = get_voltage_ddc_info_v3(&i2c_line, index, header, - voltage_info_address); - break; - } - - if (result == BP_RESULT_OK) - result = bios_parser_get_thermal_ddc_info(dcb, - i2c_line, info); - - return result; -} - -/* TODO: temporary commented out to suppress 'defined but not used' warning */ -#if 0 -static enum bp_result bios_parser_get_ddc_info_for_i2c_line( - struct bios_parser *bp, - uint8_t i2c_line, struct graphics_object_i2c_info *info) -{ - uint32_t offset; - ATOM_OBJECT *object; - ATOM_OBJECT_TABLE *table; - uint32_t i; - - if (!info) - return BP_RESULT_BADINPUT; - - offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset); - - offset += bp->object_info_tbl_offset; - - table = GET_IMAGE(ATOM_OBJECT_TABLE, offset); - - if (!table) - return BP_RESULT_BADBIOSTABLE; - - for (i = 0; i < table->ucNumberOfObjects; i++) { - object = &table->asObjects[i]; - - if (!object) { - BREAK_TO_DEBUGGER(); /* Invalid object id */ - return BP_RESULT_BADINPUT; - } - - offset = le16_to_cpu(object->usRecordOffset) - + bp->object_info_tbl_offset; - - for (;;) { - ATOM_COMMON_RECORD_HEADER *header = - GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset); - - if (!header) - return BP_RESULT_BADBIOSTABLE; - - offset += header->ucRecordSize; - - if (LAST_RECORD_TYPE == header->ucRecordType || - !header->ucRecordSize) - break; - - if (ATOM_I2C_RECORD_TYPE == header->ucRecordType - && sizeof(ATOM_I2C_RECORD) <= - header->ucRecordSize) { - ATOM_I2C_RECORD *record = - (ATOM_I2C_RECORD *) header; - - if (i2c_line != record->sucI2cId.bfI2C_LineMux) - continue; - - /* get the I2C info */ - if (get_gpio_i2c_info(bp, record, info) == - BP_RESULT_OK) - return BP_RESULT_OK; - } - } - } - - return BP_RESULT_NORECORD; -} -#endif - static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb, struct graphics_object_id id, struct graphics_object_hpd_info *info) @@ -1129,62 +872,6 @@ static bool bios_parser_is_device_id_supported( return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0; } -static enum bp_result bios_parser_crt_control( - struct dc_bios *dcb, - enum engine_id engine_id, - bool enable, - uint32_t pixel_clock) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - uint8_t standard; - - if (!bp->cmd_tbl.dac1_encoder_control && - engine_id == ENGINE_ID_DACA) - return BP_RESULT_FAILURE; - if (!bp->cmd_tbl.dac2_encoder_control && - engine_id == ENGINE_ID_DACB) - return BP_RESULT_FAILURE; - /* validate params */ - switch (engine_id) { - case ENGINE_ID_DACA: - case ENGINE_ID_DACB: - break; - default: - /* unsupported engine */ - return BP_RESULT_FAILURE; - } - - standard = ATOM_DAC1_PS2; /* == ATOM_DAC2_PS2 */ - - if (enable) { - if (engine_id == ENGINE_ID_DACA) { - bp->cmd_tbl.dac1_encoder_control(bp, enable, - pixel_clock, standard); - if (bp->cmd_tbl.dac1_output_control != NULL) - bp->cmd_tbl.dac1_output_control(bp, enable); - } else { - bp->cmd_tbl.dac2_encoder_control(bp, enable, - pixel_clock, standard); - if (bp->cmd_tbl.dac2_output_control != NULL) - bp->cmd_tbl.dac2_output_control(bp, enable); - } - } else { - if (engine_id == ENGINE_ID_DACA) { - if 
(bp->cmd_tbl.dac1_output_control != NULL) - bp->cmd_tbl.dac1_output_control(bp, enable); - bp->cmd_tbl.dac1_encoder_control(bp, enable, - pixel_clock, standard); - } else { - if (bp->cmd_tbl.dac2_output_control != NULL) - bp->cmd_tbl.dac2_output_control(bp, enable); - bp->cmd_tbl.dac2_encoder_control(bp, enable, - pixel_clock, standard); - } - } - - return BP_RESULT_OK; -} - static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp, ATOM_OBJECT *object) { @@ -1219,49 +906,6 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp, return NULL; } -/** - * Get I2C information of input object id - * - * search all records to find the ATOM_I2C_RECORD_TYPE record IR - */ -static ATOM_I2C_RECORD *get_i2c_record( - struct bios_parser *bp, - ATOM_OBJECT *object) -{ - uint32_t offset; - ATOM_COMMON_RECORD_HEADER *record_header; - - if (!object) { - BREAK_TO_DEBUGGER(); - /* Invalid object */ - return NULL; - } - - offset = le16_to_cpu(object->usRecordOffset) - + bp->object_info_tbl_offset; - - for (;;) { - record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset); - - if (!record_header) - return NULL; - - if (LAST_RECORD_TYPE == record_header->ucRecordType || - 0 == record_header->ucRecordSize) - break; - - if (ATOM_I2C_RECORD_TYPE == record_header->ucRecordType && - sizeof(ATOM_I2C_RECORD) <= - record_header->ucRecordSize) { - return (ATOM_I2C_RECORD *)record_header; - } - - offset += record_header->ucRecordSize; - } - - return NULL; -} - static enum bp_result get_ss_info_from_ss_info_table( struct bios_parser *bp, uint32_t id, @@ -2356,40 +2000,6 @@ static ATOM_OBJECT *get_bios_object(struct bios_parser *bp, return NULL; } -static uint32_t get_dest_obj_list(struct bios_parser *bp, - ATOM_OBJECT *object, uint16_t **id_list) -{ - uint32_t offset; - uint8_t *number; - - if (!object) { - BREAK_TO_DEBUGGER(); /* Invalid object id */ - return 0; - } - - offset = le16_to_cpu(object->usSrcDstTableOffset) - + bp->object_info_tbl_offset; - - number = GET_IMAGE(uint8_t, offset); - if (!number) - return 0; - - offset += sizeof(uint8_t); - offset += sizeof(uint16_t) * (*number); - - number = GET_IMAGE(uint8_t, offset); - if ((!number) || (!*number)) - return 0; - - offset += sizeof(uint8_t); - *id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t)); - - if (!*id_list) - return 0; - - return *number; -} - static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object, uint16_t **id_list) { @@ -2417,35 +2027,6 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object, return *number; } -static uint32_t get_dst_number_from_object(struct bios_parser *bp, - ATOM_OBJECT *object) -{ - uint32_t offset; - uint8_t *number; - - if (!object) { - BREAK_TO_DEBUGGER(); /* Invalid encoder object id*/ - return 0; - } - - offset = le16_to_cpu(object->usSrcDstTableOffset) - + bp->object_info_tbl_offset; - - number = GET_IMAGE(uint8_t, offset); - if (!number) - return 0; - - offset += sizeof(uint8_t); - offset += sizeof(uint16_t) * (*number); - - number = GET_IMAGE(uint8_t, offset); - - if (!number) - return 0; - - return *number; -} - static struct device_id device_type_from_device_id(uint16_t device_id) { @@ -2624,750 +2205,6 @@ static uint32_t get_support_mask_for_device_id(struct device_id device_id) return 0; } -/** - * HwContext interface for writing MM registers - */ - -static bool i2c_read( - struct bios_parser *bp, - struct graphics_object_i2c_info *i2c_info, - uint8_t *buffer, - uint32_t length) -{ - struct ddc *ddc; - uint8_t 
offset[2] = { 0, 0 }; - bool result = false; - struct i2c_command cmd; - struct gpio_ddc_hw_info hw_info = { - i2c_info->i2c_hw_assist, - i2c_info->i2c_line }; - - ddc = dal_gpio_create_ddc(bp->base.ctx->gpio_service, - i2c_info->gpio_info.clk_a_register_index, - (1 << i2c_info->gpio_info.clk_a_shift), &hw_info); - - if (!ddc) - return result; - - /*Using SW engine */ - cmd.engine = I2C_COMMAND_ENGINE_SW; - cmd.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; - - { - struct i2c_payload payloads[] = { - { - .address = i2c_info->i2c_slave_address >> 1, - .data = offset, - .length = sizeof(offset), - .write = true - }, - { - .address = i2c_info->i2c_slave_address >> 1, - .data = buffer, - .length = length, - .write = false - } - }; - - cmd.payloads = payloads; - cmd.number_of_payloads = ARRAY_SIZE(payloads); - result = dc_submit_i2c( - ddc->ctx->dc, - ddc->hw_info.ddc_channel, - &cmd); - } - - dal_gpio_destroy_ddc(&ddc); - - return result; -} - -/** - * Read external display connection info table through i2c. - * validate the GUID and checksum. - * - * @return enum bp_result whether all data was sucessfully read - */ -static enum bp_result get_ext_display_connection_info( - struct bios_parser *bp, - ATOM_OBJECT *opm_object, - ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_display_connection_info_tbl) -{ - bool config_tbl_present = false; - ATOM_I2C_RECORD *i2c_record = NULL; - uint32_t i = 0; - - if (opm_object == NULL) - return BP_RESULT_BADINPUT; - - i2c_record = get_i2c_record(bp, opm_object); - - if (i2c_record != NULL) { - ATOM_GPIO_I2C_INFO *gpio_i2c_header; - struct graphics_object_i2c_info i2c_info; - - gpio_i2c_header = GET_IMAGE(ATOM_GPIO_I2C_INFO, - bp->master_data_tbl->ListOfDataTables.GPIO_I2C_Info); - - if (NULL == gpio_i2c_header) - return BP_RESULT_BADBIOSTABLE; - - if (get_gpio_i2c_info(bp, i2c_record, &i2c_info) != - BP_RESULT_OK) - return BP_RESULT_BADBIOSTABLE; - - if (i2c_read(bp, - &i2c_info, - (uint8_t *)ext_display_connection_info_tbl, - sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO))) { - config_tbl_present = true; - } - } - - /* Validate GUID */ - if (config_tbl_present) - for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; i++) { - if (ext_display_connection_info_tbl->ucGuid[i] - != ext_display_connection_guid[i]) { - config_tbl_present = false; - break; - } - } - - /* Validate checksum */ - if (config_tbl_present) { - uint8_t check_sum = 0; - uint8_t *buf = - (uint8_t *)ext_display_connection_info_tbl; - - for (i = 0; i < sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO); - i++) { - check_sum += buf[i]; - } - - if (check_sum != 0) - config_tbl_present = false; - } - - if (config_tbl_present) - return BP_RESULT_OK; - else - return BP_RESULT_FAILURE; -} - -/* - * Gets the first device ID in the same group as the given ID for enumerating. - * For instance, if any DFP device ID is passed, returns the device ID for DFP1. - * - * The first device ID in the same group as the passed device ID, or 0 if no - * matching device group found. - */ -static uint32_t enum_first_device_id(uint32_t dev_id) -{ - /* Return the first in the group that this ID belongs to. */ - if (dev_id & ATOM_DEVICE_CRT_SUPPORT) - return ATOM_DEVICE_CRT1_SUPPORT; - else if (dev_id & ATOM_DEVICE_DFP_SUPPORT) - return ATOM_DEVICE_DFP1_SUPPORT; - else if (dev_id & ATOM_DEVICE_LCD_SUPPORT) - return ATOM_DEVICE_LCD1_SUPPORT; - else if (dev_id & ATOM_DEVICE_TV_SUPPORT) - return ATOM_DEVICE_TV1_SUPPORT; - else if (dev_id & ATOM_DEVICE_CV_SUPPORT) - return ATOM_DEVICE_CV_SUPPORT; - - /* No group found for this device ID. 
*/ - - dm_error("%s: incorrect input %d\n", __func__, dev_id); - /* No matching support flag for given device ID */ - return 0; -} - -/* - * Gets the next device ID in the group for a given device ID. - * - * The current device ID being enumerated on. - * - * The next device ID in the group, or 0 if no device exists. - */ -static uint32_t enum_next_dev_id(uint32_t dev_id) -{ - /* Get next device ID in the group. */ - switch (dev_id) { - case ATOM_DEVICE_CRT1_SUPPORT: - return ATOM_DEVICE_CRT2_SUPPORT; - case ATOM_DEVICE_LCD1_SUPPORT: - return ATOM_DEVICE_LCD2_SUPPORT; - case ATOM_DEVICE_DFP1_SUPPORT: - return ATOM_DEVICE_DFP2_SUPPORT; - case ATOM_DEVICE_DFP2_SUPPORT: - return ATOM_DEVICE_DFP3_SUPPORT; - case ATOM_DEVICE_DFP3_SUPPORT: - return ATOM_DEVICE_DFP4_SUPPORT; - case ATOM_DEVICE_DFP4_SUPPORT: - return ATOM_DEVICE_DFP5_SUPPORT; - case ATOM_DEVICE_DFP5_SUPPORT: - return ATOM_DEVICE_DFP6_SUPPORT; - } - - /* Done enumerating through devices. */ - return 0; -} - -/* - * Returns the new device tag record for patched BIOS object. - * - * [IN] pExtDisplayPath - External display path to copy device tag from. - * [IN] deviceSupport - Bit vector for device ID support flags. - * [OUT] pDeviceTag - Device tag structure to fill with patched data. - * - * True if a compatible device ID was found, false otherwise. - */ -static bool get_patched_device_tag( - struct bios_parser *bp, - EXT_DISPLAY_PATH *ext_display_path, - uint32_t device_support, - ATOM_CONNECTOR_DEVICE_TAG *device_tag) -{ - uint32_t dev_id; - /* Use fallback behaviour if not supported. */ - if (!bp->remap_device_tags) { - device_tag->ulACPIDeviceEnum = - cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum)); - device_tag->usDeviceID = - cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceTag)); - return true; - } - - /* Find the first unused in the same group. */ - dev_id = enum_first_device_id(le16_to_cpu(ext_display_path->usDeviceTag)); - while (dev_id != 0) { - /* Assign this device ID if supported. */ - if ((device_support & dev_id) != 0) { - device_tag->ulACPIDeviceEnum = - cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum)); - device_tag->usDeviceID = cpu_to_le16((USHORT) dev_id); - return true; - } - - dev_id = enum_next_dev_id(dev_id); - } - - /* No compatible device ID found. */ - return false; -} - -/* - * Adds a device tag to a BIOS object's device tag record if there is - * matching device ID supported. - * - * pObject - Pointer to the BIOS object to add the device tag to. - * pExtDisplayPath - Display path to retrieve base device ID from. - * pDeviceSupport - Pointer to bit vector for supported device IDs. - */ -static void add_device_tag_from_ext_display_path( - struct bios_parser *bp, - ATOM_OBJECT *object, - EXT_DISPLAY_PATH *ext_display_path, - uint32_t *device_support) -{ - /* Get device tag record for object. */ - ATOM_CONNECTOR_DEVICE_TAG *device_tag = NULL; - ATOM_CONNECTOR_DEVICE_TAG_RECORD *device_tag_record = NULL; - enum bp_result result = - bios_parser_get_device_tag_record( - bp, object, &device_tag_record); - - if ((le16_to_cpu(ext_display_path->usDeviceTag) != CONNECTOR_OBJECT_ID_NONE) - && (result == BP_RESULT_OK)) { - uint8_t index; - - if ((device_tag_record->ucNumberOfDevice == 1) && - (le16_to_cpu(device_tag_record->asDeviceTag[0].usDeviceID) == 0)) { - /*Workaround bug in current VBIOS releases where - * ucNumberOfDevice = 1 but there is no actual device - * tag data. This w/a is temporary until the updated - * VBIOS is distributed. 
*/ - device_tag_record->ucNumberOfDevice = - device_tag_record->ucNumberOfDevice - 1; - } - - /* Attempt to find a matching device ID. */ - index = device_tag_record->ucNumberOfDevice; - device_tag = &device_tag_record->asDeviceTag[index]; - if (get_patched_device_tag( - bp, - ext_display_path, - *device_support, - device_tag)) { - /* Update cached device support to remove assigned ID. - */ - *device_support &= ~le16_to_cpu(device_tag->usDeviceID); - device_tag_record->ucNumberOfDevice++; - } - } -} - -/* - * Read out a single EXT_DISPLAY_PATH from the external display connection info - * table. The specific entry in the table is determined by the enum_id passed - * in. - * - * EXT_DISPLAY_PATH describing a single Configuration table entry - */ - -#define INVALID_CONNECTOR 0xffff - -static EXT_DISPLAY_PATH *get_ext_display_path_entry( - ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *config_table, - uint32_t bios_object_id) -{ - EXT_DISPLAY_PATH *ext_display_path; - uint32_t ext_display_path_index = - ((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1; - - if (ext_display_path_index >= MAX_NUMBER_OF_EXT_DISPLAY_PATH) - return NULL; - - ext_display_path = &config_table->sPath[ext_display_path_index]; - - if (le16_to_cpu(ext_display_path->usDeviceConnector) == INVALID_CONNECTOR) - ext_display_path->usDeviceConnector = cpu_to_le16(0); - - return ext_display_path; -} - -/* - * Get AUX/DDC information of input object id - * - * search all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE record - * IR - */ -static ATOM_CONNECTOR_AUXDDC_LUT_RECORD *get_ext_connector_aux_ddc_lut_record( - struct bios_parser *bp, - ATOM_OBJECT *object) -{ - uint32_t offset; - ATOM_COMMON_RECORD_HEADER *header; - - if (!object) { - BREAK_TO_DEBUGGER(); - /* Invalid object */ - return NULL; - } - - offset = le16_to_cpu(object->usRecordOffset) - + bp->object_info_tbl_offset; - - for (;;) { - header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset); - - if (!header) - return NULL; - - if (LAST_RECORD_TYPE == header->ucRecordType || - 0 == header->ucRecordSize) - break; - - if (ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE == - header->ucRecordType && - sizeof(ATOM_CONNECTOR_AUXDDC_LUT_RECORD) <= - header->ucRecordSize) - return (ATOM_CONNECTOR_AUXDDC_LUT_RECORD *)(header); - - offset += header->ucRecordSize; - } - - return NULL; -} - -/* - * Get AUX/DDC information of input object id - * - * search all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE record - * IR - */ -static ATOM_CONNECTOR_HPDPIN_LUT_RECORD *get_ext_connector_hpd_pin_lut_record( - struct bios_parser *bp, - ATOM_OBJECT *object) -{ - uint32_t offset; - ATOM_COMMON_RECORD_HEADER *header; - - if (!object) { - BREAK_TO_DEBUGGER(); - /* Invalid object */ - return NULL; - } - - offset = le16_to_cpu(object->usRecordOffset) - + bp->object_info_tbl_offset; - - for (;;) { - header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset); - - if (!header) - return NULL; - - if (LAST_RECORD_TYPE == header->ucRecordType || - 0 == header->ucRecordSize) - break; - - if (ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE == - header->ucRecordType && - sizeof(ATOM_CONNECTOR_HPDPIN_LUT_RECORD) <= - header->ucRecordSize) - return (ATOM_CONNECTOR_HPDPIN_LUT_RECORD *)header; - - offset += header->ucRecordSize; - } - - return NULL; -} - -/* - * Check whether we need to patch the VBIOS connector info table with - * data from an external display connection info table. This is - * necessary to support MXM boards with an OPM (output personality - * module). 
With these designs, the VBIOS connector info table - * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves - * the external connection info table through i2c and then looks up the - * connector ID to find the real connector type (e.g. DFP1). - * - */ -static enum bp_result patch_bios_image_from_ext_display_connection_info( - struct bios_parser *bp) -{ - ATOM_OBJECT_TABLE *connector_tbl; - uint32_t connector_tbl_offset; - struct graphics_object_id object_id; - ATOM_OBJECT *object; - ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ext_display_connection_info_tbl; - EXT_DISPLAY_PATH *ext_display_path; - ATOM_CONNECTOR_AUXDDC_LUT_RECORD *aux_ddc_lut_record = NULL; - ATOM_I2C_RECORD *i2c_record = NULL; - ATOM_CONNECTOR_HPDPIN_LUT_RECORD *hpd_pin_lut_record = NULL; - ATOM_HPD_INT_RECORD *hpd_record = NULL; - ATOM_OBJECT_TABLE *encoder_table; - uint32_t encoder_table_offset; - ATOM_OBJECT *opm_object = NULL; - uint32_t i = 0; - struct graphics_object_id opm_object_id = - dal_graphics_object_id_init( - GENERIC_ID_MXM_OPM, - ENUM_ID_1, - OBJECT_TYPE_GENERIC); - ATOM_CONNECTOR_DEVICE_TAG_RECORD *dev_tag_record; - uint32_t cached_device_support = - le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport); - - uint32_t dst_number; - uint16_t *dst_object_id_list; - - opm_object = get_bios_object(bp, opm_object_id); - if (!opm_object) - return BP_RESULT_UNSUPPORTED; - - memset(&ext_display_connection_info_tbl, 0, - sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO)); - - connector_tbl_offset = bp->object_info_tbl_offset - + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset); - connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset); - - /* Read Connector info table from EEPROM through i2c */ - if (get_ext_display_connection_info(bp, - opm_object, - &ext_display_connection_info_tbl) != BP_RESULT_OK) { - - DC_LOG_WARNING("%s: Failed to read Connection Info Table", __func__); - return BP_RESULT_UNSUPPORTED; - } - - /* Get pointer to AUX/DDC and HPD LUTs */ - aux_ddc_lut_record = - get_ext_connector_aux_ddc_lut_record(bp, opm_object); - hpd_pin_lut_record = - get_ext_connector_hpd_pin_lut_record(bp, opm_object); - - if ((aux_ddc_lut_record == NULL) || (hpd_pin_lut_record == NULL)) - return BP_RESULT_UNSUPPORTED; - - /* Cache support bits for currently unmapped device types. */ - if (bp->remap_device_tags) { - for (i = 0; i < connector_tbl->ucNumberOfObjects; ++i) { - uint32_t j; - /* Remove support for all non-MXM connectors. */ - object = &connector_tbl->asObjects[i]; - object_id = object_id_from_bios_object_id( - le16_to_cpu(object->usObjectID)); - if ((OBJECT_TYPE_CONNECTOR != object_id.type) || - (CONNECTOR_ID_MXM == object_id.id)) - continue; - - /* Remove support for all device tags. */ - if (bios_parser_get_device_tag_record( - bp, object, &dev_tag_record) != BP_RESULT_OK) - continue; - - for (j = 0; j < dev_tag_record->ucNumberOfDevice; ++j) { - ATOM_CONNECTOR_DEVICE_TAG *device_tag = - &dev_tag_record->asDeviceTag[j]; - cached_device_support &= - ~le16_to_cpu(device_tag->usDeviceID); - } - } - } - - /* Find all MXM connector objects and patch them with connector info - * from the external display connection info table. 
*/ - for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) { - uint32_t j; - - object = &connector_tbl->asObjects[i]; - object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID)); - if ((OBJECT_TYPE_CONNECTOR != object_id.type) || - (CONNECTOR_ID_MXM != object_id.id)) - continue; - - /* Get the correct connection info table entry based on the enum - * id. */ - ext_display_path = get_ext_display_path_entry( - &ext_display_connection_info_tbl, - le16_to_cpu(object->usObjectID)); - if (!ext_display_path) - return BP_RESULT_FAILURE; - - /* Patch device connector ID */ - object->usObjectID = - cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceConnector)); - - /* Patch device tag, ulACPIDeviceEnum. */ - add_device_tag_from_ext_display_path( - bp, - object, - ext_display_path, - &cached_device_support); - - /* Patch HPD info */ - if (ext_display_path->ucExtHPDPINLutIndex < - MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES) { - hpd_record = get_hpd_record(bp, object); - if (hpd_record) { - uint8_t index = - ext_display_path->ucExtHPDPINLutIndex; - hpd_record->ucHPDIntGPIOID = - hpd_pin_lut_record->ucHPDPINMap[index]; - } else { - BREAK_TO_DEBUGGER(); - /* Invalid hpd record */ - return BP_RESULT_FAILURE; - } - } - - /* Patch I2C/AUX info */ - if (ext_display_path->ucExtHPDPINLutIndex < - MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES) { - i2c_record = get_i2c_record(bp, object); - if (i2c_record) { - uint8_t index = - ext_display_path->ucExtAUXDDCLutIndex; - i2c_record->sucI2cId = - aux_ddc_lut_record->ucAUXDDCMap[index]; - } else { - BREAK_TO_DEBUGGER(); - /* Invalid I2C record */ - return BP_RESULT_FAILURE; - } - } - - /* Merge with other MXM connectors that map to the same physical - * connector. */ - for (j = i + 1; - j < connector_tbl->ucNumberOfObjects; j++) { - ATOM_OBJECT *next_object; - struct graphics_object_id next_object_id; - EXT_DISPLAY_PATH *next_ext_display_path; - - next_object = &connector_tbl->asObjects[j]; - next_object_id = object_id_from_bios_object_id( - le16_to_cpu(next_object->usObjectID)); - - if ((OBJECT_TYPE_CONNECTOR != next_object_id.type) && - (CONNECTOR_ID_MXM == next_object_id.id)) - continue; - - next_ext_display_path = get_ext_display_path_entry( - &ext_display_connection_info_tbl, - le16_to_cpu(next_object->usObjectID)); - - if (next_ext_display_path == NULL) - return BP_RESULT_FAILURE; - - /* Merge if using same connector. */ - if ((le16_to_cpu(next_ext_display_path->usDeviceConnector) == - le16_to_cpu(ext_display_path->usDeviceConnector)) && - (le16_to_cpu(ext_display_path->usDeviceConnector) != 0)) { - /* Clear duplicate connector from table. */ - next_object->usObjectID = cpu_to_le16(0); - add_device_tag_from_ext_display_path( - bp, - object, - ext_display_path, - &cached_device_support); - } - } - } - - /* Find all encoders which have an MXM object as their destination. 
- * Replace the MXM object with the real connector Id from the external - * display connection info table */ - - encoder_table_offset = bp->object_info_tbl_offset - + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset); - encoder_table = GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset); - - for (i = 0; i < encoder_table->ucNumberOfObjects; i++) { - uint32_t j; - - object = &encoder_table->asObjects[i]; - - dst_number = get_dest_obj_list(bp, object, &dst_object_id_list); - - for (j = 0; j < dst_number; j++) { - object_id = object_id_from_bios_object_id( - dst_object_id_list[j]); - - if ((OBJECT_TYPE_CONNECTOR != object_id.type) || - (CONNECTOR_ID_MXM != object_id.id)) - continue; - - /* Get the correct connection info table entry based on - * the enum id. */ - ext_display_path = - get_ext_display_path_entry( - &ext_display_connection_info_tbl, - dst_object_id_list[j]); - - if (ext_display_path == NULL) - return BP_RESULT_FAILURE; - - dst_object_id_list[j] = - le16_to_cpu(ext_display_path->usDeviceConnector); - } - } - - return BP_RESULT_OK; -} - -/* - * Check whether we need to patch the VBIOS connector info table with - * data from an external display connection info table. This is - * necessary to support MXM boards with an OPM (output personality - * module). With these designs, the VBIOS connector info table - * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves - * the external connection info table through i2c and then looks up the - * connector ID to find the real connector type (e.g. DFP1). - * - */ - -static void process_ext_display_connection_info(struct bios_parser *bp) -{ - ATOM_OBJECT_TABLE *connector_tbl; - uint32_t connector_tbl_offset; - struct graphics_object_id object_id; - ATOM_OBJECT *object; - bool mxm_connector_found = false; - bool null_entry_found = false; - uint32_t i = 0; - - connector_tbl_offset = bp->object_info_tbl_offset + - le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset); - connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset); - - /* Look for MXM connectors to determine whether we need patch the VBIOS - * connector info table. Look for null entries to determine whether we - * need to compact connector table. 
*/ - for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) { - object = &connector_tbl->asObjects[i]; - object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID)); - - if ((OBJECT_TYPE_CONNECTOR == object_id.type) && - (CONNECTOR_ID_MXM == object_id.id)) { - /* Once we found MXM connector - we can break */ - mxm_connector_found = true; - break; - } else if (OBJECT_TYPE_CONNECTOR != object_id.type) { - /* We need to continue looping - to check if MXM - * connector present */ - null_entry_found = true; - } - } - - /* Patch BIOS image */ - if (mxm_connector_found || null_entry_found) { - uint32_t connectors_num = 0; - uint8_t *original_bios; - /* Step 1: Replace bios image with the new copy which will be - * patched */ - bp->base.bios_local_image = kzalloc(bp->base.bios_size, - GFP_KERNEL); - if (bp->base.bios_local_image == NULL) { - BREAK_TO_DEBUGGER(); - /* Failed to alloc bp->base.bios_local_image */ - return; - } - - memmove(bp->base.bios_local_image, bp->base.bios, bp->base.bios_size); - original_bios = bp->base.bios; - bp->base.bios = bp->base.bios_local_image; - connector_tbl = - GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset); - - /* Step 2: (only if MXM connector found) Patch BIOS image with - * info from external module */ - if (mxm_connector_found && - patch_bios_image_from_ext_display_connection_info(bp) != - BP_RESULT_OK) { - /* Patching the bios image has failed. We will copy - * again original image provided and afterwards - * only remove null entries */ - memmove( - bp->base.bios_local_image, - original_bios, - bp->base.bios_size); - } - - /* Step 3: Compact connector table (remove null entries, valid - * entries moved to beginning) */ - for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) { - object = &connector_tbl->asObjects[i]; - object_id = object_id_from_bios_object_id( - le16_to_cpu(object->usObjectID)); - - if (OBJECT_TYPE_CONNECTOR != object_id.type) - continue; - - if (i != connectors_num) { - memmove( - &connector_tbl-> - asObjects[connectors_num], - object, - sizeof(ATOM_OBJECT)); - } - ++connectors_num; - } - connector_tbl->ucNumberOfObjects = (uint8_t)connectors_num; - } -} - -static void bios_parser_post_init(struct dc_bios *dcb) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - - process_ext_display_connection_info(bp); -} - /** * bios_parser_set_scratch_critical_state * @@ -3959,22 +2796,12 @@ static enum bp_result bios_get_board_layout_info( static const struct dc_vbios_funcs vbios_funcs = { .get_connectors_number = bios_parser_get_connectors_number, - .get_encoder_id = bios_parser_get_encoder_id, - .get_connector_id = bios_parser_get_connector_id, - .get_dst_number = bios_parser_get_dst_number, - .get_src_obj = bios_parser_get_src_obj, - .get_dst_obj = bios_parser_get_dst_obj, - .get_i2c_info = bios_parser_get_i2c_info, - .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info, - - .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info, - .get_hpd_info = bios_parser_get_hpd_info, .get_device_tag = bios_parser_get_device_tag, @@ -3993,7 +2820,6 @@ static const struct dc_vbios_funcs vbios_funcs = { /* bios scratch register communication */ .is_accelerated_mode = bios_is_accelerated_mode, - .get_vga_enabled_displays = bios_get_vga_enabled_displays, .set_scratch_critical_state = bios_parser_set_scratch_critical_state, @@ -4004,8 +2830,6 @@ static const struct dc_vbios_funcs vbios_funcs = { .transmitter_control = bios_parser_transmitter_control, - .crt_control = bios_parser_crt_control, /* not used in DAL3. 
keep for now in case we need to support VGA on Bonaire */ - .enable_crtc = bios_parser_enable_crtc, .adjust_pixel_clock = bios_parser_adjust_pixel_clock, @@ -4025,7 +2849,6 @@ static const struct dc_vbios_funcs vbios_funcs = { .enable_disp_power_gating = bios_parser_enable_disp_power_gating, /* SW init and patch */ - .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */ .bios_parser_destroy = bios_parser_destroy, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index eab007e1793c..ff764da21b6f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -166,21 +166,6 @@ static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb) return count; } -static struct graphics_object_id bios_parser_get_encoder_id( - struct dc_bios *dcb, - uint32_t i) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - struct graphics_object_id object_id = dal_graphics_object_id_init( - 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN); - - if (bp->object_info_tbl.v1_4->number_of_path > i) - object_id = object_id_from_bios_object_id( - bp->object_info_tbl.v1_4->display_path[i].encoderobjid); - - return object_id; -} - static struct graphics_object_id bios_parser_get_connector_id( struct dc_bios *dcb, uint8_t i) @@ -204,26 +189,6 @@ static struct graphics_object_id bios_parser_get_connector_id( return object_id; } - -/* TODO: GetNumberOfSrc*/ - -static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb, - struct graphics_object_id id) -{ - /* connector has 1 Dest, encoder has 0 Dest */ - switch (id.type) { - case OBJECT_TYPE_ENCODER: - return 0; - case OBJECT_TYPE_CONNECTOR: - return 1; - default: - return 0; - } -} - -/* removed getSrcObjList, getDestObjList*/ - - static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb, struct graphics_object_id object_id, uint32_t index, struct graphics_object_id *src_object_id) @@ -283,52 +248,10 @@ static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb, return bp_result; } -static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb, - struct graphics_object_id object_id, uint32_t index, - struct graphics_object_id *dest_object_id) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - unsigned int i; - enum bp_result bp_result = BP_RESULT_BADINPUT; - struct graphics_object_id obj_id = {0}; - struct object_info_table *tbl = &bp->object_info_tbl; - - if (!dest_object_id) - return BP_RESULT_BADINPUT; - - switch (object_id.type) { - case OBJECT_TYPE_ENCODER: - /* TODO: since num of src must be less than 2. - * If found in for loop, should break. 
- * DAL2 implementation may be changed too - */ - for (i = 0; i < tbl->v1_4->number_of_path; i++) { - obj_id = object_id_from_bios_object_id( - tbl->v1_4->display_path[i].encoderobjid); - if (object_id.type == obj_id.type && - object_id.id == obj_id.id && - object_id.enum_id == - obj_id.enum_id) { - *dest_object_id = - object_id_from_bios_object_id( - tbl->v1_4->display_path[i].display_objid); - /* break; */ - } - } - bp_result = BP_RESULT_OK; - break; - default: - break; - } - - return bp_result; -} - - /* from graphics_object_id, find display path which includes the object_id */ static struct atom_display_object_path_v2 *get_bios_object( - struct bios_parser *bp, - struct graphics_object_id id) + struct bios_parser *bp, + struct graphics_object_id id) { unsigned int i; struct graphics_object_id obj_id = {0}; @@ -337,27 +260,22 @@ static struct atom_display_object_path_v2 *get_bios_object( case OBJECT_TYPE_ENCODER: for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) { obj_id = object_id_from_bios_object_id( - bp->object_info_tbl.v1_4->display_path[i].encoderobjid); - if (id.type == obj_id.type && - id.id == obj_id.id && - id.enum_id == obj_id.enum_id) - return - &bp->object_info_tbl.v1_4->display_path[i]; + bp->object_info_tbl.v1_4->display_path[i].encoderobjid); + if (id.type == obj_id.type && id.id == obj_id.id + && id.enum_id == obj_id.enum_id) + return &bp->object_info_tbl.v1_4->display_path[i]; } case OBJECT_TYPE_CONNECTOR: case OBJECT_TYPE_GENERIC: /* Both Generic and Connector Object ID * will be stored on display_objid - */ + */ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) { obj_id = object_id_from_bios_object_id( - bp->object_info_tbl.v1_4->display_path[i].display_objid - ); - if (id.type == obj_id.type && - id.id == obj_id.id && - id.enum_id == obj_id.enum_id) - return - &bp->object_info_tbl.v1_4->display_path[i]; + bp->object_info_tbl.v1_4->display_path[i].display_objid); + if (id.type == obj_id.type && id.id == obj_id.id + && id.enum_id == obj_id.enum_id) + return &bp->object_info_tbl.v1_4->display_path[i]; } default: return NULL; @@ -489,99 +407,6 @@ static enum bp_result get_gpio_i2c_info( return BP_RESULT_OK; } -static enum bp_result get_voltage_ddc_info_v4( - uint8_t *i2c_line, - uint32_t index, - struct atom_common_table_header *header, - uint8_t *address) -{ - enum bp_result result = BP_RESULT_NORECORD; - struct atom_voltage_objects_info_v4_1 *info = - (struct atom_voltage_objects_info_v4_1 *) address; - - uint8_t *voltage_current_object = - (uint8_t *) (&(info->voltage_object[0])); - - while ((address + le16_to_cpu(header->structuresize)) > - voltage_current_object) { - struct atom_i2c_voltage_object_v4 *object = - (struct atom_i2c_voltage_object_v4 *) - voltage_current_object; - - if (object->header.voltage_mode == - ATOM_INIT_VOLTAGE_REGULATOR) { - if (object->header.voltage_type == index) { - *i2c_line = object->i2c_id ^ 0x90; - result = BP_RESULT_OK; - break; - } - } - - voltage_current_object += - le16_to_cpu(object->header.object_size); - } - return result; -} - -static enum bp_result bios_parser_get_thermal_ddc_info( - struct dc_bios *dcb, - uint32_t i2c_channel_id, - struct graphics_object_i2c_info *info) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - struct i2c_id_config_access *config; - struct atom_i2c_record record; - - if (!info) - return BP_RESULT_BADINPUT; - - config = (struct i2c_id_config_access *) &i2c_channel_id; - - record.i2c_id = config->bfHW_Capable; - record.i2c_id |= config->bfI2C_LineMux; - record.i2c_id |= 
config->bfHW_EngineID; - - return get_gpio_i2c_info(bp, &record, info); -} - -static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb, - uint32_t index, - struct graphics_object_i2c_info *info) -{ - uint8_t i2c_line = 0; - enum bp_result result = BP_RESULT_NORECORD; - uint8_t *voltage_info_address; - struct atom_common_table_header *header; - struct atom_data_revision revision = {0}; - struct bios_parser *bp = BP_FROM_DCB(dcb); - - if (!DATA_TABLES(voltageobject_info)) - return result; - - voltage_info_address = bios_get_image(&bp->base, - DATA_TABLES(voltageobject_info), - sizeof(struct atom_common_table_header)); - - header = (struct atom_common_table_header *) voltage_info_address; - - get_atom_data_table_revision(header, &revision); - - switch (revision.major) { - case 4: - if (revision.minor != 1) - break; - result = get_voltage_ddc_info_v4(&i2c_line, index, header, - voltage_info_address); - break; - } - - if (result == BP_RESULT_OK) - result = bios_parser_get_thermal_ddc_info(dcb, - i2c_line, info); - - return result; -} - static enum bp_result bios_parser_get_hpd_info( struct dc_bios *dcb, struct graphics_object_id id, @@ -997,8 +822,8 @@ static enum bp_result bios_parser_get_spread_spectrum_info( } static enum bp_result get_embedded_panel_info_v2_1( - struct bios_parser *bp, - struct embedded_panel_info *info) + struct bios_parser *bp, + struct embedded_panel_info *info) { struct lcd_info_v2_1 *lvds; @@ -1021,92 +846,78 @@ static enum bp_result get_embedded_panel_info_v2_1( memset(info, 0, sizeof(struct embedded_panel_info)); /* We need to convert from 10KHz units into KHz units */ - info->lcd_timing.pixel_clk = - le16_to_cpu(lvds->lcd_timing.pixclk) * 10; + info->lcd_timing.pixel_clk = le16_to_cpu(lvds->lcd_timing.pixclk) * 10; /* usHActive does not include borders, according to VBIOS team */ - info->lcd_timing.horizontal_addressable = - le16_to_cpu(lvds->lcd_timing.h_active); + info->lcd_timing.horizontal_addressable = le16_to_cpu(lvds->lcd_timing.h_active); /* usHBlanking_Time includes borders, so we should really be * subtractingborders duing this translation, but LVDS generally * doesn't have borders, so we should be okay leaving this as is for * now. May need to revisit if we ever have LVDS with borders */ - info->lcd_timing.horizontal_blanking_time = - le16_to_cpu(lvds->lcd_timing.h_blanking_time); + info->lcd_timing.horizontal_blanking_time = le16_to_cpu(lvds->lcd_timing.h_blanking_time); /* usVActive does not include borders, according to VBIOS team*/ - info->lcd_timing.vertical_addressable = - le16_to_cpu(lvds->lcd_timing.v_active); + info->lcd_timing.vertical_addressable = le16_to_cpu(lvds->lcd_timing.v_active); /* usVBlanking_Time includes borders, so we should really be * subtracting borders duing this translation, but LVDS generally * doesn't have borders, so we should be okay leaving this as is for * now. 
May need to revisit if we ever have LVDS with borders */ - info->lcd_timing.vertical_blanking_time = - le16_to_cpu(lvds->lcd_timing.v_blanking_time); - info->lcd_timing.horizontal_sync_offset = - le16_to_cpu(lvds->lcd_timing.h_sync_offset); - info->lcd_timing.horizontal_sync_width = - le16_to_cpu(lvds->lcd_timing.h_sync_width); - info->lcd_timing.vertical_sync_offset = - le16_to_cpu(lvds->lcd_timing.v_sync_offset); - info->lcd_timing.vertical_sync_width = - le16_to_cpu(lvds->lcd_timing.v_syncwidth); + info->lcd_timing.vertical_blanking_time = le16_to_cpu(lvds->lcd_timing.v_blanking_time); + info->lcd_timing.horizontal_sync_offset = le16_to_cpu(lvds->lcd_timing.h_sync_offset); + info->lcd_timing.horizontal_sync_width = le16_to_cpu(lvds->lcd_timing.h_sync_width); + info->lcd_timing.vertical_sync_offset = le16_to_cpu(lvds->lcd_timing.v_sync_offset); + info->lcd_timing.vertical_sync_width = le16_to_cpu(lvds->lcd_timing.v_syncwidth); info->lcd_timing.horizontal_border = lvds->lcd_timing.h_border; info->lcd_timing.vertical_border = lvds->lcd_timing.v_border; /* not provided by VBIOS */ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0; - info->lcd_timing.misc_info.H_SYNC_POLARITY = - ~(uint32_t) - (lvds->lcd_timing.miscinfo & ATOM_HSYNC_POLARITY); - info->lcd_timing.misc_info.V_SYNC_POLARITY = - ~(uint32_t) - (lvds->lcd_timing.miscinfo & ATOM_VSYNC_POLARITY); + info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo + & ATOM_HSYNC_POLARITY); + info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo + & ATOM_VSYNC_POLARITY); /* not provided by VBIOS */ info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0; - info->lcd_timing.misc_info.H_REPLICATION_BY2 = - !!(lvds->lcd_timing.miscinfo & ATOM_H_REPLICATIONBY2); - info->lcd_timing.misc_info.V_REPLICATION_BY2 = - !!(lvds->lcd_timing.miscinfo & ATOM_V_REPLICATIONBY2); - info->lcd_timing.misc_info.COMPOSITE_SYNC = - !!(lvds->lcd_timing.miscinfo & ATOM_COMPOSITESYNC); - info->lcd_timing.misc_info.INTERLACE = - !!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE); + info->lcd_timing.misc_info.H_REPLICATION_BY2 = !!(lvds->lcd_timing.miscinfo + & ATOM_H_REPLICATIONBY2); + info->lcd_timing.misc_info.V_REPLICATION_BY2 = !!(lvds->lcd_timing.miscinfo + & ATOM_V_REPLICATIONBY2); + info->lcd_timing.misc_info.COMPOSITE_SYNC = !!(lvds->lcd_timing.miscinfo + & ATOM_COMPOSITESYNC); + info->lcd_timing.misc_info.INTERLACE = !!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE); /* not provided by VBIOS*/ info->lcd_timing.misc_info.DOUBLE_CLOCK = 0; /* not provided by VBIOS*/ info->ss_id = 0; - info->realtek_eDPToLVDS = - !!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID); + info->realtek_eDPToLVDS = !!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID); return BP_RESULT_OK; } static enum bp_result bios_parser_get_embedded_panel_info( - struct dc_bios *dcb, - struct embedded_panel_info *info) + struct dc_bios *dcb, + struct embedded_panel_info *info) { - struct bios_parser *bp = BP_FROM_DCB(dcb); + struct bios_parser + *bp = BP_FROM_DCB(dcb); struct atom_common_table_header *header; struct atom_data_revision tbl_revision; if (!DATA_TABLES(lcd_info)) return BP_RESULT_FAILURE; - header = GET_IMAGE(struct atom_common_table_header, - DATA_TABLES(lcd_info)); + header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(lcd_info)); if (!header) return BP_RESULT_BADBIOSTABLE; get_atom_data_table_revision(header, &tbl_revision); - switch (tbl_revision.major) { case 2: switch (tbl_revision.minor) { @@ -1174,12 +985,6 @@ static bool 
bios_parser_is_device_id_supported( mask) != 0; } -static void bios_parser_post_init( - struct dc_bios *dcb) -{ - /* TODO for OPM module. Need implement later */ -} - static uint32_t bios_parser_get_ss_entry_number( struct dc_bios *dcb, enum as_signal_type signal) @@ -1238,17 +1043,6 @@ static enum bp_result bios_parser_set_dce_clock( return bp->cmd_tbl.set_dce_clock(bp, bp_params); } -static unsigned int bios_parser_get_smu_clock_info( - struct dc_bios *dcb) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - - if (!bp->cmd_tbl.get_smu_clock_info) - return BP_RESULT_FAILURE; - - return bp->cmd_tbl.get_smu_clock_info(bp, 0); -} - static enum bp_result bios_parser_program_crtc_timing( struct dc_bios *dcb, struct bp_hw_crtc_timing_parameters *bp_params) @@ -1306,13 +1100,6 @@ static bool bios_parser_is_accelerated_mode( return bios_is_accelerated_mode(dcb); } -static uint32_t bios_parser_get_vga_enabled_displays( - struct dc_bios *bios) -{ - return bios_get_vga_enabled_displays(bios); -} - - /** * bios_parser_set_scratch_critical_state * @@ -2071,22 +1858,12 @@ static enum bp_result bios_get_board_layout_info( static const struct dc_vbios_funcs vbios_funcs = { .get_connectors_number = bios_parser_get_connectors_number, - .get_encoder_id = bios_parser_get_encoder_id, - .get_connector_id = bios_parser_get_connector_id, - .get_dst_number = bios_parser_get_dst_number, - .get_src_obj = bios_parser_get_src_obj, - .get_dst_obj = bios_parser_get_dst_obj, - .get_i2c_info = bios_parser_get_i2c_info, - .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info, - - .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info, - .get_hpd_info = bios_parser_get_hpd_info, .get_device_tag = bios_parser_get_device_tag, @@ -2105,10 +1882,7 @@ static const struct dc_vbios_funcs vbios_funcs = { .is_device_id_supported = bios_parser_is_device_id_supported, - - .is_accelerated_mode = bios_parser_is_accelerated_mode, - .get_vga_enabled_displays = bios_parser_get_vga_enabled_displays, .set_scratch_critical_state = bios_parser_set_scratch_critical_state, @@ -2126,20 +1900,12 @@ static const struct dc_vbios_funcs vbios_funcs = { .program_crtc_timing = bios_parser_program_crtc_timing, - /* .blank_crtc = bios_parser_blank_crtc, */ - .crtc_source_select = bios_parser_crtc_source_select, - /* .external_encoder_control = bios_parser_external_encoder_control, */ - .enable_disp_power_gating = bios_parser_enable_disp_power_gating, - .post_init = bios_parser_post_init, - .bios_parser_destroy = firmware_parser_destroy, - .get_smu_clock_info = bios_parser_get_smu_clock_info, - .get_board_layout_info = bios_get_board_layout_info, }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 90082bab71f0..8130b95ccc53 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -41,38 +41,17 @@ struct dc_vbios_funcs { uint8_t (*get_connectors_number)(struct dc_bios *bios); - struct graphics_object_id (*get_encoder_id)( - struct dc_bios *bios, - uint32_t i); struct graphics_object_id (*get_connector_id)( struct dc_bios *bios, uint8_t connector_index); - uint32_t (*get_dst_number)( - struct dc_bios *bios, - struct graphics_object_id id); - enum bp_result (*get_src_obj)( struct dc_bios *bios, struct graphics_object_id object_id, uint32_t index, struct graphics_object_id *src_object_id); - enum bp_result (*get_dst_obj)( - struct dc_bios *bios, - struct graphics_object_id object_id, uint32_t index, - struct graphics_object_id 
*dest_object_id); - enum bp_result (*get_i2c_info)( struct dc_bios *dcb, struct graphics_object_id id, struct graphics_object_i2c_info *info); - - enum bp_result (*get_voltage_ddc_info)( - struct dc_bios *bios, - uint32_t index, - struct graphics_object_i2c_info *info); - enum bp_result (*get_thermal_ddc_info)( - struct dc_bios *bios, - uint32_t i2c_channel_id, - struct graphics_object_i2c_info *info); enum bp_result (*get_hpd_info)( struct dc_bios *bios, struct graphics_object_id id, @@ -105,35 +84,8 @@ struct dc_vbios_funcs { struct graphics_object_id object_id, struct bp_encoder_cap_info *info); - bool (*is_lid_status_changed)( - struct dc_bios *bios); - bool (*is_display_config_changed)( - struct dc_bios *bios); bool (*is_accelerated_mode)( struct dc_bios *bios); - uint32_t (*get_vga_enabled_displays)( - struct dc_bios *bios); - void (*get_bios_event_info)( - struct dc_bios *bios, - struct bios_event_info *info); - void (*update_requested_backlight_level)( - struct dc_bios *bios, - uint32_t backlight_8bit); - uint32_t (*get_requested_backlight_level)( - struct dc_bios *bios); - void (*take_backlight_control)( - struct dc_bios *bios, - bool cntl); - - bool (*is_active_display)( - struct dc_bios *bios, - enum signal_type signal, - const struct connector_device_tag_info *device_tag); - enum controller_id (*get_embedded_display_controller_id)( - struct dc_bios *bios); - uint32_t (*get_embedded_display_refresh_rate)( - struct dc_bios *bios); - void (*set_scratch_critical_state)( struct dc_bios *bios, bool state); @@ -149,11 +101,6 @@ struct dc_vbios_funcs { enum bp_result (*transmitter_control)( struct dc_bios *bios, struct bp_transmitter_control *cntl); - enum bp_result (*crt_control)( - struct dc_bios *bios, - enum engine_id engine_id, - bool enable, - uint32_t pixel_clock); enum bp_result (*enable_crtc)( struct dc_bios *bios, enum controller_id id, @@ -167,8 +114,6 @@ struct dc_vbios_funcs { enum bp_result (*set_dce_clock)( struct dc_bios *bios, struct bp_set_dce_clock_parameters *bp_params); - unsigned int (*get_smu_clock_info)( - struct dc_bios *bios); enum bp_result (*enable_spread_spectrum_on_ppll)( struct dc_bios *bios, struct bp_spread_spectrum_parameters *bp_params, @@ -183,20 +128,11 @@ struct dc_vbios_funcs { enum bp_result (*program_display_engine_pll)( struct dc_bios *bios, struct bp_pixel_clock_parameters *bp_params); - - enum signal_type (*dac_load_detect)( - struct dc_bios *bios, - struct graphics_object_id encoder, - struct graphics_object_id connector, - enum signal_type display_signal); - enum bp_result (*enable_disp_power_gating)( struct dc_bios *bios, enum controller_id controller_id, enum bp_pipe_control_action action); - void (*post_init)(struct dc_bios *bios); - void (*bios_parser_destroy)(struct dc_bios **dcb); enum bp_result (*get_board_layout_info)( -- GitLab From e6ada54126cff629b422924e6497a13ce8bd890d Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 30 Jul 2018 14:45:42 -0400 Subject: [PATCH 1288/1692] drm/amd/display: remove unused clk_src code Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dce/dce_clock_source.c | 87 +------------------ .../gpu/drm/amd/display/dc/inc/clock_source.h | 4 - 2 files changed, 1 insertion(+), 90 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 1f23224d495a..5a9f3601ffb6 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -611,90 +611,6 @@ static uint32_t dce110_get_pix_clk_dividers( return pll_calc_error; } -static uint32_t dce110_get_pll_pixel_rate_in_hz( - struct clock_source *cs, - struct pixel_clk_params *pix_clk_params, - struct pll_settings *pll_settings) -{ - uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - struct dc *dc_core = cs->ctx->dc; - struct dc_state *context = dc_core->current_state; - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst]; - - /* This function need separate to different DCE version, before separate, just use pixel clock */ - return pipe_ctx->stream->phy_pix_clk; - -} - -static uint32_t dce110_get_dp_pixel_rate_from_combo_phy_pll( - struct clock_source *cs, - struct pixel_clk_params *pix_clk_params, - struct pll_settings *pll_settings) -{ - uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - struct dc *dc_core = cs->ctx->dc; - struct dc_state *context = dc_core->current_state; - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst]; - - /* This function need separate to different DCE version, before separate, just use pixel clock */ - return pipe_ctx->stream->phy_pix_clk; -} - -static uint32_t dce110_get_d_to_pixel_rate_in_hz( - struct clock_source *cs, - struct pixel_clk_params *pix_clk_params, - struct pll_settings *pll_settings) -{ - uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs); - int dto_enabled = 0; - struct fixed31_32 pix_rate; - - REG_GET(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, &dto_enabled); - - if (dto_enabled) { - uint32_t phase = 0; - uint32_t modulo = 0; - REG_GET(PHASE[inst], DP_DTO0_PHASE, &phase); - REG_GET(MODULO[inst], DP_DTO0_MODULO, &modulo); - - if (modulo == 0) { - return 0; - } - - pix_rate = dc_fixpt_from_int(clk_src->ref_freq_khz); - pix_rate = dc_fixpt_mul_int(pix_rate, 1000); - pix_rate = dc_fixpt_mul_int(pix_rate, phase); - pix_rate = dc_fixpt_div_int(pix_rate, modulo); - - return dc_fixpt_round(pix_rate); - } else { - return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings); - } -} - -static uint32_t dce110_get_pix_rate_in_hz( - struct clock_source *cs, - struct pixel_clk_params *pix_clk_params, - struct pll_settings *pll_settings) -{ - uint32_t pix_rate = 0; - switch (pix_clk_params->signal_type) { - case SIGNAL_TYPE_DISPLAY_PORT: - case SIGNAL_TYPE_DISPLAY_PORT_MST: - case SIGNAL_TYPE_EDP: - case SIGNAL_TYPE_VIRTUAL: - pix_rate = dce110_get_d_to_pixel_rate_in_hz(cs, pix_clk_params, pll_settings); - break; - case SIGNAL_TYPE_HDMI_TYPE_A: - default: - pix_rate = dce110_get_pll_pixel_rate_in_hz(cs, pix_clk_params, pll_settings); - break; - } - - return pix_rate; -} - static bool disable_spread_spectrum(struct dce110_clk_src *clk_src) { enum bp_result result; @@ -1046,8 +962,7 @@ static bool dce110_clock_source_power_down( static const struct clock_source_funcs dce110_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dce110_program_pix_clk, - .get_pix_clk_dividers = dce110_get_pix_clk_dividers, - .get_pix_rate_in_hz = dce110_get_pix_rate_in_hz + .get_pix_clk_dividers = dce110_get_pix_clk_dividers }; static void get_ss_info_from_atombios( diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h index ebcf67b5fc57..47ef90495376 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h 
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h @@ -166,10 +166,6 @@ struct clock_source_funcs { struct clock_source *, struct pixel_clk_params *, struct pll_settings *); - uint32_t (*get_pix_rate_in_hz)( - struct clock_source *, - struct pixel_clk_params *, - struct pll_settings *); }; struct clock_source { -- GitLab From 491e08c9b858a328e0d7d09a557edd748f2d1b93 Mon Sep 17 00:00:00 2001 From: Derek Lai Date: Thu, 23 Aug 2018 15:13:23 +0800 Subject: [PATCH 1289/1692] drm/amd/display: add disconnect_delay to dc_panel_patch Some display need disconnect delay. Adding this parameter for future use Signed-off-by: Derek Lai Reviewed-by: Charlene Liu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_types.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 58a6ef80a60e..4fb62780a696 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -191,6 +191,7 @@ union display_content_support { }; struct dc_panel_patch { + unsigned int disconnect_delay; unsigned int dppowerup_delay; unsigned int extra_t12_ms; }; -- GitLab From 86a2da705cc29a3a006c6571cadfa45676150622 Mon Sep 17 00:00:00 2001 From: Chiawen Huang Date: Fri, 24 Aug 2018 17:45:28 +0800 Subject: [PATCH 1290/1692] drm/amd/display: add aux transition event log. [Why] Enhance aux transition debugging information. [How] Added Aux request and reply event log. Signed-off-by: Chiawen Huang Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dm_event_log.h | 39 +++++++++++++++++++ .../drm/amd/display/dc/i2caux/aux_engine.c | 13 +++++++ .../dc/i2caux/dce110/aux_engine_dce110.c | 7 +++- .../drm/amd/display/dc/i2caux/i2c_hw_engine.c | 18 +++++++++ 4 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dm_event_log.h diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h new file mode 100644 index 000000000000..c1ce2dd52f9b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h @@ -0,0 +1,39 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/** + * This file defines external dependencies of Display Core. 
+ */ + +#ifndef __DM_EVENT_LOG_H__ + +#define __DM_EVENT_LOG_H__ + +#define EVENT_LOG_I2CAUX_READ(transType, dcc, address, status, len, data) +#define EVENT_LOG_I2CAUX_WRITE(transType, dcc, address, status, len, data) +#define EVENT_LOG_AUX_REQ(dcc, type, action, address, len, data) +#define EVENT_LOG_AUX_Reply(dcc, type, swStatus, replyStatus, len, data) + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c index 0afd2fa57bbe..03292c52b18d 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c @@ -24,6 +24,7 @@ */ #include "dm_services.h" +#include "dm_event_log.h" /* * Pre-requisites: headers required by header of this unit @@ -296,6 +297,12 @@ static bool read_command( if (request->payload.address_space == I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { + EVENT_LOG_I2CAUX_READ(request->payload.address_space, + engine->base.ddc->pin_data->en, + request->payload.address, + request->status, + request->payload.length, + request->payload.data); DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d", request->payload.address, request->payload.data[0], @@ -512,6 +519,12 @@ static bool write_command( if (request->payload.address_space == I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { + EVENT_LOG_I2CAUX_WRITE(request->payload.address_space, + engine->base.ddc->pin_data->en, + request->payload.address, + request->status, + request->payload.length, + request->payload.data); DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d", request->payload.address, request->payload.data[0], diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c index ae5caa97caca..4a88fc76614e 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c @@ -24,6 +24,7 @@ */ #include "dm_services.h" +#include "dm_event_log.h" /* * Pre-requisites: headers required by header of this unit @@ -273,6 +274,8 @@ static void submit_channel_request( REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, 10, aux110->timeout_period/10); REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); + EVENT_LOG_AUX_REQ(engine->base.ddc->pin_data->en, Native, request->action, + request->address, request->length, request->data); } static int read_channel_reply(struct aux_engine *engine, uint32_t size, @@ -336,7 +339,9 @@ static void process_channel_reply( uint32_t sw_status; bytes_replied = read_channel_reply(engine, reply->length, reply->data, - &reply_result, &sw_status); + &reply_result, &sw_status); + EVENT_LOG_AUX_Reply(engine->base.ddc->pin_data->en, Native, + sw_status, reply_result, bytes_replied, reply->data); /* in case HPD is LOW, exit AUX transaction */ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) { diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c index 4b54fcfb28ec..1747b9f5f10e 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c @@ -24,6 +24,7 @@ */ #include "dm_services.h" +#include "dm_event_log.h" /* * Pre-requisites: headers required by header of this unit @@ -170,6 +171,23 @@ bool dal_i2c_hw_engine_submit_request( process_channel_reply(&hw_engine->base, &reply); } + if (i2caux_request->operation == I2CAUX_TRANSACTION_READ) { + EVENT_LOG_I2CAUX_READ(i2caux_request->payload.address_space, + 
engine->ddc->pin_data->en, + i2caux_request->payload.address, + i2caux_request->status, + i2caux_request->payload.length, + i2caux_request->payload.data); + } else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE) { + EVENT_LOG_I2CAUX_WRITE(i2caux_request->payload.address_space, + engine->ddc->pin_data->en, + i2caux_request->payload.address, + i2caux_request->status, + i2caux_request->payload.length, + i2caux_request->payload.data); + } + + return result; } -- GitLab From 0e8e4fbf8d8905071c045f2922de55adbe1a6abe Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Tue, 21 Aug 2018 09:35:47 -0400 Subject: [PATCH 1291/1692] drm/amd/display: num of sw i2c/aux engines less than num of connectors [why] AMD Stoney reference board, there are only 2 pipes (not include underlay), and 3 connectors. resource creation, only 2 I2C/AUX engines are created. Within dc_link_aux_transfer, when pin_data_en =2, refer to enengines[ddc_pin->pin_data->en] = NULL. NULL point is referred later causing system crash. [how] each asic design has fixed number of ddc engines at hw side. for each ddc engine, create its i2x/aux engine at sw side. Signed-off-by: Hersen Wu Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../amd/display/dc/dce100/dce100_resource.c | 6 ++++- .../amd/display/dc/dce110/dce110_resource.c | 4 +++ .../amd/display/dc/dce112/dce112_resource.c | 5 ++++ .../amd/display/dc/dce120/dce120_resource.c | 9 +++++-- .../drm/amd/display/dc/dce80/dce80_resource.c | 25 +++++++++++++++++++ .../drm/amd/display/dc/dcn10/dcn10_resource.c | 7 ++++-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 1 + 7 files changed, 52 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index ae613b025756..b1cc38827f09 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -372,7 +372,8 @@ static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, - .num_pll = 3 + .num_pll = 3, + .num_ddc = 6, }; #define CTX ctx @@ -1004,6 +1005,9 @@ static bool construct( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce100_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 49c5c7037be2..9f44f1cad221 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -378,6 +378,7 @@ static const struct resource_caps carrizo_resource_cap = { .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, + .num_ddc = 3, }; static const struct resource_caps stoney_resource_cap = { @@ -386,6 +387,7 @@ static const struct resource_caps stoney_resource_cap = { .num_audio = 3, .num_stream_encoder = 3, .num_pll = 2, + .num_ddc = 3, }; #define CTX ctx @@ -1336,7 +1338,9 @@ static bool construct( "DC: failed to create output pixel processor!\n"); goto res_create_fail; } + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce110_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index d35dc730e01c..2aa922cdcc58 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -384,6 +384,7 @@ static const struct resource_caps polaris_10_resource_cap = { .num_audio = 6, .num_stream_encoder = 6, .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */ + .num_ddc = 6, }; static const struct resource_caps polaris_11_resource_cap = { @@ -391,6 +392,7 @@ static const struct resource_caps polaris_11_resource_cap = { .num_audio = 5, .num_stream_encoder = 5, .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */ + .num_ddc = 5, }; #define CTX ctx @@ -1286,6 +1288,9 @@ static bool construct( "DC:failed to create output pixel processor!\n"); goto res_create_fail; } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce112_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index b2fb06f37648..465f68655db2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -436,6 +436,7 @@ static const struct resource_caps res_cap = { .num_audio = 7, .num_stream_encoder = 6, .num_pll = 6, + .num_ddc = 6, }; static const struct dc_debug_options debug_defaults = { @@ -1062,6 +1063,12 @@ static bool construct( dm_error( "DC: failed to create output pixel processor!\n"); } + + /* check next valid pipe */ + j++; + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce120_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); @@ -1077,8 +1084,6 @@ static bool construct( goto res_create_fail; } pool->base.sw_i2cs[i] = NULL; - /* check next valid pipe */ - j++; } /* valid pipe num */ diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 4eae859e6383..1dc590ccc5f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -367,6 +367,7 @@ static const struct resource_caps res_cap = { .num_audio = 6, .num_stream_encoder = 6, .num_pll = 3, + .num_ddc = 6, }; static const struct resource_caps res_cap_81 = { @@ -374,6 +375,7 @@ static const struct resource_caps res_cap_81 = { .num_audio = 7, .num_stream_encoder = 7, .num_pll = 3, + .num_ddc = 6, }; static const struct resource_caps res_cap_83 = { @@ -381,6 +383,7 @@ static const struct resource_caps res_cap_83 = { .num_audio = 6, .num_stream_encoder = 6, .num_pll = 2, + .num_ddc = 2, }; static const struct dce_dmcu_registers dmcu_regs = { @@ -992,7 +995,9 @@ static bool dce80_construct( dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dce80_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); @@ -1200,6 +1205,16 @@ static bool dce81_construct( dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce80_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } 
pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); @@ -1396,6 +1411,16 @@ static bool dce83_construct( dm_error("DC: failed to create output pixel processor!\n"); goto res_create_fail; } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce80_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); if (pool->base.hw_i2cs[i] == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 28ebad8c3ec4..1b519f8f044f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -501,6 +501,7 @@ static const struct resource_caps res_cap = { .num_audio = 4, .num_stream_encoder = 4, .num_pll = 4, + .num_ddc = 4, }; static const struct dc_debug_options debug_defaults_drv = { @@ -1334,7 +1335,11 @@ static bool construct( dm_error("DC: failed to create tg!\n"); goto fail; } + /* check next valid pipe */ + j++; + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { pool->base.engines[i] = dcn10_aux_engine_create(ctx, i); if (pool->base.engines[i] == NULL) { BREAK_TO_DEBUGGER(); @@ -1350,8 +1355,6 @@ static bool construct( goto fail; } pool->base.sw_i2cs[i] = NULL; - /* check next valid pipe */ - j++; } /* valid pipe num */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 5b321008b0b5..76d00c6dbca9 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -44,6 +44,7 @@ struct resource_caps { int num_stream_encoder; int num_pll; int num_dwb; + int num_ddc; }; struct resource_straps { -- GitLab From a487411a64816458bd4dbac91a1981ec31bb40d4 Mon Sep 17 00:00:00 2001 From: Leo Li Date: Thu, 23 Aug 2018 15:28:08 -0400 Subject: [PATCH 1292/1692] drm/amd/display: Use DRM helper for best_encoder [Why] Our implementation is functionally identical to DRM's Note that instead of checking if the provided id is 0, the helper follows through with the mode object search. However, It will still return NULL, since 0 is not a valid object id, and missed searches will return NULL. [How] Remove our implementation, and replace it with drm_atomic_helper_best_encoder. 
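For reference, the DRM helper being swapped in is essentially a two-liner in kernels of this vintage; the sketch below is an approximation of the core helper (not part of this patch) and is included only to illustrate why its behaviour matches the removed best_encoder(): it looks up the connector's first encoder id, and drm_encoder_find() returns NULL for an id of 0 just as the open-coded check did.

	/* Approximate body of drm_atomic_helper_best_encoder(), shown for illustration only */
	struct drm_encoder *
	drm_atomic_helper_best_encoder(struct drm_connector *connector)
	{
		WARN_ON(connector->encoder_ids[1]);
		return drm_encoder_find(connector->dev, NULL,
					connector->encoder_ids[0]);
	}
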
Signed-off-by: Leo Li Reviewed-by: David Francis Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 24 +------------------ 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 32f634eedcc3..9fd583c616e0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2853,28 +2853,6 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { .atomic_get_property = amdgpu_dm_connector_atomic_get_property }; -static struct drm_encoder *best_encoder(struct drm_connector *connector) -{ - int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - - DRM_DEBUG_DRIVER("Finding the best encoder\n"); - - /* pick the encoder ids */ - if (enc_id) { - obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER); - if (!obj) { - DRM_ERROR("Couldn't find a matching encoder for our connector\n"); - return NULL; - } - encoder = obj_to_encoder(obj); - return encoder; - } - DRM_ERROR("No encoder id\n"); - return NULL; -} - static int get_modes(struct drm_connector *connector) { return amdgpu_dm_connector_get_modes(connector); @@ -2995,7 +2973,7 @@ amdgpu_dm_connector_helper_funcs = { */ .get_modes = get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, - .best_encoder = best_encoder + .best_encoder = drm_atomic_helper_best_encoder }; static void dm_crtc_helper_disable(struct drm_crtc *crtc) -- GitLab From 43af9e040905ed71a77785b29d81889d87264bcb Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 9 Aug 2018 10:05:10 -0400 Subject: [PATCH 1293/1692] drm/amd/display: Reorder resource_pool to put i2c with aux [Why] The i2c and aux engines are similar, and should be placed next to eachother for readability [How] Reorder the elements of the resource_pool struct Signed-off-by: David Francis Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 609bff8ed72e..831a1bdf622c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -139,11 +139,11 @@ struct resource_pool { struct output_pixel_processor *opps[MAX_PIPES]; struct timing_generator *timing_generators[MAX_PIPES]; struct stream_encoder *stream_enc[MAX_PIPES * 2]; - struct aux_engine *engines[MAX_PIPES]; struct hubbub *hubbub; struct mpc *mpc; struct pp_smu_funcs_rv *pp_smu; struct pp_smu_display_requirement_rv pp_smu_req; + struct aux_engine *engines[MAX_PIPES]; struct dce_i2c_hw *hw_i2cs[MAX_PIPES]; struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; bool i2c_hw_buffer_in_use; -- GitLab From 2222f4486bbe66dc130296623342cb04ed778968 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Mon, 27 Aug 2018 13:35:13 -0400 Subject: [PATCH 1294/1692] drm/amd/display: dc 3.1.65 Signed-off-by: Tony Cheng Reviewed-by: Steven Chiu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index dee0f28e683d..a769d07d947f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ 
b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.64" +#define DC_VER "3.1.65" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- GitLab From cae50a43b931c6d70c7e16e1128af10398d8635a Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 24 Aug 2018 16:54:14 -0400 Subject: [PATCH 1295/1692] drm/amd/display: use link type to decide stream enc acquisition [Why] Virtual sink is used when set mode happens on a disconnected display to allow the mode set to proceed. This did not work with MST because the logic for acquiring stream encoder uses stream signal to determine the special handling is required, and stream signal is virtual instead of DP in this case. [How] Use link type to decide instead. Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index f85fa7b55efb..d981755d1e4d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1762,7 +1762,7 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link( * required for non DP connectors. */ - if (j >= 0 && dc_is_dp_signal(stream->signal)) + if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) return pool->stream_enc[j]; return NULL; -- GitLab From 9c09df569ddec780e8be986bbe232520c55f8a3b Mon Sep 17 00:00:00 2001 From: David Francis Date: Fri, 17 Aug 2018 14:24:26 -0400 Subject: [PATCH 1296/1692] drm/amd/display: Remove call to amdgpu_pm_compute_clocks [Why] The extraneous call to amdgpu_pm_compute_clocks is deprecated. [How] Remove it. Signed-off-by: David Francis Signed-off-by: Leo Li Reviewed-by: David Francis Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index cfa907b119c7..6d16b4a0353d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -101,18 +101,10 @@ bool dm_pp_apply_display_requirements( adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; } - /* TODO: complete implementation of - * pp_display_configuration_change(). - * Follow example of: - * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c - * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */ if (adev->powerplay.pp_funcs->display_configuration_change) adev->powerplay.pp_funcs->display_configuration_change( adev->powerplay.pp_handle, &adev->pm.pm_display_cfg); - - /* TODO: replace by a separate call to 'apply display cfg'? */ - amdgpu_pm_compute_clocks(adev); } return true; -- GitLab From 6787359b14710488a8646dcd243f78e1846b1037 Mon Sep 17 00:00:00 2001 From: Chiawen Huang Date: Tue, 28 Aug 2018 13:38:34 +0800 Subject: [PATCH 1297/1692] drm/amd/display: clean code for transition event log. [Why] There are same purpose transition events. [How] remove the redundant event log. 
Signed-off-by: Chiawen Huang Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dm_event_log.h | 2 -- .../gpu/drm/amd/display/dc/i2caux/aux_engine.c | 12 ------------ .../gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c | 15 --------------- 3 files changed, 29 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h index c1ce2dd52f9b..00a275dfa472 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h +++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h @@ -31,8 +31,6 @@ #define __DM_EVENT_LOG_H__ -#define EVENT_LOG_I2CAUX_READ(transType, dcc, address, status, len, data) -#define EVENT_LOG_I2CAUX_WRITE(transType, dcc, address, status, len, data) #define EVENT_LOG_AUX_REQ(dcc, type, action, address, len, data) #define EVENT_LOG_AUX_Reply(dcc, type, swStatus, replyStatus, len, data) diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c index 03292c52b18d..8cbf38b2470d 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c @@ -297,12 +297,6 @@ static bool read_command( if (request->payload.address_space == I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - EVENT_LOG_I2CAUX_READ(request->payload.address_space, - engine->base.ddc->pin_data->en, - request->payload.address, - request->status, - request->payload.length, - request->payload.data); DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d", request->payload.address, request->payload.data[0], @@ -519,12 +513,6 @@ static bool write_command( if (request->payload.address_space == I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - EVENT_LOG_I2CAUX_WRITE(request->payload.address_space, - engine->base.ddc->pin_data->en, - request->payload.address, - request->status, - request->payload.length, - request->payload.data); DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d", request->payload.address, request->payload.data[0], diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c index 1747b9f5f10e..c995ef4ea5a4 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c @@ -171,21 +171,6 @@ bool dal_i2c_hw_engine_submit_request( process_channel_reply(&hw_engine->base, &reply); } - if (i2caux_request->operation == I2CAUX_TRANSACTION_READ) { - EVENT_LOG_I2CAUX_READ(i2caux_request->payload.address_space, - engine->ddc->pin_data->en, - i2caux_request->payload.address, - i2caux_request->status, - i2caux_request->payload.length, - i2caux_request->payload.data); - } else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE) { - EVENT_LOG_I2CAUX_WRITE(i2caux_request->payload.address_space, - engine->ddc->pin_data->en, - i2caux_request->payload.address, - i2caux_request->status, - i2caux_request->payload.length, - i2caux_request->payload.data); - } return result; -- GitLab From afd0384c2af286bcf72ff378e56d6d446d30b52e Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Wed, 22 Aug 2018 17:00:34 -0400 Subject: [PATCH 1298/1692] drm/amd/display: Add invariant support instrumentation in driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Existing debug dump are all invariant, new “low 32-bit of address” dump is not invariant Signed-off-by: Jun Lei Reviewed-by: Eric Yang Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- 
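A short usage sketch of the new mask semantics implemented in the hunks below (mask values taken from the defines in dcn10_get_hw_state(); "dc" is assumed to be the device's struct dc pointer and the buffer size is an arbitrary example, not something this patch prescribes):

	char buf[4096];

	/* HUBP state only (bit 1), full dump including the new
	 * non-invariant low 32 bits of the earliest-inuse address */
	dcn10_get_hw_state(dc, buf, sizeof(buf), 0x0002);

	/* HUBP state only, invariant fields only (bit 15 set) */
	dcn10_get_hw_state(dc, buf, sizeof(buf), 0x8002);

	/* mask == 0 keeps the previous behaviour: it defaults to 0xFFFF,
	 * i.e. all blocks with the invariant-only bit set as well */
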
.../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 + .../gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 1 + .../dc/dcn10/dcn10_hw_sequencer_debug.c | 84 +++++++++++++------ 3 files changed, 61 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 8da2b8a09a12..74132a1f3046 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -974,6 +974,9 @@ void hubp1_read_state(struct hubp *hubp) REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi); + REG_GET(DCSURF_SURFACE_EARLIEST_INUSE, + SURFACE_EARLIEST_INUSE_ADDRESS, &s->inuse_addr_lo); + REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, &s->viewport_width, PRI_VIEWPORT_HEIGHT, &s->viewport_height); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 7605af9b4837..4890273b632b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -639,6 +639,7 @@ struct dcn_hubp_state { struct _vcs_dpi_display_rq_regs_st rq_regs; uint32_t pixel_format; uint32_t inuse_addr_hi; + uint32_t inuse_addr_lo; uint32_t viewport_width; uint32_t viewport_height; uint32_t rotation_angle; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 9c218252004f..64158900730f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -105,7 +105,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i return bufSize - remaining_buffer; } -static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned int bufSize) +static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned int bufSize, bool invarOnly) { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; @@ -117,9 +117,15 @@ static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned in const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000; static const unsigned int frac = 1000; - chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow," - "min_ttu_vblank,qos_low_wm,qos_high_wm" - "\n"); + if (invarOnly) + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow," + "min_ttu_vblank,qos_low_wm,qos_high_wm" + "\n"); + else + chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,format,addr_hi,addr_lo,width,height,rotation,mirror,sw_mode,dcc_en,blank_en,ttu_dis,underflow," + "min_ttu_vblank,qos_low_wm,qos_high_wm" + "\n"); + remaining_buffer -= chars_printed; pBuf += chars_printed; @@ -130,24 +136,45 @@ static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned in hubp->funcs->hubp_read_state(hubp); if (!s->blank_en) { - chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x," - "%d.%03d,%d.%03d,%d.%03d" - "\n", - hubp->inst, - s->pixel_format, - s->inuse_addr_hi, - s->viewport_width, - s->viewport_height, - s->rotation_angle, - s->h_mirror_en, - s->sw_mode, - s->dcc_en, - s->blank_en, - s->ttu_disable, - s->underflow_status, - 
(s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac, - (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac, - (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac); + if (invarOnly) + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x," + "%d.%03d,%d.%03d,%d.%03d" + "\n", + hubp->inst, + s->pixel_format, + s->inuse_addr_hi, + s->viewport_width, + s->viewport_height, + s->rotation_angle, + s->h_mirror_en, + s->sw_mode, + s->dcc_en, + s->blank_en, + s->ttu_disable, + s->underflow_status, + (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac, + (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac, + (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac); + else + chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%x,%x,%x,%d,%d,%x,%x,%x,%x,%x,%x,%x," + "%d.%03d,%d.%03d,%d.%03d" + "\n", + hubp->inst, + s->pixel_format, + s->inuse_addr_hi, + s->inuse_addr_lo, + s->viewport_width, + s->viewport_height, + s->rotation_angle, + s->h_mirror_en, + s->sw_mode, + s->dcc_en, + s->blank_en, + s->ttu_disable, + s->underflow_status, + (s->min_ttu_vblank * frac) / ref_clk_mhz / frac, (s->min_ttu_vblank * frac) / ref_clk_mhz % frac, + (s->qos_level_low_wm * frac) / ref_clk_mhz / frac, (s->qos_level_low_wm * frac) / ref_clk_mhz % frac, + (s->qos_level_high_wm * frac) / ref_clk_mhz / frac, (s->qos_level_high_wm * frac) / ref_clk_mhz % frac); remaining_buffer -= chars_printed; pBuf += chars_printed; @@ -314,9 +341,6 @@ static unsigned int dcn10_get_cm_states(struct dc *dc, char *pBuf, unsigned int struct dpp *dpp = pool->dpps[i]; struct dcn_dpp_state s = {0}; - - - dpp->funcs->dpp_read_state(dpp, &s); if (s.is_enabled) { @@ -462,6 +486,11 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask) { + /* + * Mask Format + * Bit 0 - 15: Hardware block mask + * Bit 15: 1 = Invariant Only, 0 = All + */ const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1; const unsigned int DC_HW_STATE_MASK_HUBP = 0x2; const unsigned int DC_HW_STATE_MASK_RQ = 0x4; @@ -471,12 +500,13 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne const unsigned int DC_HW_STATE_MASK_MPCC = 0x40; const unsigned int DC_HW_STATE_MASK_OTG = 0x80; const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100; + const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000; unsigned int chars_printed = 0; unsigned int remaining_buf_size = bufSize; if (mask == 0x0) - mask = 0xFFFF; + mask = 0xFFFF; // Default, capture all, invariant only if ((mask & DC_HW_STATE_MASK_HUBBUB) && remaining_buf_size > 0) { chars_printed = dcn10_get_hubbub_state(dc, pBuf, remaining_buf_size); @@ -485,7 +515,7 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne } if ((mask & DC_HW_STATE_MASK_HUBP) && remaining_buf_size > 0) { - chars_printed = dcn10_get_hubp_states(dc, pBuf, remaining_buf_size); + chars_printed = dcn10_get_hubp_states(dc, pBuf, remaining_buf_size, mask & DC_HW_STATE_INVAR_ONLY); pBuf += chars_printed; remaining_buf_size -= chars_printed; } -- GitLab From 03a1c08d003bd9354f522d45a6e3dcd529f409c2 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 31 Aug 2018 16:54:12 -0400 
Subject: [PATCH 1299/1692] drm/amdgpu: Clean up KFD init and fini Only initialize KFD once by moving amdgpu_amdkfd_init from amdgpu_pci_probe to amdgpu_init. This fixes kernel oopses and hangs when booting multi-GPU systems. Also removed some vestiges of KFD being its own module. Signed-off-by: Felix Kuehling Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5 +---- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++++-------- 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index db9872f83d03..1a0824e6c8d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -28,7 +28,6 @@ #include const struct kgd2kfd_calls *kgd2kfd; -bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**); static const unsigned int compute_vmid_bitmap = 0xFF00; @@ -51,10 +50,8 @@ int amdgpu_amdkfd_init(void) void amdgpu_amdkfd_fini(void) { - if (kgd2kfd) { + if (kgd2kfd) kgd2kfd->exit(); - symbol_put(kgd2kfd_init); - } } void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index a96ceff8abe3..b5c2ccb585b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -922,14 +922,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } - /* - * Initialize amdkfd before starting radeon. If it was not loaded yet, - * defer radeon probing - */ - ret = amdgpu_amdkfd_init(); - if (ret == -EPROBE_DEFER) - return ret; - /* Get rid of things like offb */ ret = amdgpu_kick_out_firmware_fb(pdev); if (ret) @@ -1274,6 +1266,10 @@ static int __init amdgpu_init(void) pdriver = &amdgpu_kms_pci_driver; driver->num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); + + /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ + amdgpu_amdkfd_init(); + /* let modprobe override vga console setting */ return pci_register_driver(pdriver); -- GitLab From c3e1b43c2c1ef9d0eb735cc5e0675100c95b91fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 27 Aug 2018 18:23:11 +0200 Subject: [PATCH 1300/1692] drm/amdgpu: enable AGP aperture for GMC9 v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enable the old AGP aperture to avoid GART mappings. v2: don't enable it for SRIOV Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10 +++++----- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 3403ded39d13..ffd0ec9586d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -65,16 +65,16 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; - /* Disable AGP. */ + /* Program the AGP BAR */ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BASE, 0); - WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFFFF); + WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); /* Program the system aperture low logical page number. 
*/ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); + min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f467638eb49d..3529c55ab52d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -772,6 +772,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, base = mmhub_v1_0_get_fb_location(adev); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc); + if (!amdgpu_sriov_vf(adev)) + amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 5f6a9c85488f..73d7c075dd33 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -76,16 +76,16 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) uint64_t value; uint32_t tmp; - /* Disable AGP. */ + /* Program the AGP BAR */ WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0); - WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, 0); - WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, 0x00FFFFFF); + WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); /* Program the system aperture low logical page number. */ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); + min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); /* Set default page address. 
*/ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + -- GitLab From 03e9dee11db0cc244baf21a690fbb0664ab1b1b7 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 5 Sep 2018 20:19:54 -0400 Subject: [PATCH 1301/1692] drm/amdgpu: Fix compute VM BO params after rebase v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The intent of two commits was lost in the last rebase: 810955b drm/amdgpu: Fix acquiring VM on large-BAR systems b5d21aa drm/amdgpu: Don't use shadow BO for compute context This commit restores the original behaviour: * Don't set AMDGPU_GEM_CREATE_NO_CPU_ACCESS for page directories to allow them to be reused for compute VMs * Don't create shadow BOs for page tables in compute VMs v2: move more logic into amdgpu_vm_bo_param Signed-off-by: Felix Kuehling Tested-by: Kent Russell Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ea5e277ae038..1d7e3c17e542 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -595,9 +595,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, AMDGPU_GEM_CREATE_CPU_GTT_USWC; if (vm->use_cpu_for_update) bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - else - bp->flags |= AMDGPU_GEM_CREATE_SHADOW | - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; + else if (!vm->root.base.bo || vm->root.base.bo->shadow) + bp->flags |= AMDGPU_GEM_CREATE_SHADOW; bp->type = ttm_bo_type_kernel; if (vm->root.base.bo) bp->resv = vm->root.base.bo->tbo.resv; @@ -2749,6 +2748,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->last_update = NULL; amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp); + if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) + bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; r = amdgpu_bo_create(adev, &bp, &root); if (r) goto error_free_sched_entity; -- GitLab From 989edc699f65bb1f32a31c03619abff5390b9c42 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Thu, 6 Sep 2018 11:10:57 +0900 Subject: [PATCH 1302/1692] drm/amdgpu: Fix warnings while make xmldocs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes following warnings. ./drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:3011: warning: Excess function parameter 'dev' description in 'amdgpu_vm_get_task_info' ./drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:3012: warning: Function parameter or member 'adev' not described in 'amdgpu_vm_get_task_info' ./drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c:3012: warning: Excess function parameter 'dev' description in 'amdgpu_vm_get_task_info' Signed-off-by: Masanari Iida Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1d7e3c17e542..9a5b1bbfb77c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -3148,7 +3148,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) /** * amdgpu_vm_get_task_info - Extracts task info for a PASID. * - * @dev: drm device pointer + * @adev: drm device pointer * @pasid: PASID identifier for VM * @task_info: task_info to fill. 
*/ -- GitLab From 28968375a7ec95562e402b43f795e04fd320ae18 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 5 Sep 2018 10:03:13 +0800 Subject: [PATCH 1303/1692] drm/amd/powerplay: fix compile warning for wrong data type V2 do_div expects the 1st argument in 64bit instead of 32bit. Drop the usage of do_div as it seems unnecessary. V2: drop usage of do_div completely Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 3efd59e984a3..1e65ac01e0f5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1195,7 +1195,7 @@ static int vega20_set_sclk_od( int ret = 0; od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value; - do_div(od_sclk, 100); + od_sclk /= 100; od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk); @@ -1242,7 +1242,7 @@ static int vega20_set_mclk_od( int ret = 0; od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value; - do_div(od_mclk, 100); + od_mclk /= 100; od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk); -- GitLab From 03f67ed10d8522ea27be348e3f87d88f92043e71 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Fri, 7 Sep 2018 13:50:31 +0800 Subject: [PATCH 1304/1692] drm/amdgpu: Fix SDMA hang in prt mode v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix SDMA hang in prt mode, clear XNACK_WATERMARK in reg SDMA0_UTCL1_WATERMK to avoid the issue Affected ASICs: VEGA10 VEGA12 RV1 RV2 v2: add reg clear for SDMA1 Signed-off-by: Tao Zhou Tested-by: Yukun Li Reviewed-by: Hawking Zhang Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index df138401fbf8..ee0213edca8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), @@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 
0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000) }; static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { @@ -108,7 +110,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) }; static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = { -- GitLab From fbbf794cbd4872cc4e894ab236d45c97b98008ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 5 Sep 2018 17:04:44 +0200 Subject: [PATCH 1305/1692] drm/amdgpu: set bulk_moveable to false when a per VM is released MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise we might run into a use after free during bulk move. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9a5b1bbfb77c..f5a960079705 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2512,8 +2512,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va) { struct amdgpu_bo_va_mapping *mapping, *next; + struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) + vm->bulk_moveable = false; + list_del(&bo_va->base.bo_list); spin_lock(&vm->invalidated_lock); -- GitLab From 39186aefac362ba3df65a953290ebd025d0c2bf0 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 10 Sep 2018 17:51:31 +0800 Subject: [PATCH 1306/1692] drm/amdgpu: move PSP init prior to IH in gpu reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit since we use PSP to program IH regs now Signed-off-by: Monk Liu Acked-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 93476b8c2e72..acfc63e68b08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1957,6 +1957,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) static enum amd_ip_block_type ip_order[] = { AMD_IP_BLOCK_TYPE_GMC, AMD_IP_BLOCK_TYPE_COMMON, + AMD_IP_BLOCK_TYPE_PSP, AMD_IP_BLOCK_TYPE_IH, }; @@ -1987,7 +1988,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) static enum amd_ip_block_type ip_order[] = { AMD_IP_BLOCK_TYPE_SMC, - AMD_IP_BLOCK_TYPE_PSP, AMD_IP_BLOCK_TYPE_DCE, AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, -- GitLab From 984564031a1a6ab2c87a6b98019065cf476c69d8 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Tue, 7 Aug 2018 11:44:26 -0400 Subject: [PATCH 1307/1692] drm/amd/include: update the bitfield define for PF_MAX_REGION 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Correct the definition based on vega20 register spec Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h index 6626fc262a0a..76ea902340c1 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h @@ -8241,9 +8241,9 @@ #define MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L //MC_VM_XGMI_LFB_CNTL #define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0 -#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3 +#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4 #define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L -#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000038L +#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L //MC_VM_XGMI_LFB_SIZE #define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0 #define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL -- GitLab From 76a5b36776aa6c0cd75b8080e24d6f69c8eb41a9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 19 Jun 2018 16:00:47 -0500 Subject: [PATCH 1308/1692] drm/amdgpu/gmc: add initial xgmi structure to amdgpu_gmc structure Initial pass at a structure to store xgmi info. xgmi is a high speed cross gpu interconnect. Acked-by: Huang Rui Acked-by: Slava Abramov Reviewed-by :Shaoyun liu Signed-off-by: Alex Deucher Signed-off-by: Shaoyun Liu --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 17ffc35d1366..a929a55b30c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -87,6 +87,18 @@ struct amdgpu_gmc_funcs { u64 *dst, u64 *flags); }; +struct amdgpu_xgmi { + /* from psp */ + u64 device_id; + u64 hive_id; + /* fixed per family */ + u64 node_segment_size; + /* physical node (0-3) */ + unsigned physical_node_id; + /* number of nodes (0-4) */ + unsigned num_physical_nodes; +}; + struct amdgpu_gmc { resource_size_t aper_size; resource_size_t aper_base; @@ -125,6 +137,8 @@ struct amdgpu_gmc { atomic_t vm_fault_info_updated; const struct amdgpu_gmc_funcs *gmc_funcs; + + struct amdgpu_xgmi xgmi; }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)) -- GitLab From bf0a60b78b61a7b31bb22e60cc7b2e7fc538d38f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 19 Jun 2018 17:03:27 -0500 Subject: [PATCH 1309/1692] drm/amdgpu/gmc9: add a new gfxhub 1.1 helper for xgmi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Used to populate the xgmi info on vega20. 
v2: PF_MAX_REGION is val - 1 (Ray) Acked-by: Huang Rui Acked-by: Slava Abramov Reviewed-by :Shaoyun liu Signed-off-by: Alex Deucher Acked-by: Christian König Signed-off-by :Shaoyun liu --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 50 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h | 29 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7 ++++ 4 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c create mode 100644 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 847536b55f46..e83ba7b0904e 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -74,7 +74,7 @@ amdgpu-y += \ amdgpu-y += \ gmc_v7_0.o \ gmc_v8_0.o \ - gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o + gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o # add IH block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c new file mode 100644 index 000000000000..d4170cb41055 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c @@ -0,0 +1,50 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "gfxhub_v1_1.h" + +#include "gc/gc_9_2_1_offset.h" +#include "gc/gc_9_2_1_sh_mask.h" + +#include "soc15_common.h" + +int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) +{ + u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL); + u32 max_region = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); + + /* PF_MAX_REGION=0 means xgmi is disabled */ + if (max_region) { + adev->gmc.xgmi.num_physical_nodes = max_region + 1; + if (adev->gmc.xgmi.num_physical_nodes > 4) + return -EINVAL; + + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION); + if (adev->gmc.xgmi.physical_node_id > 3) + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h new file mode 100644 index 000000000000..d753cf28a0a6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h @@ -0,0 +1,29 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GFXHUB_V1_1_H__ +#define __GFXHUB_V1_1_H__ + +int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3529c55ab52d..e9b5a1300657 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -43,6 +43,7 @@ #include "gfxhub_v1_0.h" #include "mmhub_v1_0.h" +#include "gfxhub_v1_1.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" @@ -985,6 +986,12 @@ static int gmc_v9_0_sw_init(void *handle) } adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + if (adev->asic_type == CHIP_VEGA20) { + r = gfxhub_v1_1_get_xgmi_info(adev); + if (r) + return r; + } + r = gmc_v9_0_mc_init(adev); if (r) return r; -- GitLab From 6fdd68b14a943ead1d0ce1c0c7023cd2dbfde4c2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 19 Jun 2018 16:11:56 -0500 Subject: [PATCH 1310/1692] drm/amdgpu/gmc9: Adjust GART and AGP location with xgmi offset (v2) On hives with xgmi enabled, the fb_location aperture is a size which defines the total framebuffer size of all nodes in the hive. Each GPU in the hive has the same view via the fb_location aperture. GPU0 starts at offset (0 * segment size), GPU1 starts at offset (1 * segment size), etc. For access to local vram on each GPU, we need to take this offset into account. 
This including on setting up GPUVM page table and GART table v2: squash in "drm/amdgpu: Init correct fb region for none XGMI configuration" Acked-by: Huang Rui Acked-by: Slava Abramov Signed-off-by: Shaoyun Liu Signed-off-by: Alex Deucher Reviewed-by: Felix Kuehling Acked-by: Huang Rui --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 25 ++++++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 8 ++++++++ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 3 +++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 ++++++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 7 +++++++ 5 files changed, 39 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 6acdeebabfc0..ae4467113240 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -121,6 +121,11 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; if (limit && limit < mc->real_vram_size) mc->real_vram_size = limit; + + if (mc->xgmi.num_physical_nodes == 0) { + mc->fb_start = mc->vram_start; + mc->fb_end = mc->vram_end; + } dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); @@ -147,8 +152,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) /* VCE doesn't like it when BOs cross a 4GB segment, so align * the GART base on a 4GB boundary as well. */ - size_bf = mc->vram_start; - size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->vram_end + 1, four_gb); + size_bf = mc->fb_start; + size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->fb_end + 1, four_gb); if (mc->gart_size > max(size_bf, size_af)) { dev_warn(adev->dev, "limiting GART\n"); @@ -184,23 +189,23 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1); u64 size_af, size_bf; - if (mc->vram_start > mc->gart_start) { - size_bf = (mc->vram_start & sixteen_gb_mask) - + if (mc->fb_start > mc->gart_start) { + size_bf = (mc->fb_start & sixteen_gb_mask) - ALIGN(mc->gart_end + 1, sixteen_gb); - size_af = mc->mc_mask + 1 - ALIGN(mc->vram_end + 1, sixteen_gb); + size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb); } else { - size_bf = mc->vram_start & sixteen_gb_mask; + size_bf = mc->fb_start & sixteen_gb_mask; size_af = (mc->gart_start & sixteen_gb_mask) - - ALIGN(mc->vram_end + 1, sixteen_gb); + ALIGN(mc->fb_end + 1, sixteen_gb); } if (size_bf > size_af) { - mc->agp_start = mc->vram_start > mc->gart_start ? + mc->agp_start = mc->fb_start > mc->gart_start ? mc->gart_end + 1 : 0; mc->agp_size = size_bf; } else { - mc->agp_start = (mc->vram_start > mc->gart_start ? - mc->vram_end : mc->gart_end) + 1, + mc->agp_start = (mc->fb_start > mc->gart_start ? + mc->fb_end : mc->gart_end) + 1, mc->agp_size = size_af; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index a929a55b30c2..b00b5165969b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -114,6 +114,14 @@ struct amdgpu_gmc { u64 gart_end; u64 vram_start; u64 vram_end; + /* FB region , it's same as local vram region in single GPU, in XGMI + * configuration, this region covers all GPUs in the same hive , + * each GPU in the hive has the same view of this FB region . 
+ * GPU0's vram starts at offset (0 * segment size) , + * GPU1 starts at offset (1 * segment size), etc. + */ + u64 fb_start; + u64 fb_end; unsigned vram_width; u64 real_vram_size; int vram_mtrr; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c index d4170cb41055..5e9ab8eb214a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c @@ -44,6 +44,9 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION); if (adev->gmc.xgmi.physical_node_id > 3) return -EINVAL; + adev->gmc.xgmi.node_segment_size = REG_GET_FIELD( + RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE), + MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index e9b5a1300657..b1c848937e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -771,12 +771,18 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = 0; if (!amdgpu_sriov_vf(adev)) base = mmhub_v1_0_get_fb_location(adev); + /* add the xgmi offset of the physical node */ + base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc); if (!amdgpu_sriov_vf(adev)) amdgpu_gmc_agp_location(adev, mc); /* base offset of vram pages */ adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); + + /* XXX: add the xgmi offset of the physical node? */ + adev->vm_manager.vram_base_offset += + adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 73d7c075dd33..0e09549d1db8 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -38,10 +38,17 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) { u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE); + u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP); base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK; base <<= 24; + top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK; + top <<= 24; + + adev->gmc.fb_start = base; + adev->gmc.fb_end = top; + return base; } -- GitLab From 6449724058c66408df599cd0b97d9df531137a08 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Wed, 27 Jun 2018 17:24:46 -0400 Subject: [PATCH 1311/1692] drm/amdgpu : Add psp function interfaces for XGMI support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Place holder for XGMI support Signed-off-by: Shaoyun Liu Reviewed-by: Huang Rui Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 34 +++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 981887c928b7..8b8720e9c3f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -35,6 +35,7 @@ #define PSP_TMR_SIZE 0x400000 struct psp_context; +struct psp_xgmi_topology_info; enum psp_ring_type { @@ -79,6 +80,12 @@ struct psp_funcs enum AMDGPU_UCODE_ID ucode_type); bool (*smu_reload_quirk)(struct psp_context *psp); int (*mode1_reset)(struct psp_context *psp); + uint64_t (*xgmi_get_device_id)(struct psp_context *psp); + uint64_t (*xgmi_get_hive_id)(struct psp_context *psp); + int 
(*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, + struct psp_xgmi_topology_info *topology); + int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, + struct psp_xgmi_topology_info *topology); }; struct psp_context @@ -134,6 +141,23 @@ struct amdgpu_psp_funcs { enum AMDGPU_UCODE_ID); }; +struct psp_xgmi_topology_info { + /* Generated by PSP to identify the GPU instance within xgmi connection */ + uint64_t device_id; + /* + * If all bits set to 0 , driver indicates it wants to retrieve the xgmi + * connection vector topology, but not access enable the connections + * if some or all bits are set to 1, driver indicates it want to retrieve the + * current xgmi topology and access enable the link to GPU[i] associated + * with the bit position in the vector. + * On return,: bits indicated which xgmi links are present/active depending + * on the value passed in. The relative bit offset for the relative GPU index + * within the hive is always marked active. + */ + uint32_t connection_mask; + uint32_t reserved; /* must be 0 */ +}; + #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type)) #define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type)) #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type)) @@ -153,6 +177,16 @@ struct amdgpu_psp_funcs { ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) #define psp_mode1_reset(psp) \ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) +#define psp_xgmi_get_device_id(psp) \ + ((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0) +#define psp_xgmi_get_hive_id(psp) \ + ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0) +#define psp_xgmi_get_topology_info(psp, num_device, topology) \ + ((psp)->funcs->xgmi_get_topology_info ? \ + (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL) +#define psp_xgmi_set_topology_info(psp, num_device, topology) \ + ((psp)->funcs->xgmi_set_topology_info ? \ + (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) -- GitLab From 78122127a4180306e2360b7a9e418eed21f76cf3 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Tue, 14 Aug 2018 13:30:00 -0400 Subject: [PATCH 1312/1692] drm/amdgpu: Add place holder functions for xgmi topology interface with psp Add dummy function for xgmi function interface with psp Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 30 ++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index b70cfa3fe1b2..9217af00bc8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -548,6 +548,33 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) return 0; } +/* TODO: Fill in follow functions once PSP firmware interface for XGMI is ready. 
+ * For now, return success and hack the hive_id so high level code can + * start testing + */ +static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp, + int number_devices, struct psp_xgmi_topology_info *topology) +{ + return 0; +} + +static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, + int number_devices, struct psp_xgmi_topology_info *topology) +{ + return 0; +} + +static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) +{ + u64 hive_id = 0; + + /* Remove me when we can get correct hive_id through PSP */ + if (psp->adev->gmc.xgmi.num_physical_nodes) + hive_id = 0x123456789abcdef; + + return hive_id; +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv, @@ -560,6 +587,9 @@ static const struct psp_funcs psp_v11_0_funcs = { .cmd_submit = psp_v11_0_cmd_submit, .compare_sram_data = psp_v11_0_compare_sram_data, .mode1_reset = psp_v11_0_mode1_reset, + .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info, + .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, + .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) -- GitLab From fb30fc59a245512b94a065ee1557d7e1ae88484a Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Wed, 27 Jun 2018 17:25:53 -0400 Subject: [PATCH 1313/1692] drm/amdgpu : Generate XGMI topology info from driver level Driver will save an array of XGMI hive info, each hive will have a list of devices that have the same hive ID. Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 119 +++++++++++++++++++++ 5 files changed, 129 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index e83ba7b0904e..138cb787d27e 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -53,7 +53,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ - amdgpu_gmc.o + amdgpu_gmc.o amdgpu_xgmi.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 09bdedfc91c7..c43bc83c2d29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1216,6 +1216,12 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +/* + * functions used by amdgpu_xgmi.c + */ +int amdgpu_xgmi_add_device(struct amdgpu_device *adev); + /* * functions used by amdgpu_encoder.c */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index acfc63e68b08..d4855d1ef51f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1586,6 +1586,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = true; } + 
amdgpu_xgmi_add_device(adev); amdgpu_amdkfd_device_init(adev); if (amdgpu_sriov_vf(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index b00b5165969b..6fa7ef446e46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -97,6 +97,8 @@ struct amdgpu_xgmi { unsigned physical_node_id; /* number of nodes (0-4) */ unsigned num_physical_nodes; + /* gpu list in the same hive */ + struct list_head head; }; struct amdgpu_gmc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c new file mode 100644 index 000000000000..897afbb348c1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -0,0 +1,119 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + */ +#include +#include "amdgpu.h" +#include "amdgpu_psp.h" + + +static DEFINE_MUTEX(xgmi_mutex); + +#define AMDGPU_MAX_XGMI_HIVE 8 +#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4 + +struct amdgpu_hive_info { + uint64_t hive_id; + struct list_head device_list; +}; + +static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; +static unsigned hive_count = 0; + +static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) +{ + int i; + struct amdgpu_hive_info *tmp; + + if (!adev->gmc.xgmi.hive_id) + return NULL; + for (i = 0 ; i < hive_count; ++i) { + tmp = &xgmi_hives[i]; + if (tmp->hive_id == adev->gmc.xgmi.hive_id) + return tmp; + } + if (i >= AMDGPU_MAX_XGMI_HIVE) + return NULL; + + /* initialize new hive if not exist */ + tmp = &xgmi_hives[hive_count++]; + tmp->hive_id = adev->gmc.xgmi.hive_id; + INIT_LIST_HEAD(&tmp->device_list); + return tmp; +} + +int amdgpu_xgmi_add_device(struct amdgpu_device *adev) +{ + struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE]; + struct amdgpu_hive_info *hive; + struct amdgpu_xgmi *entry; + struct amdgpu_device *tmp_adev; + + int count = 0, ret = -EINVAL; + + if ((adev->asic_type < CHIP_VEGA20) || + (adev->flags & AMD_IS_APU) ) + return 0; + adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp); + adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); + + memset(&tmp_topology[0], 0, sizeof(tmp_topology)); + mutex_lock(&xgmi_mutex); + hive = amdgpu_get_xgmi_hive(adev); + if (!hive) + goto exit; + + list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); + list_for_each_entry(entry, &hive->device_list, head) + tmp_topology[count++].device_id = entry->device_id; + + ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology); + if (ret) { + dev_err(adev->dev, + "XGMI: Get topology failure on device %llx, hive %llx, ret %d", + adev->gmc.xgmi.device_id, + adev->gmc.xgmi.hive_id, ret); + goto exit; + } + /* Each psp need to set the latest topology */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology); + if (ret) { + dev_err(tmp_adev->dev, + "XGMI: Set topology failure on device %llx, hive %llx, ret %d", + tmp_adev->gmc.xgmi.device_id, + tmp_adev->gmc.xgmi.hive_id, ret); + /* To do : continue with some node failed or disable the whole hive */ + break; + } + } + if (!ret) + dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n", + adev->gmc.xgmi.physical_node_id, + adev->gmc.xgmi.hive_id); + +exit: + mutex_unlock(&xgmi_mutex); + return ret; +} + + -- GitLab From 6ef22c39edd4b5392e57403d01028c012f3e5e5e Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Fri, 6 Jul 2018 11:26:08 -0400 Subject: [PATCH 1314/1692] drm/amd/include: Add get_hive_id interface in kfd2kgd MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit KFD need to get hive id from amdgpu to build up the XGMI topology Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 31c52c116e20..cb4deb28bb7f 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -313,6 +313,8 @@ struct tile_config { * @set_compute_idle: Indicates that compute is idle on a device. 
This * can be used to change power profiles depending on compute activity. * + * @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled + * * This structure contains function pointers to services that the kgd driver * provides to amdkfd driver. * @@ -438,6 +440,9 @@ struct kfd2kgd_calls { void (*gpu_recover)(struct kgd_dev *kgd); void (*set_compute_idle)(struct kgd_dev *kgd, bool idle); + + uint64_t (*get_hive_id)(struct kgd_dev *kgd); + }; /** -- GitLab From db8b62c04b2344f17570186f8f022fb96e71d8d8 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Fri, 6 Jul 2018 11:28:23 -0400 Subject: [PATCH 1315/1692] drm/amdgpu: get_hive_id from amdgpu side (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Retrieve hive_id from amdgpu device v2: compile fix Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 3 ++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 1a0824e6c8d0..5661c343a71d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -411,6 +411,13 @@ uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd) return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); } +uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + return adev->gmc.xgmi.hive_id; +} + int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 2a1da3fe2b06..41e7dfc3ced3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -145,6 +145,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info); uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); +uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd); #define read_user_wptr(mmptr, wptr, dst) \ ({ \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 3dc987cab0ea..c9176537550b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -215,7 +215,8 @@ static const struct kfd2kgd_calls kfd2kgd = { .invalidate_tlbs_vmid = invalidate_tlbs_vmid, .submit_ib = amdgpu_amdkfd_submit_ib, .gpu_recover = amdgpu_amdkfd_gpu_reset, - .set_compute_idle = amdgpu_amdkfd_set_compute_idle + .set_compute_idle = amdgpu_amdkfd_set_compute_idle, + .get_hive_id = amdgpu_amdkfd_get_hive_id, }; struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) -- GitLab From 0c1690e38b5e688166d009145ba0a4806f774465 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Fri, 6 Jul 2018 11:32:42 -0400 Subject: [PATCH 1316/1692] drm/amdkfd: kfd expose the hive_id of the device through its node properties MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thunk will generate the XGMI topology information when necessary with the hive_id for each specified device 
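For reference, a minimal userspace sketch of reading the new property. This is
illustrative only (not part of this patch) and assumes the usual KFD sysfs
layout of one "name value" pair per line, commonly found under
/sys/class/kfd/kfd/topology/nodes/<N>/properties; adjust the path if your
system exposes the topology elsewhere. Nodes reporting the same non-zero
hive_id belong to the same xGMI hive.

  /* Hypothetical helper, not driver code: fetch "hive_id" of a KFD node. */
  #include <stdio.h>
  #include <string.h>

  static unsigned long long kfd_node_hive_id(int node)
  {
          char path[128], key[64];
          unsigned long long val, hive_id = 0;    /* 0: not in an xGMI hive */
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/class/kfd/kfd/topology/nodes/%d/properties", node);
          f = fopen(path, "r");
          if (!f)
                  return 0;
          while (fscanf(f, "%63s %llu", key, &val) == 2) {
                  if (!strcmp(key, "hive_id")) {
                          hive_id = val;
                          break;
                  }
          }
          fclose(f);
          return hive_id;
  }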
Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 +++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 +++ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 ++++ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1 + 4 files changed, 11 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1b048715ab8a..b4d9e6b4f583 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -476,6 +476,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto kfd_doorbell_error; } + if (kfd->kfd2kgd->get_hive_id) + kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd); + if (kfd_topology_add_device(kfd)) { dev_err(kfd_device, "Error adding device to topology\n"); goto kfd_topology_add_device_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 355f79da8a63..6a5418f3d8fb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -254,6 +254,9 @@ struct kfd_dev { bool cwsr_enabled; const void *cwsr_isa; unsigned int cwsr_isa_size; + + /* xGMI */ + uint64_t hive_id; }; /* KGD2KFD callbacks */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index bc95d4dfee2e..19ecc8233d66 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -443,6 +443,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.location_id); sysfs_show_32bit_prop(buffer, "drm_render_minor", dev->node_props.drm_render_minor); + sysfs_show_64bit_prop(buffer, "hive_id", + dev->node_props.hive_id); if (dev->gpu) { log_max_watch_addr = @@ -1219,6 +1221,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.drm_render_minor = gpu->shared_resources.drm_render_minor; + dev->node_props.hive_id = gpu->hive_id; + kfd_fill_mem_clk_max_info(dev); kfd_fill_iolink_non_crat_info(dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index 7d9c3f948dff..92a19be07344 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -49,6 +49,7 @@ #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000 struct kfd_node_properties { + uint64_t hive_id; uint32_t cpu_cores_count; uint32_t simd_count; uint32_t mem_banks_count; -- GitLab From aa64ca38ed8253e293b5ce24b40f31f39426e232 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Mon, 13 Aug 2018 14:02:17 -0400 Subject: [PATCH 1317/1692] drm/amdkfd: Add new iolink type defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update the iolink type defines according to the new thunk spec Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index b5cd182b9edd..7a93aeb984de 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -248,7 +248,12 @@ struct crat_subtype_ccompute { #define CRAT_IOLINK_TYPE_RAPID_IO 8 #define CRAT_IOLINK_TYPE_INFINIBAND 9 #define CRAT_IOLINK_TYPE_RESERVED3 10 -#define CRAT_IOLINK_TYPE_OTHER 11 +#define CRAT_IOLINK_TYPE_XGMI 11 +#define CRAT_IOLINK_TYPE_XGOP 12 
+#define CRAT_IOLINK_TYPE_GZ 13 +#define CRAT_IOLINK_TYPE_ETHERNET_RDMA 14 +#define CRAT_IOLINK_TYPE_RDMA_OTHER 15 +#define CRAT_IOLINK_TYPE_OTHER 16 #define CRAT_IOLINK_TYPE_MAX 255 #define CRAT_IOLINK_RESERVED_LENGTH 24 -- GitLab From ae9a25aea7f33573f56a422818bfead12aa8bfd6 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Mon, 13 Aug 2018 14:04:11 -0400 Subject: [PATCH 1318/1692] drm/amdkfd: Generate xGMI direct iolink MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Generate xGMI iolink for upper level usage Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 78 +++++++++++++++++++++++---- 1 file changed, 68 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index ee4996029a86..130db4dc115f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -346,7 +346,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, struct list_head *device_list) { struct kfd_iolink_properties *props = NULL, *props2; - struct kfd_topology_device *dev, *cpu_dev; + struct kfd_topology_device *dev, *to_dev; uint32_t id_from; uint32_t id_to; @@ -369,6 +369,8 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS) props->weight = 20; + else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI) + props->weight = 15; else props->weight = node_distance(id_from, id_to); @@ -390,19 +392,22 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, * links are not built at that time. If a PCIe type is discovered, it * means a GPU is detected and we are adding GPU->CPU to the topology. * At this time, also add the corresponded CPU->GPU link. + * For xGMI, we only added the link with one direction in the crat + * table, add corresponded reversed direction link now. 
*/ - if (props && props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS) { - cpu_dev = kfd_topology_device_by_proximity_domain(id_to); - if (!cpu_dev) + if (props && (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS || + props->iolink_type == CRAT_IOLINK_TYPE_XGMI)) { + to_dev = kfd_topology_device_by_proximity_domain(id_to); + if (!to_dev) return -ENODEV; /* same everything but the other direction */ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL); props2->node_from = id_to; props2->node_to = id_from; props2->kobj = NULL; - cpu_dev->io_link_count++; - cpu_dev->node_props.io_links_count++; - list_add_tail(&props2->list, &cpu_dev->io_link_props); + to_dev->io_link_count++; + to_dev->node_props.io_links_count++; + list_add_tail(&props2->list, &to_dev->io_link_props); } return 0; @@ -1037,7 +1042,7 @@ static int kfd_fill_gpu_memory_affinity(int *avail_size, * * Return 0 if successful else return -ve value */ -static int kfd_fill_gpu_direct_io_link(int *avail_size, +static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, struct kfd_dev *kdev, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain) @@ -1069,6 +1074,28 @@ static int kfd_fill_gpu_direct_io_link(int *avail_size, return 0; } +static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, + struct kfd_dev *kdev, + struct crat_subtype_iolink *sub_type_hdr, + uint32_t proximity_domain_from, + uint32_t proximity_domain_to) +{ + *avail_size -= sizeof(struct crat_subtype_iolink); + if (*avail_size < 0) + return -ENOMEM; + + memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink)); + + sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY; + sub_type_hdr->length = sizeof(struct crat_subtype_iolink); + sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED; + + sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; + sub_type_hdr->proximity_domain_from = proximity_domain_from; + sub_type_hdr->proximity_domain_to = proximity_domain_to; + return 0; +} + /* kfd_create_vcrat_image_gpu - Create Virtual CRAT for CPU * * @pcrat_image: Fill in VCRAT for GPU @@ -1081,14 +1108,16 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, { struct crat_header *crat_table = (struct crat_header *)pcrat_image; struct crat_subtype_generic *sub_type_hdr; + struct kfd_local_mem_info local_mem_info; + struct kfd_topology_device *peer_dev; struct crat_subtype_computeunit *cu; struct kfd_cu_info cu_info; int avail_size = *size; uint32_t total_num_of_cu; int num_of_cache_entries = 0; int cache_mem_filled = 0; + uint32_t nid = 0; int ret = 0; - struct kfd_local_mem_info local_mem_info; if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU) return -EINVAL; @@ -1212,7 +1241,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, */ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + cache_mem_filled); - ret = kfd_fill_gpu_direct_io_link(&avail_size, kdev, + ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev, (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain); if (ret < 0) @@ -1221,6 +1250,35 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, crat_table->length += sub_type_hdr->length; crat_table->total_entries++; + + /* Fill in Subtype: IO_LINKS + * Direct links from GPU to other GPUs through xGMI. + * We will loop GPUs that already be processed (with lower value + * of proximity_domain), add the link for the GPUs with same + * hive id (from this GPU to other GPU) . The reversed iolink + * (from other GPU to this GPU) will be added + * in kfd_parse_subtype_iolink. 
+ */ + if (kdev->hive_id) { + for (nid = 0; nid < proximity_domain; ++nid) { + peer_dev = kfd_topology_device_by_proximity_domain(nid); + if (!peer_dev->gpu) + continue; + if (peer_dev->gpu->hive_id != kdev->hive_id) + continue; + sub_type_hdr = (typeof(sub_type_hdr))( + (char *)sub_type_hdr + + sizeof(struct crat_subtype_iolink)); + ret = kfd_fill_gpu_xgmi_link_to_gpu( + &avail_size, kdev, + (struct crat_subtype_iolink *)sub_type_hdr, + proximity_domain, nid); + if (ret < 0) + return ret; + crat_table->length += sub_type_hdr->length; + crat_table->total_entries++; + } + } *size = crat_table->length; pr_info("Virtual CRAT table created for GPU\n"); -- GitLab From 67f7cf9f76bccaadafc41b541e361ddb925c8921 Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Fri, 7 Sep 2018 12:00:07 -0400 Subject: [PATCH 1319/1692] drm/amdkfd: Only add bi-directional iolink on GPU with XGMI or largebar (v2) v2: compile fix Signed-off-by: shaoyunl Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 15 +++++++++------ drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 + 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 297b36c26a05..758398bdb39b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1210,7 +1210,7 @@ static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p, return ret; } -static bool kfd_dev_is_large_bar(struct kfd_dev *dev) +bool kfd_dev_is_large_bar(struct kfd_dev *dev) { struct kfd_local_mem_info mem_info; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 130db4dc115f..d4560f1869bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -353,8 +353,8 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, id_from = iolink->proximity_domain_from; id_to = iolink->proximity_domain_to; - pr_debug("Found IO link entry in CRAT table with id_from=%d\n", - id_from); + pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n", + id_from, id_to); list_for_each_entry(dev, device_list, list) { if (id_from == dev->proximity_domain) { props = kfd_alloc_struct(props); @@ -391,12 +391,12 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, /* CPU topology is created before GPUs are detected, so CPU->GPU * links are not built at that time. If a PCIe type is discovered, it * means a GPU is detected and we are adding GPU->CPU to the topology. - * At this time, also add the corresponded CPU->GPU link. + * At this time, also add the corresponded CPU->GPU link if GPU + * is large bar. * For xGMI, we only added the link with one direction in the crat * table, add corresponded reversed direction link now. 
*/ - if (props && (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS || - props->iolink_type == CRAT_IOLINK_TYPE_XGMI)) { + if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) { to_dev = kfd_topology_device_by_proximity_domain(id_to); if (!to_dev) return -ENODEV; @@ -1057,6 +1057,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY; sub_type_hdr->length = sizeof(struct crat_subtype_iolink); sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED; + if (kfd_dev_is_large_bar(kdev)) + sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL; /* Fill in IOLINK subtype. * TODO: Fill-in other fields of iolink subtype @@ -1088,7 +1090,8 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY; sub_type_hdr->length = sizeof(struct crat_subtype_iolink); - sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED; + sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED | + CRAT_IOLINK_FLAGS_BI_DIRECTIONAL; sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; sub_type_hdr->proximity_domain_from = proximity_domain_from; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index 7a93aeb984de..7c3f192fe25f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -232,7 +232,8 @@ struct crat_subtype_ccompute { #define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2) #define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3) #define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4) -#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0 +#define CRAT_IOLINK_FLAGS_BI_DIRECTIONAL (1 << 31) +#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0x7fffffe0 /* * IO interface types diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 6a5418f3d8fb..05283c99d731 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -696,6 +696,7 @@ struct amdkfd_ioctl_desc { unsigned int cmd_drv; const char *name; }; +bool kfd_dev_is_large_bar(struct kfd_dev *dev); int kfd_process_create_wq(void); void kfd_process_destroy_wq(void); -- GitLab From b463d4e53ca9bdbf227e19b477fbfcdedaa14c84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 3 Sep 2018 10:51:51 +0200 Subject: [PATCH 1320/1692] drm/amdgpu: fix amdgpu_mn_unlock() in the CS error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid unlocking a lock we never locked. 
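The rule being restored here is the usual goto-ladder convention: each error
label undoes only what was actually acquired before the jump, so the unlock
belongs under a label that is reachable only with the lock held. A
self-contained sketch of that pattern, using made-up helpers rather than the
amdgpu code:

  /* Every helper below is made up; this only illustrates the unwind order. */
  #include <stdio.h>

  static int locked, have_job;

  static int  build_job(void)    { have_job = 1; return 0; }
  static void free_job(void)     { have_job = 0; }
  static void take_lock(void)    { locked = 1; }
  static void release_lock(void) { locked = 0; }
  static int  push_job(void)     { return -1; /* pretend the last step fails */ }

  static int submit(void)
  {
          int r;

          r = build_job();
          if (r)
                  return r;               /* nothing held yet */

          take_lock();

          r = push_job();
          if (r)
                  goto err_unlock;        /* lock held, job allocated */

          release_lock();
          return 0;

  err_unlock:
          release_lock();                 /* only reachable with the lock held */
          free_job();
          return r;
  }

  int main(void)
  {
          printf("submit()=%d locked=%d have_job=%d\n",
                 submit(), locked, have_job);
          return 0;
  }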
Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b6e9df11115d..1b5a0a73d770 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1262,10 +1262,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, error_abort: dma_fence_put(&job->base.s_fence->finished); job->base.s_fence = NULL; + amdgpu_mn_unlock(p->mn); error_unlock: amdgpu_job_free(job); - amdgpu_mn_unlock(p->mn); return r; } -- GitLab From 68ebc13ea40656fddd3803735d621921a2d74a5e Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Fri, 7 Sep 2018 13:50:31 +0800 Subject: [PATCH 1321/1692] drm/amdgpu: Fix SDMA hang in prt mode v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix SDMA hang in prt mode, clear XNACK_WATERMARK in reg SDMA0_UTCL1_WATERMK to avoid the issue Affected ASICs: VEGA10 VEGA12 RV1 RV2 v2: add reg clear for SDMA1 Signed-off-by: Tao Zhou Tested-by: Yukun Li Reviewed-by: Hawking Zhang Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e7ca4623cfb9..7c3b634d8d5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), @@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000) }; static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { @@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) }; static const struct soc15_reg_golden golden_settings_sdma_4_2[] = -- GitLab From 
3a74987b24279d242d17f522f8435f1942a3c948 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 10 Sep 2018 17:51:31 +0800 Subject: [PATCH 1322/1692] drm/amdgpu: move PSP init prior to IH in gpu reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit since we use PSP to program IH regs now Signed-off-by: Monk Liu Acked-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8ab5ccbc14ac..39bf2ce548c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) static enum amd_ip_block_type ip_order[] = { AMD_IP_BLOCK_TYPE_GMC, AMD_IP_BLOCK_TYPE_COMMON, + AMD_IP_BLOCK_TYPE_PSP, AMD_IP_BLOCK_TYPE_IH, }; @@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) static enum amd_ip_block_type ip_order[] = { AMD_IP_BLOCK_TYPE_SMC, - AMD_IP_BLOCK_TYPE_PSP, AMD_IP_BLOCK_TYPE_DCE, AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, -- GitLab From a6ae928c25835ca18deb4a527079f169b68ed292 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Mon, 10 Sep 2018 15:52:06 +0200 Subject: [PATCH 1323/1692] Revert "printk: make sure to print log on console." This reverts commit 375899cddcbb26881b03cb3fbdcfd600e4e67f4a. The visibility of early messages did not longer take into account "quiet", "debug", and "loglevel" early parameters. It would be possible to invalidate and recompute LOG_NOCONS flag for the affected messages. But it would be hairy. Instead this patch just reverts the problematic commit. We could come up with a better solution for the original problem. For example, we could simplify the logic and just mark messages that should always be visible or always invisible on the console. Also this patch reverts the related build fix commit ffaa619af1b06 ("printk: Fix warning about unused suppress_message_printing"). Finally, this patch does not put back the unused LOG_NOCONS flag. Link: http://lkml.kernel.org/r/20180910145747.emvfzv4mzlk5dfqk@pathway.suse.cz Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H . 
Peter Anvin" Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Cc: Steven Rostedt Cc: Maninder Singh Reported-by: Hans de Goede Acked-by: Hans de Goede Acked-by: Sergey Senozhatsky Signed-off-by: Petr Mladek --- kernel/printk/printk.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 9a63aeeaaf5d..e30e5023511b 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -349,7 +349,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT; */ enum log_flags { - LOG_NOCONS = 1, /* suppress print, do not print to console */ LOG_NEWLINE = 2, /* text ended with a newline */ LOG_PREFIX = 4, /* text started with a prefix */ LOG_CONT = 8, /* text is a fragment of a continuation line */ @@ -1879,9 +1878,6 @@ int vprintk_store(int facility, int level, if (dict) lflags |= LOG_PREFIX|LOG_NEWLINE; - if (suppress_message_printing(level)) - lflags |= LOG_NOCONS; - return log_output(facility, level, lflags, dict, dictlen, text, text_len); } @@ -2030,6 +2026,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len, const char *text, size_t len) {} static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *buf, size_t size) { return 0; } +static bool suppress_message_printing(int level) { return false; } #endif /* CONFIG_PRINTK */ @@ -2365,10 +2362,11 @@ void console_unlock(void) break; msg = log_from_idx(console_idx); - if (msg->flags & LOG_NOCONS) { + if (suppress_message_printing(msg->level)) { /* - * Skip record if !ignore_loglevel, and - * record has level above the console loglevel. + * Skip record we have buffered and already printed + * directly to the console when we received it, and + * record that has level above the console loglevel. */ console_idx = log_next(console_idx); console_seq++; -- GitLab From 13aceef06adfaf93d52e01e28a8bc8a0ad471d83 Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Sun, 9 Sep 2018 17:47:31 +0200 Subject: [PATCH 1324/1692] arm64: jump_label.h: use asm_volatile_goto macro instead of "asm goto" All other uses of "asm goto" go through asm_volatile_goto, which avoids a miscompile when using GCC < 4.8.2. Replace our open-coded "asm goto" statements with the asm_volatile_goto macro to avoid issues with older toolchains. 
Cc: Catalin Marinas Reviewed-by: Nick Desaulniers Signed-off-by: Miguel Ojeda Signed-off-by: Will Deacon --- arch/arm64/include/asm/jump_label.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h index 1b5e0e843c3a..7e2b3e360086 100644 --- a/arch/arm64/include/asm/jump_label.h +++ b/arch/arm64/include/asm/jump_label.h @@ -28,7 +28,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { - asm goto("1: nop\n\t" + asm_volatile_goto("1: nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" ".align 3\n\t" ".quad 1b, %l[l_yes], %c0\n\t" @@ -42,7 +42,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { - asm goto("1: b %l[l_yes]\n\t" + asm_volatile_goto("1: b %l[l_yes]\n\t" ".pushsection __jump_table, \"aw\"\n\t" ".align 3\n\t" ".quad 1b, %l[l_yes], %c0\n\t" -- GitLab From 84c57dbd3c480fb2730c393a2cef994ddb4f42cc Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 10 Sep 2018 15:20:54 +0100 Subject: [PATCH 1325/1692] arm64: kernel: arch_crash_save_vmcoreinfo() should depend on CONFIG_CRASH_CORE Since commit 23c85094fe18 ("proc/kcore: add vmcoreinfo note to /proc/kcore") the kernel has exported the vmcoreinfo PT_NOTE on /proc/kcore as well as /proc/vmcore. arm64 only exposes it's additional arch information via arch_crash_save_vmcoreinfo() if built with CONFIG_KEXEC, as kdump was previously the only user of vmcoreinfo. Move this weak function to a separate file that is built at the same time as its caller in kernel/crash_core.c. This ensures values like 'kimage_voffset' are always present in the vmcoreinfo PT_NOTE. CC: AKASHI Takahiro Reviewed-by: Bhupesh Sharma Signed-off-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/kernel/Makefile | 1 + arch/arm64/kernel/crash_core.c | 19 +++++++++++++++++++ arch/arm64/kernel/machine_kexec.c | 11 ----------- 3 files changed, 20 insertions(+), 11 deletions(-) create mode 100644 arch/arm64/kernel/crash_core.c diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 95ac7374d723..4c8b13bede80 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c new file mode 100644 index 000000000000..ca4c3e12d8c5 --- /dev/null +++ b/arch/arm64/kernel/crash_core.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Linaro. + * Copyright (C) Huawei Futurewei Technologies. 
+ */ + +#include +#include + +void arch_crash_save_vmcoreinfo(void) +{ + VMCOREINFO_NUMBER(VA_BITS); + /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */ + vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n", + kimage_voffset); + vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n", + PHYS_OFFSET); + vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); +} diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index f6a5c6bc1434..922add8adb74 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) } } #endif /* CONFIG_HIBERNATION */ - -void arch_crash_save_vmcoreinfo(void) -{ - VMCOREINFO_NUMBER(VA_BITS); - /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */ - vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n", - kimage_voffset); - vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n", - PHYS_OFFSET); - vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); -} -- GitLab From 94f14e4728125f979629b2b020d31cd718191626 Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Tue, 11 Sep 2018 14:10:12 +0300 Subject: [PATCH 1326/1692] Bluetooth: SMP: Fix trying to use non-existent local OOB data A remote device may claim that it has received our OOB data, even though we never geneated it. Add a new flag to track whether we actually have OOB data, and ignore the remote peer's flag if haven't generated OOB data. Signed-off-by: Johan Hedberg Signed-off-by: Marcel Holtmann --- net/bluetooth/smp.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index ae91e2d40056..9752879fdd3a 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -83,6 +83,7 @@ enum { struct smp_dev { /* Secure Connections OOB data */ + bool local_oob; u8 local_pk[64]; u8 local_rand[16]; bool debug_key; @@ -599,6 +600,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) memcpy(rand, smp->local_rand, 16); + smp->local_oob = true; + return 0; } @@ -1785,7 +1788,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) * successfully received our local OOB data - therefore set the * flag to indicate that local OOB is in use. */ - if (req->oob_flag == SMP_OOB_PRESENT) + if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); /* SMP over BR/EDR requires special treatment */ @@ -1967,7 +1970,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) * successfully received our local OOB data - therefore set the * flag to indicate that local OOB is in use. 
*/ - if (rsp->oob_flag == SMP_OOB_PRESENT) + if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); smp->prsp[0] = SMP_CMD_PAIRING_RSP; @@ -3230,6 +3233,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) return ERR_CAST(tfm_ecdh); } + smp->local_oob = false; smp->tfm_aes = tfm_aes; smp->tfm_cmac = tfm_cmac; smp->tfm_ecdh = tfm_ecdh; -- GitLab From 4ba5175f2c10affd412fa41855cecda02b66cd71 Mon Sep 17 00:00:00 2001 From: Matias Karhumaa Date: Tue, 11 Sep 2018 14:10:13 +0300 Subject: [PATCH 1327/1692] Bluetooth: Use correct tfm to generate OOB data In case local OOB data was generated and other device initiated pairing claiming that it has got OOB data, following crash occurred: [ 222.847853] general protection fault: 0000 [#1] SMP PTI [ 222.848025] CPU: 1 PID: 42 Comm: kworker/u5:0 Tainted: G C 4.18.0-custom #4 [ 222.848158] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 [ 222.848307] Workqueue: hci0 hci_rx_work [bluetooth] [ 222.848416] RIP: 0010:compute_ecdh_secret+0x5a/0x270 [bluetooth] [ 222.848540] Code: 0c af f5 48 8b 3d 46 de f0 f6 ba 40 00 00 00 be c0 00 60 00 e8 b7 7b c5 f5 48 85 c0 0f 84 ea 01 00 00 48 89 c3 e8 16 0c af f5 <49> 8b 47 38 be c0 00 60 00 8b 78 f8 48 83 c7 48 e8 51 84 c5 f5 48 [ 222.848914] RSP: 0018:ffffb1664087fbc0 EFLAGS: 00010293 [ 222.849021] RAX: ffff8a5750d7dc00 RBX: ffff8a5671096780 RCX: ffffffffc08bc32a [ 222.849111] RDX: 0000000000000000 RSI: 00000000006000c0 RDI: ffff8a5752003800 [ 222.849192] RBP: ffffb1664087fc60 R08: ffff8a57525280a0 R09: ffff8a5752003800 [ 222.849269] R10: ffffb1664087fc70 R11: 0000000000000093 R12: ffff8a5674396e00 [ 222.849350] R13: ffff8a574c2e79aa R14: ffff8a574c2e796a R15: 020e0e100d010101 [ 222.849429] FS: 0000000000000000(0000) GS:ffff8a5752500000(0000) knlGS:0000000000000000 [ 222.849518] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 222.849586] CR2: 000055856016a038 CR3: 0000000110d2c005 CR4: 00000000000606e0 [ 222.849671] Call Trace: [ 222.849745] ? sc_send_public_key+0x110/0x2a0 [bluetooth] [ 222.849825] ? sc_send_public_key+0x115/0x2a0 [bluetooth] [ 222.849925] smp_recv_cb+0x959/0x2490 [bluetooth] [ 222.850023] ? _cond_resched+0x19/0x40 [ 222.850105] ? mutex_lock+0x12/0x40 [ 222.850202] l2cap_recv_frame+0x109d/0x3420 [bluetooth] [ 222.850315] ? l2cap_recv_frame+0x109d/0x3420 [bluetooth] [ 222.850426] ? __switch_to_asm+0x34/0x70 [ 222.850515] ? __switch_to_asm+0x40/0x70 [ 222.850625] ? __switch_to_asm+0x34/0x70 [ 222.850724] ? __switch_to_asm+0x40/0x70 [ 222.850786] ? __switch_to_asm+0x34/0x70 [ 222.850846] ? __switch_to_asm+0x40/0x70 [ 222.852581] ? __switch_to_asm+0x34/0x70 [ 222.854976] ? __switch_to_asm+0x40/0x70 [ 222.857475] ? __switch_to_asm+0x40/0x70 [ 222.859775] ? __switch_to_asm+0x34/0x70 [ 222.861218] ? __switch_to_asm+0x40/0x70 [ 222.862327] ? __switch_to_asm+0x34/0x70 [ 222.863758] l2cap_recv_acldata+0x266/0x3c0 [bluetooth] [ 222.865122] hci_rx_work+0x1c9/0x430 [bluetooth] [ 222.867144] process_one_work+0x210/0x4c0 [ 222.868248] worker_thread+0x41/0x4d0 [ 222.869420] kthread+0x141/0x160 [ 222.870694] ? process_one_work+0x4c0/0x4c0 [ 222.871668] ? 
kthread_create_worker_on_cpu+0x90/0x90 [ 222.872896] ret_from_fork+0x35/0x40 [ 222.874132] Modules linked in: algif_hash algif_skcipher af_alg rfcomm bnep btusb btrtl btbcm btintel snd_intel8x0 cmac intel_rapl_perf vboxvideo(C) snd_ac97_codec bluetooth ac97_bus joydev ttm snd_pcm ecdh_generic drm_kms_helper snd_timer snd input_leds drm serio_raw fb_sys_fops soundcore syscopyarea sysfillrect sysimgblt mac_hid sch_fq_codel ib_iser rdma_cm iw_cm ib_cm ib_core iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi ip_tables x_tables autofs4 btrfs zstd_compress raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx xor raid6_pq libcrc32c raid1 raid0 multipath linear hid_generic usbhid hid crct10dif_pclmul crc32_pclmul ghash_clmulni_intel pcbc aesni_intel aes_x86_64 crypto_simd cryptd glue_helper ahci psmouse libahci i2c_piix4 video e1000 pata_acpi [ 222.883153] fbcon_switch: detected unhandled fb_set_par error, error code -16 [ 222.886774] fbcon_switch: detected unhandled fb_set_par error, error code -16 [ 222.890503] ---[ end trace 6504aa7a777b5316 ]--- [ 222.890541] RIP: 0010:compute_ecdh_secret+0x5a/0x270 [bluetooth] [ 222.890551] Code: 0c af f5 48 8b 3d 46 de f0 f6 ba 40 00 00 00 be c0 00 60 00 e8 b7 7b c5 f5 48 85 c0 0f 84 ea 01 00 00 48 89 c3 e8 16 0c af f5 <49> 8b 47 38 be c0 00 60 00 8b 78 f8 48 83 c7 48 e8 51 84 c5 f5 48 [ 222.890555] RSP: 0018:ffffb1664087fbc0 EFLAGS: 00010293 [ 222.890561] RAX: ffff8a5750d7dc00 RBX: ffff8a5671096780 RCX: ffffffffc08bc32a [ 222.890565] RDX: 0000000000000000 RSI: 00000000006000c0 RDI: ffff8a5752003800 [ 222.890571] RBP: ffffb1664087fc60 R08: ffff8a57525280a0 R09: ffff8a5752003800 [ 222.890576] R10: ffffb1664087fc70 R11: 0000000000000093 R12: ffff8a5674396e00 [ 222.890581] R13: ffff8a574c2e79aa R14: ffff8a574c2e796a R15: 020e0e100d010101 [ 222.890586] FS: 0000000000000000(0000) GS:ffff8a5752500000(0000) knlGS:0000000000000000 [ 222.890591] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 222.890594] CR2: 000055856016a038 CR3: 0000000110d2c005 CR4: 00000000000606e0 This commit fixes a bug where invalid pointer to crypto tfm was used for SMP SC ECDH calculation when OOB was in use. Solution is to use same crypto tfm than when generating OOB material on generate_oob() function. This bug was introduced in commit c0153b0b901a ("Bluetooth: let the crypto subsystem generate the ecc privkey"). Bug was found by fuzzing kernel SMP implementation using Synopsys Defensics. Signed-off-by: Matias Karhumaa Signed-off-by: Johan Hedberg Signed-off-by: Marcel Holtmann --- net/bluetooth/smp.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 9752879fdd3a..3a7b0773536b 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -2700,7 +2700,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) * key was set/generated. */ if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { - struct smp_dev *smp_dev = chan->data; + struct l2cap_chan *hchan = hdev->smp_data; + struct smp_dev *smp_dev; + + if (!hchan || !hchan->data) + return SMP_UNSPECIFIED; + + smp_dev = hchan->data; tfm_ecdh = smp_dev->tfm_ecdh; } else { -- GitLab From e6a57d22f787e73635ce0d29eef0abb77928b3e9 Mon Sep 17 00:00:00 2001 From: Hermes Zhang Date: Tue, 28 Aug 2018 09:48:30 +0800 Subject: [PATCH 1328/1692] Bluetooth: hci_ldisc: Free rw_semaphore on close The percpu_rw_semaphore is not currently freed, and this leads to a crash when the stale rcu callback is invoked. DEBUG_OBJECTS detects this. 
ODEBUG: free active (active state 1) object type: rcu_head hint: (null) ------------[ cut here ]------------ WARNING: CPU: 1 PID: 2024 at debug_print_object+0xac/0xc8 PC is at debug_print_object+0xac/0xc8 LR is at debug_print_object+0xac/0xc8 Call trace: [] debug_print_object+0xac/0xc8 [] debug_check_no_obj_freed+0x1e8/0x228 [] kfree+0x1cc/0x250 [] hci_uart_tty_close+0x54/0x108 [] tty_ldisc_close.isra.1+0x40/0x58 [] tty_ldisc_kill+0x1c/0x40 [] tty_ldisc_release+0x94/0x170 [] tty_release_struct+0x1c/0x58 [] tty_release+0x3b0/0x490 [] __fput+0x88/0x1d0 [] ____fput+0xc/0x18 [] task_work_run+0x9c/0xc0 [] do_exit+0x24c/0x8a0 [] do_group_exit+0x38/0xa0 [] __wake_up_parent+0x0/0x28 [] el0_svc_naked+0x34/0x38 ---[ end trace bfe08cbd89098cdf ]--- Signed-off-by: Hermes Zhang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_ldisc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 963bb0309e25..ea6238ed5c0e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty) } clear_bit(HCI_UART_PROTO_SET, &hu->flags); + percpu_free_rwsem(&hu->proto_lock); + kfree(hu); } -- GitLab From c3f00182a83b590655cb68b43b9dcc6cdd05316a Mon Sep 17 00:00:00 2001 From: Tyrel Datwyler Date: Thu, 6 Sep 2018 14:16:53 -0500 Subject: [PATCH 1329/1692] MAINTAINERS: Add entries for PPC64 RPA PCI hotplug drivers Add myself as maintainer of the IBM RPA hotplug modules in the drivers/pci/hotplug directory. These modules provide kernel interfaces for support of Dynamic Logical Partitioning (DLPAR) of Logical and Physical IO slots, and hotplug of physical PCI slots of a PHB on RPA-compliant ppc64 platforms (pseries). Signed-off-by: Tyrel Datwyler Signed-off-by: Bjorn Helgaas --- MAINTAINERS | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index a5b256b25905..3f3ed8fcb202 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7015,6 +7015,20 @@ F: drivers/crypto/vmx/aes* F: drivers/crypto/vmx/ghash* F: drivers/crypto/vmx/ppc-xlate.pl +IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform +M: Tyrel Datwyler +L: linux-pci@vger.kernel.org +L: linuxppc-dev@lists.ozlabs.org +S: Supported +F: drivers/pci/hotplug/rpaphp* + +IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform +M: Tyrel Datwyler +L: linux-pci@vger.kernel.org +L: linuxppc-dev@lists.ozlabs.org +S: Supported +F: drivers/pci/hotplug/rpadlpar* + IBM ServeRAID RAID DRIVER S: Orphan F: drivers/scsi/ips.* -- GitLab From f30cf498b4277dcb20514f3f1daa89c2281f3395 Mon Sep 17 00:00:00 2001 From: Joao Pinto Date: Tue, 11 Sep 2018 13:06:30 +0100 Subject: [PATCH 1330/1692] MAINTAINERS: Add Gustavo Pimentel as DesignWare PCI maintainer Currently I am managing the Synopsys drivers & tools team (full-time) and so I am passing the pcie-designware maintenance to Gustavo. 
Signed-off-by: Joao Pinto Signed-off-by: Bjorn Helgaas CC: Gustavo Pimentel CC: Jingoo Han --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 3f3ed8fcb202..7e10ba65bfe4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11167,7 +11167,7 @@ F: drivers/pci/controller/dwc/pci-exynos.c PCI DRIVER FOR SYNOPSYS DESIGNWARE M: Jingoo Han -M: Joao Pinto +M: Gustavo Pimentel L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/designware-pcie.txt -- GitLab From 50ca031b51106b1b46162d4e9ecccb7edc95682f Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 5 Sep 2018 14:09:54 +0300 Subject: [PATCH 1331/1692] Revert "PCI: Add ACS quirk for Intel 300 series" This reverts f154a718e6cc ("PCI: Add ACS quirk for Intel 300 series"). It turns out that erratum "PCH PCIe* Controller Root Port (ACSCTLR) Appear As Read Only" has been fixed in 300 series chipsets, even though the datasheet [1] claims otherwise. To make ACS work properly on 300 series root ports, revert the faulty commit. [1] https://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/300-series-c240-series-chipset-pch-spec-update.pdf Fixes: f154a718e6cc ("PCI: Add ACS quirk for Intel 300 series") Signed-off-by: Mika Westerberg Signed-off-by: Bjorn Helgaas Cc: stable@vger.kernel.org # v4.18+ --- drivers/pci/quirks.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ef7143a274e0..6bc27b7fd452 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) * * 0x9d10-0x9d1b PCI Express Root port #{1-12} * - * The 300 series chipset suffers from the same bug so include those root - * ports here as well. - * - * 0xa32c-0xa343 PCI Express Root port #{0-24} - * * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html @@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ - case 0xa32c ... 0xa343: /* 300 series */ return true; } -- GitLab From 46feb6b495f7628a6dbf36c4e6d80faf378372d4 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 16 Aug 2018 14:06:46 -0500 Subject: [PATCH 1332/1692] switchtec: Fix Spectre v1 vulnerability p.port can is indirectly controlled by user-space, hence leading to a potential exploitation of the Spectre variant 1 vulnerability. This issue was detected with the help of Smatch: drivers/pci/switch/switchtec.c:912 ioctl_port_to_pff() warn: potential spectre issue 'pcfg->dsp_pff_inst_id' [r] Fix this by sanitizing p.port before using it to index pcfg->dsp_pff_inst_id Notice that given that speculation windows are large, the policy is to kill the speculation on the first load and not worry if it can be completed with a dependent load/store [1]. [1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2 Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Bjorn Helgaas Acked-by: Logan Gunthorpe Cc: stable@vger.kernel.org --- drivers/pci/switch/switchtec.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 9940cc70f38b..54a8b30dda38 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -14,6 +14,8 @@ #include #include +#include + MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); @@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev, default: if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id)) return -EINVAL; + p.port = array_index_nospec(p.port, + ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1); p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]); break; } -- GitLab From 34fb6bf9b13aef4ca14224f2175ecd189e98160b Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 5 Sep 2018 14:35:41 -0600 Subject: [PATCH 1333/1692] PCI: pciehp: Fix hot-add vs powerfault detection order If both hot-add and power fault were observed in a single interrupt, we handled the hot-add first, then the power fault, in this path: pciehp_ist if (events & (PDC | DLLSC)) pciehp_handle_presence_or_link_change case OFF_STATE: pciehp_enable_slot __pciehp_enable_slot board_added pciehp_power_on_slot ctrl->power_fault_detected = 0 pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC) pciehp_green_led_on(p_slot) # power LED on pciehp_set_attention_status(p_slot, 0) # attention LED off if ((events & PFD) && !ctrl->power_fault_detected) ctrl->power_fault_detected = 1 pciehp_set_attention_status(1) # attention LED on pciehp_green_led_off(slot) # power LED off This left the attention indicator on (even though the hot-add succeeded) and the power indicator off (even though the slot power was on). Fix this by checking for power faults before checking for new devices. Prior to 0e94916e6091, this was successful because everything was chained through work queues and the order was: INT_PRESENCE_ON -> INT_POWER_FAULT -> ENABLE_REQ The ENABLE_REQ cleared the power fault at the end, but now everything is handled inline with the interrupt thread, such that the work ENABLE_REQ was doing happens before power fault handling now. 
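To illustrate why the order matters for the indicators, here is a tiny
stand-alone sketch (hypothetical helpers, not pciehp code) of both orderings
when a presence change and a power fault arrive in the same interrupt:
whichever handler runs last decides the final LED state, so the fault check
has to run before the successful hot-add.

  /* Hypothetical model of the two slot indicators; not driver code. */
  #include <stdio.h>

  static const char *attention, *power_led;

  static void handle_power_fault(void) { attention = "on";  power_led = "off"; }
  static void handle_hot_add_ok(void)  { attention = "off"; power_led = "on";  }

  int main(void)
  {
          /* Old order: add handled first, fault second -> wrong final LEDs. */
          handle_hot_add_ok();
          handle_power_fault();
          printf("old order: attention=%s, power=%s\n", attention, power_led);

          /* New order: fault first, successful add second -> correct LEDs. */
          handle_power_fault();
          handle_hot_add_ok();
          printf("new order: attention=%s, power=%s\n", attention, power_led);
          return 0;
  }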
Fixes: 0e94916e6091 ("PCI: pciehp: Handle events synchronously") Signed-off-by: Keith Busch [bhelgaas: changelog] Signed-off-by: Bjorn Helgaas Reviewed-by: Lukas Wunner --- drivers/pci/hotplug/pciehp_hpc.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 7136e3430925..a938abdb41ce 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot) u16 slot_status; int retval; - /* Clear sticky power-fault bit from previous power failures */ + /* Clear power-fault bit from previous power failures */ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (slot_status & PCI_EXP_SLTSTA_PFD) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, @@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) pciehp_handle_button_press(slot); } + /* Check Power Fault Detected */ + if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { + ctrl->power_fault_detected = 1; + ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot)); + pciehp_set_attention_status(slot, 1); + pciehp_green_led_off(slot); + } + /* * Disable requests have higher priority than Presence Detect Changed * or Data Link Layer State Changed events. @@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) pciehp_handle_presence_or_link_change(slot, events); up_read(&ctrl->reset_lock); - /* Check Power Fault Detected */ - if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { - ctrl->power_fault_detected = 1; - ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot)); - pciehp_set_attention_status(slot, 1); - pciehp_green_led_off(slot); - } - pci_config_pm_runtime_put(pdev); wake_up(&ctrl->requester); return IRQ_HANDLED; -- GitLab From 0ee03d936cbb300309ed6154ac1cc12b63e9785f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2018 10:57:13 -0300 Subject: [PATCH 1334/1692] tools headers uapi: Update tools's copy of linux/perf_event.h To get the changes in: 09121255c784 ("perf/UAPI: Clearly mark __PERF_SAMPLE_CALLCHAIN_EARLY as internal use") This cures the following warning during perf's build: Warning: Kernel ABI header at 'tools/include/uapi/linux/perf_event.h' differs from latest version at 'include/uapi/linux/perf_event.h' diff -u tools/include/uapi/linux/perf_event.h include/uapi/linux/perf_event.h Cc: Peter Zijlstra Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-2vvwh2o19orn56di0ksrtgzr@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index eeb787b1c53c..f35eb72739c0 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -144,7 +144,7 @@ enum perf_event_sample_format { PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ - __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, + __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */ }; /* -- GitLab From f9e6e4351e0bb0811a8b3696679cc6050e4f5947 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2018 11:00:54 -0300 Subject: [PATCH 1335/1692] tools headers uapi: Update tools's copy of asm-generic/unistd.h To get the changes in: db7a2d1809a5 ("asm-generic: unistd.h: Wire up sys_rseq") That wires 
up the new 'rsec' system call, which will automagically support that syscall in the syscall table used by 'perf trace' on arm/arm64. This cures the following warning during perf's build: Warning: Kernel ABI header at 'tools/include/uapi/asm-generic/unistd.h' differs from latest version at 'include/uapi/asm-generic/unistd.h' diff -u tools/include/uapi/asm-generic/unistd.h include/uapi/asm-generic/unistd.h Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Arnd Bergmann Cc: David Ahern Cc: Hendrik Brueckner Cc: Jiri Olsa Cc: Kim Phillips Cc: Mathieu Desnoyers Cc: Michael Ellerman Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Thomas Richter Cc: Wang Nan Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-vt7k2itnitp1t9p3dp7qeb08@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/asm-generic/unistd.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index 42990676a55e..df4bedb9b01c 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free) __SYSCALL(__NR_statx, sys_statx) #define __NR_io_pgetevents 292 __SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) +#define __NR_rseq 293 +__SYSCALL(__NR_rseq, sys_rseq) #undef __NR_syscalls -#define __NR_syscalls 293 +#define __NR_syscalls 294 /* * 32 bit systems traditionally used different -- GitLab From 434ea1bfbfc707f5fed9292df6a9b91dfb8e41f2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2018 11:07:56 -0300 Subject: [PATCH 1336/1692] tools headers uapi: Update tools's copy of drm/drm.h To get the changes in: d67b6a206507 ("drm: writeback: Add client capability for exposing writeback connectors") This is for an argument to a DRM ioctl, which is not being prettyfied in the 'perf trace' DRM ioctl beautifier, but will now that syscalls are starting to have pointer arguments augmented via BPF. This time around this just cures the following warning during perf's build: Warning: Kernel ABI header at 'tools/include/uapi/drm/drm.h' differs from latest version at 'include/uapi/drm/drm.h' diff -u tools/include/uapi/drm/drm.h include/uapi/drm/drm.h Cc: Adrian Hunter Cc: Brian Starkey Cc: David Ahern Cc: Eric Anholt Cc: Jiri Olsa Cc: Liviu Dudau Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Sean Paul Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-n7qib1bac6mc6w9oke7r4qdc@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/drm/drm.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index 9c660e1688ab..300f336633f2 100644 --- a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h @@ -687,6 +687,15 @@ struct drm_get_cap { */ #define DRM_CLIENT_CAP_ASPECT_RATIO 4 +/** + * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS + * + * If set to 1, the DRM core will expose special connectors to be used for + * writing back to memory the scene setup in the commit. 
Depends on client + * also supporting DRM_CLIENT_CAP_ATOMIC + */ +#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5 + /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ struct drm_set_client_cap { __u64 capability; -- GitLab From 17dc7af70e89db773a7213f0b4270c69236a63ab Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 6 Sep 2018 20:01:43 +0100 Subject: [PATCH 1337/1692] drm/i915/overlay: Allocate physical registers from stolen MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Given that we are now reasonably confident in our ability to detect and reserve the stolen memory (physical memory reserved for graphics by the BIOS) for ourselves on most machines, we can put it to use. In this case, we need a page to hold the overlay registers. On an i915g running MythTv, H Buus noticed that commit 6a2c4232ece145d8b5a8f95f767bd6d0d2d2f2bb Author: Chris Wilson Date: Tue Nov 4 04:51:40 2014 -0800 drm/i915: Make the physical object coherent with GTT introduced stuttering into his video playback. After discarding the likely suspect of it being the physical cursor updates, we were left with the use of the phys object for the overlay. And lo, if we completely avoid using the phys object (allocated just once on module load!) by switching to stolen memory, the stuttering goes away. For lack of a better explanation, claim victory and kill two birds with one stone. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107600 Fixes: 6a2c4232ece1 ("drm/i915: Make the physical object coherent with GTT") Signed-off-by: Chris Wilson Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20180906190144.1272-1-chris@chris-wilson.co.uk (cherry picked from commit c8124d399224d626728e2ffb95a1d564a7c06968) Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/intel_overlay.c | 228 +++++++++------------------ 1 file changed, 75 insertions(+), 153 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index c2f10d899329..443dfaefd7a6 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -181,8 +181,9 @@ struct intel_overlay { u32 brightness, contrast, saturation; u32 old_xscale, old_yscale; /* register access */ - u32 flip_addr; struct drm_i915_gem_object *reg_bo; + struct overlay_registers __iomem *regs; + u32 flip_addr; /* flip handling */ struct i915_gem_active last_flip; }; @@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, PCI_DEVFN(0, 0), I830_CLOCK_GATE, val); } -static struct overlay_registers __iomem * -intel_overlay_map_regs(struct intel_overlay *overlay) -{ - struct drm_i915_private *dev_priv = overlay->i915; - struct overlay_registers __iomem *regs; - - if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) - regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; - else - regs = io_mapping_map_wc(&dev_priv->ggtt.iomap, - overlay->flip_addr, - PAGE_SIZE); - - return regs; -} - -static void intel_overlay_unmap_regs(struct intel_overlay *overlay, - struct overlay_registers __iomem *regs) -{ - if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) - io_mapping_unmap(regs); -} - static void intel_overlay_submit_request(struct intel_overlay *overlay, struct i915_request *rq, i915_gem_retire_fn retire) @@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, struct drm_i915_gem_object *new_bo, struct put_image_params *params) { - int ret, tmp_width; - struct 
overlay_registers __iomem *regs; - bool scale_changed = false; + struct overlay_registers __iomem *regs = overlay->regs; struct drm_i915_private *dev_priv = overlay->i915; u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; + bool scale_changed = false; struct i915_vma *vma; + int ret, tmp_width; lockdep_assert_held(&dev_priv->drm.struct_mutex); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); @@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (!overlay->active) { u32 oconfig; - regs = intel_overlay_map_regs(overlay); - if (!regs) { - ret = -ENOMEM; - goto out_unpin; - } + oconfig = OCONF_CC_OUT_8BIT; if (IS_GEN4(dev_priv)) oconfig |= OCONF_CSC_MODE_BT709; oconfig |= pipe == 0 ? OCONF_PIPE_A : OCONF_PIPE_B; iowrite32(oconfig, ®s->OCONFIG); - intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_on(overlay); if (ret != 0) goto out_unpin; } - regs = intel_overlay_map_regs(overlay); - if (!regs) { - ret = -ENOMEM; - goto out_unpin; - } - iowrite32((params->dst_y << 16) | params->dst_x, ®s->DWINPOS); iowrite32((params->dst_h << 16) | params->dst_w, ®s->DWINSZ); @@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, iowrite32(overlay_cmd_reg(params), ®s->OCMD); - intel_overlay_unmap_regs(overlay, regs); - ret = intel_overlay_continue(overlay, vma, scale_changed); if (ret) goto out_unpin; @@ -901,7 +866,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int intel_overlay_switch_off(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - struct overlay_registers __iomem *regs; int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); @@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) if (ret != 0) return ret; - regs = intel_overlay_map_regs(overlay); - iowrite32(0, ®s->OCMD); - intel_overlay_unmap_regs(overlay, regs); + iowrite32(0, &overlay->regs->OCMD); return intel_overlay_off(overlay); } @@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, struct drm_intel_overlay_attrs *attrs = data; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_overlay *overlay; - struct overlay_registers __iomem *regs; int ret; overlay = dev_priv->overlay; @@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, overlay->contrast = attrs->contrast; overlay->saturation = attrs->saturation; - regs = intel_overlay_map_regs(overlay); - if (!regs) { - ret = -ENOMEM; - goto out_unlock; - } - - update_reg_attrs(overlay, regs); - - intel_overlay_unmap_regs(overlay, regs); + update_reg_attrs(overlay, overlay->regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { if (IS_GEN2(dev_priv)) @@ -1386,12 +1339,47 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, return ret; } +static int get_registers(struct intel_overlay *overlay, bool use_phys) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE); + if (obj == NULL) + obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_put_bo; + } + + if (use_phys) + overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl); + else + overlay->flip_addr = i915_ggtt_offset(vma); + overlay->regs = i915_vma_pin_iomap(vma); + 
i915_vma_unpin(vma); + + if (IS_ERR(overlay->regs)) { + err = PTR_ERR(overlay->regs); + goto err_put_bo; + } + + overlay->reg_bo = obj; + return 0; + +err_put_bo: + i915_gem_object_put(obj); + return err; +} + void intel_setup_overlay(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay; - struct drm_i915_gem_object *reg_bo; - struct overlay_registers __iomem *regs; - struct i915_vma *vma = NULL; int ret; if (!HAS_OVERLAY(dev_priv)) @@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) if (!overlay) return; - mutex_lock(&dev_priv->drm.struct_mutex); - if (WARN_ON(dev_priv->overlay)) - goto out_free; - overlay->i915 = dev_priv; - reg_bo = NULL; - if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) - reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE); - if (reg_bo == NULL) - reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE); - if (IS_ERR(reg_bo)) - goto out_free; - overlay->reg_bo = reg_bo; - - if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) { - ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); - if (ret) { - DRM_ERROR("failed to attach phys overlay regs\n"); - goto out_free_bo; - } - overlay->flip_addr = reg_bo->phys_handle->busaddr; - } else { - vma = i915_gem_object_ggtt_pin(reg_bo, NULL, - 0, PAGE_SIZE, PIN_MAPPABLE); - if (IS_ERR(vma)) { - DRM_ERROR("failed to pin overlay register bo\n"); - ret = PTR_ERR(vma); - goto out_free_bo; - } - overlay->flip_addr = i915_ggtt_offset(vma); - - ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); - if (ret) { - DRM_ERROR("failed to move overlay register bo into the GTT\n"); - goto out_unpin_bo; - } - } - - /* init all values */ overlay->color_key = 0x0101fe; overlay->color_key_enabled = true; overlay->brightness = -19; @@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) init_request_active(&overlay->last_flip, NULL); - regs = intel_overlay_map_regs(overlay); - if (!regs) - goto out_unpin_bo; + mutex_lock(&dev_priv->drm.struct_mutex); + + ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv)); + if (ret) + goto out_free; + + ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true); + if (ret) + goto out_reg_bo; - memset_io(regs, 0, sizeof(struct overlay_registers)); - update_polyphase_filter(regs); - update_reg_attrs(overlay, regs); + mutex_unlock(&dev_priv->drm.struct_mutex); - intel_overlay_unmap_regs(overlay, regs); + memset_io(overlay->regs, 0, sizeof(struct overlay_registers)); + update_polyphase_filter(overlay->regs); + update_reg_attrs(overlay, overlay->regs); dev_priv->overlay = overlay; - mutex_unlock(&dev_priv->drm.struct_mutex); - DRM_INFO("initialized overlay support\n"); + DRM_INFO("Initialized overlay support.\n"); return; -out_unpin_bo: - if (vma) - i915_vma_unpin(vma); -out_free_bo: - i915_gem_object_put(reg_bo); +out_reg_bo: + i915_gem_object_put(overlay->reg_bo); out_free: mutex_unlock(&dev_priv->drm.struct_mutex); kfree(overlay); - return; } void intel_cleanup_overlay(struct drm_i915_private *dev_priv) { - if (!dev_priv->overlay) + struct intel_overlay *overlay; + + overlay = fetch_and_zero(&dev_priv->overlay); + if (!overlay) return; - /* The bo's should be free'd by the generic code already. + /* + * The bo's should be free'd by the generic code already. * Furthermore modesetting teardown happens beforehand so the - * hardware should be off already */ - WARN_ON(dev_priv->overlay->active); + * hardware should be off already. 
+ */ + WARN_ON(overlay->active); + + i915_gem_object_put(overlay->reg_bo); - i915_gem_object_put(dev_priv->overlay->reg_bo); - kfree(dev_priv->overlay); + kfree(overlay); } #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) @@ -1498,37 +1455,11 @@ struct intel_overlay_error_state { u32 isr; }; -static struct overlay_registers __iomem * -intel_overlay_map_regs_atomic(struct intel_overlay *overlay) -{ - struct drm_i915_private *dev_priv = overlay->i915; - struct overlay_registers __iomem *regs; - - if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) - /* Cast to make sparse happy, but it's wc memory anyway, so - * equivalent to the wc io mapping on X86. */ - regs = (struct overlay_registers __iomem *) - overlay->reg_bo->phys_handle->vaddr; - else - regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap, - overlay->flip_addr); - - return regs; -} - -static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, - struct overlay_registers __iomem *regs) -{ - if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) - io_mapping_unmap_atomic(regs); -} - struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay = dev_priv->overlay; struct intel_overlay_error_state *error; - struct overlay_registers __iomem *regs; if (!overlay || !overlay->active) return NULL; @@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) error->isr = I915_READ(ISR); error->base = overlay->flip_addr; - regs = intel_overlay_map_regs_atomic(overlay); - if (!regs) - goto err; - - memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); - intel_overlay_unmap_regs_atomic(overlay, regs); + memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs)); return error; - -err: - kfree(error); - return NULL; } void -- GitLab From 10492ee8ed9188d6d420e1f79b2b9bdbc0624e65 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 25 Apr 2018 07:29:22 -0700 Subject: [PATCH 1338/1692] mfd: omap-usb-host: Fix dts probe of children It currently only works if the parent bus uses "simple-bus". We currently try to probe children with non-existing compatible values. And we're missing .probe. I noticed this while testing devices configured to probe using ti-sysc interconnect target module driver. For that we also may want to rebind the driver, so let's remove __init and __exit. 
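For readers unfamiliar with the two registration styles, here is a minimal sketch of the
pattern this patch switches to. The "example" names are placeholders and not code from
this driver; it only illustrates why keeping .probe in the driver struct and using
platform_driver_register() allows later unbind/rebind, which the one-shot
platform_driver_probe() style does not:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            dev_info(&pdev->dev, "probed\n");
            return 0;
    }

    static struct platform_driver example_driver = {
            .driver = { .name = "example" },
            /* .probe stays around after init, so sysfs unbind/bind or
             * deferred probing can run it again later. */
            .probe  = example_probe,
    };
    module_platform_driver(example_driver);

    MODULE_LICENSE("GPL v2");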
Signed-off-by: Tony Lindgren Acked-by: Roger Quadros Signed-off-by: Lee Jones --- drivers/mfd/omap-usb-host.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index e11ab12fbdf2..800986a79704 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev, } static const struct of_device_id usbhs_child_match_table[] = { - { .compatible = "ti,omap-ehci", }, - { .compatible = "ti,omap-ohci", }, + { .compatible = "ti,ehci-omap", }, + { .compatible = "ti,ohci-omap3", }, { } }; @@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = { .pm = &usbhsomap_dev_pm_ops, .of_match_table = usbhs_omap_dt_ids, }, + .probe = usbhs_omap_probe, .remove = usbhs_omap_remove, }; @@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI"); -static int __init omap_usbhs_drvinit(void) +static int omap_usbhs_drvinit(void) { - return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe); + return platform_driver_register(&usbhs_omap_driver); } /* @@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void) */ fs_initcall_sync(omap_usbhs_drvinit); -static void __exit omap_usbhs_drvexit(void) +static void omap_usbhs_drvexit(void) { platform_driver_unregister(&usbhs_omap_driver); } -- GitLab From 0210c156d7fd330bce1c2c842bee9d27f1c5dfeb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2018 11:18:58 -0300 Subject: [PATCH 1339/1692] tools headers uapi: Update tools's copies of kvm headers To get the changes in: a449938297e5 ("KVM: s390: Add huge page enablement control") 8fcc4b5923af ("kvm: nVMX: Introduce KVM_CAP_NESTED_STATE") be26b3a73413 ("arm64: KVM: export the capability to set guest SError syndrome") b7b27facc7b5 ("arm/arm64: KVM: Add KVM_GET/SET_VCPU_EVENTS") b0960b9569db ("KVM: arm: Add 32bit get/set events support") a3da7b4a3be5 ("KVM: s390: add etoken support for guests") This makes 'perf trace' automagically get aware of these new ioctls: $ cp include/uapi/linux/kvm.h tools/include/uapi/linux/kvm.h $ tools/perf/trace/beauty/kvm_ioctl.sh > /tmp/after $ diff -u /tmp/before /tmp/after --- /tmp/before 2018-09-11 11:18:29.173207586 -0300 +++ /tmp/after 2018-09-11 11:18:38.488200446 -0300 @@ -84,6 +84,8 @@ [0xbb] = "MEMORY_ENCRYPT_REG_REGION", [0xbc] = "MEMORY_ENCRYPT_UNREG_REGION", [0xbd] = "HYPERV_EVENTFD", + [0xbe] = "GET_NESTED_STATE", + [0xbf] = "SET_NESTED_STATE", [0xe0] = "CREATE_DEVICE", [0xe1] = "SET_DEVICE_ATTR", [0xe2] = "G And cures the following warning during perf's build: Warning: Kernel ABI header at 'tools/include/uapi/linux/kvm.h' differs from latest version at 'include/uapi/linux/kvm.h' diff -u tools/include/uapi/linux/kvm.h include/uapi/linux/kvm.h Cc: Adrian Hunter Cc: Christian Borntraeger Cc: Cornelia Huck Cc: David Ahern Cc: David Hildenbrand Cc: Dongjiu Geng Cc: Eduardo Habkost Cc: James Morse Cc: Janosch Frank Cc: Jim Mattson Cc: Jiri Olsa Cc: Marc Zyngier Cc: Namhyung Kim Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-2vvwh2o19orn56di0ksrtgzr@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/arm/include/uapi/asm/kvm.h | 13 +++++++++ tools/arch/arm64/include/uapi/asm/kvm.h | 13 +++++++++ tools/arch/s390/include/uapi/asm/kvm.h | 5 +++- tools/arch/x86/include/uapi/asm/kvm.h | 
37 +++++++++++++++++++++++++ tools/include/uapi/linux/kvm.h | 6 ++++ 5 files changed, 73 insertions(+), 1 deletion(-) diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 16e006f708ca..4602464ebdfb 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -27,6 +27,7 @@ #define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_VCPU_EVENTS #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -125,6 +126,18 @@ struct kvm_sync_regs { struct kvm_arch_memory_slot { }; +/* for KVM_GET/SET_VCPU_EVENTS */ +struct kvm_vcpu_events { + struct { + __u8 serror_pending; + __u8 serror_has_esr; + /* Align it to 8 bytes */ + __u8 pad[6]; + __u64 serror_esr; + } exception; + __u32 reserved[12]; +}; + /* If you need to interpret the index values, here is the key: */ #define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 #define KVM_REG_ARM_COPROC_SHIFT 16 diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 4e76630dd655..97c3478ee6e7 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -39,6 +39,7 @@ #define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_VCPU_EVENTS #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -154,6 +155,18 @@ struct kvm_sync_regs { struct kvm_arch_memory_slot { }; +/* for KVM_GET/SET_VCPU_EVENTS */ +struct kvm_vcpu_events { + struct { + __u8 serror_pending; + __u8 serror_has_esr; + /* Align it to 8 bytes */ + __u8 pad[6]; + __u64 serror_esr; + } exception; + __u32 reserved[12]; +}; + /* If you need to interpret the index values, here is the key: */ #define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 #define KVM_REG_ARM_COPROC_SHIFT 16 diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 4cdaa55fabfe..9a50f02b9894 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h @@ -4,7 +4,7 @@ /* * KVM s390 specific structures and definitions * - * Copyright IBM Corp. 2008 + * Copyright IBM Corp. 2008, 2018 * * Author(s): Carsten Otte * Christian Borntraeger @@ -225,6 +225,7 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_FPRS (1UL << 8) #define KVM_SYNC_GSCB (1UL << 9) #define KVM_SYNC_BPBC (1UL << 10) +#define KVM_SYNC_ETOKEN (1UL << 11) /* length and alignment of the sdnx as a power of two */ #define SDNXC 8 #define SDNXL (1UL << SDNXC) @@ -258,6 +259,8 @@ struct kvm_sync_regs { struct { __u64 reserved1[2]; __u64 gscb[4]; + __u64 etoken; + __u64 etoken_extension; }; }; }; diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index c535c2fdea13..86299efa804a 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -378,4 +378,41 @@ struct kvm_sync_regs { #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) +#define KVM_STATE_NESTED_GUEST_MODE 0x00000001 +#define KVM_STATE_NESTED_RUN_PENDING 0x00000002 + +#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 +#define KVM_STATE_NESTED_SMM_VMXON 0x00000002 + +struct kvm_vmx_nested_state { + __u64 vmxon_pa; + __u64 vmcs_pa; + + struct { + __u16 flags; + } smm; +}; + +/* for KVM_CAP_NESTED_STATE */ +struct kvm_nested_state { + /* KVM_STATE_* flags */ + __u16 flags; + + /* 0 for VMX, 1 for SVM. */ + __u16 format; + + /* 128 for SVM, 128 + VMCS size for VMX. 
*/ + __u32 size; + + union { + /* VMXON, VMCS */ + struct kvm_vmx_nested_state vmx; + + /* Pad the header to 128 bytes. */ + __u8 pad[120]; + }; + + __u8 data[0]; +}; + #endif /* _ASM_X86_KVM_H */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index b6270a3b38e9..07548de5c988 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -949,6 +949,9 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_GET_MSR_FEATURES 153 #define KVM_CAP_HYPERV_EVENTFD 154 #define KVM_CAP_HYPERV_TLBFLUSH 155 +#define KVM_CAP_S390_HPAGE_1M 156 +#define KVM_CAP_NESTED_STATE 157 +#define KVM_CAP_ARM_INJECT_SERROR_ESR 158 #ifdef KVM_CAP_IRQ_ROUTING @@ -1391,6 +1394,9 @@ struct kvm_enc_region { /* Available with KVM_CAP_HYPERV_EVENTFD */ #define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd) +/* Available with KVM_CAP_NESTED_STATE */ +#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state) +#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state) /* Secure Encrypted Virtualization command */ enum sev_cmd_id { -- GitLab From 7f28785c41f4d5635e69c183b3de8ea19093ccef Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2018 13:12:40 -0300 Subject: [PATCH 1340/1692] tools headers uapi: Update tools's copy of linux/vhost.h To get the changes in: c48300c92ad9 ("vhost: fix VHOST_GET_BACKEND_FEATURES ioctl request definition") This makes 'perf trace' and other tools in the future using its beautifiers in a libbeauty.so library be able to translate these new ioctl to strings: $ tools/perf/trace/beauty/vhost_virtio_ioctl.sh > /tmp/after $ diff -u /tmp/before /tmp/after --- /tmp/before 2018-09-11 13:10:57.923038244 -0300 +++ /tmp/after 2018-09-11 13:11:20.329012685 -0300 @@ -15,6 +15,7 @@ [0x22] = "SET_VRING_ERR", [0x23] = "SET_VRING_BUSYLOOP_TIMEOUT", [0x24] = "GET_VRING_BUSYLOOP_TIMEOUT", + [0x25] = "SET_BACKEND_FEATURES", [0x30] = "NET_SET_BACKEND", [0x40] = "SCSI_SET_ENDPOINT", [0x41] = "SCSI_CLEAR_ENDPOINT", @@ -27,4 +28,5 @@ static const char *vhost_virtio_ioctl_read_cmds[] = { [0x00] = "GET_FEATURES", [0x12] = "GET_VRING_BASE", + [0x26] = "GET_BACKEND_FEATURES", }; $ We'll also use this to be able to express syscall filters using symbolic these symbolic names, something like: # perf trace --all-cpus -e ioctl(cmd=*GET_FEATURES) This silences the following warning during perf's build: Warning: Kernel ABI header at 'tools/include/uapi/linux/vhost.h' differs from latest version at 'include/uapi/linux/vhost.h' diff -u tools/include/uapi/linux/vhost.h include/uapi/linux/vhost.h Cc: Adrian Hunter Cc: David Ahern Cc: David S. 
Miller Cc: Gleb Fotengauer-Malinovskiy Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-35x71oei2hdui9u0tarpimbq@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/vhost.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h index c51f8e5cc608..84c3de89696a 100644 --- a/tools/include/uapi/linux/vhost.h +++ b/tools/include/uapi/linux/vhost.h @@ -65,6 +65,7 @@ struct vhost_iotlb_msg { }; #define VHOST_IOTLB_MSG 0x1 +#define VHOST_IOTLB_MSG_V2 0x2 struct vhost_msg { int type; @@ -74,6 +75,15 @@ struct vhost_msg { }; }; +struct vhost_msg_v2 { + __u32 type; + __u32 reserved; + union { + struct vhost_iotlb_msg iotlb; + __u8 padding[64]; + }; +}; + struct vhost_memory_region { __u64 guest_phys_addr; __u64 memory_size; /* bytes */ @@ -160,6 +170,14 @@ struct vhost_memory { #define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \ struct vhost_vring_state) +/* Set or get vhost backend capability */ + +/* Use message type V2 */ +#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 + +#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) +#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64) + /* VHOST_NET specific defines */ /* Attach virtio net ring to a raw socket, or tap device. -- GitLab From e54192b48da75f025ae4b277925eaf6aca1d13bd Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 11 Sep 2018 09:28:14 -0500 Subject: [PATCH 1341/1692] of: fix phandle cache creation for DTs with no phandles With commit 0b3ce78e90fc ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()"), a G3 PowerMac fails to boot. The root cause is the DT for this system has no phandle properties when booted with BootX. of_populate_phandle_cache() does not handle the case of no phandles correctly. The problem is roundup_pow_of_two() for 0 is undefined. The implementation subtracts 1 underflowing and then things are in the weeds. Fixes: 0b3ce78e90fc ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()") Cc: stable@vger.kernel.org # 4.17+ Reported-by: Finn Thain Tested-by: Stan Johnson Reviewed-by: Frank Rowand Cc: Benjamin Herrenschmidt Signed-off-by: Rob Herring --- drivers/of/base.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/of/base.c b/drivers/of/base.c index 9095b8290150..74eaedd5b860 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -140,6 +140,9 @@ void of_populate_phandle_cache(void) if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) phandles++; + if (!phandles) + goto out; + cache_entries = roundup_pow_of_two(phandles); phandle_cache_mask = cache_entries - 1; -- GitLab From 1ebafd1561a05ea7868f46d88420fe9323f981f6 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 10 Sep 2018 20:30:38 +0200 Subject: [PATCH 1342/1692] staging: vboxvideo: Fix IRQs no longer working Commit 1daddbc8dec5 ("staging: vboxvideo: Update driver to use drm_dev_register.") replaced the obsolere drm_get_pci_dev() with normal pci probe and remove functions. But the new vbox_pci_probe() is missing a pci_enable_device() call, causing interrupts to not be delivered. This causes resizes of the vm window to not get seen by the drm/kms code. This commit adds the missing pci_enable_device() call, fixing this. 
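The shape of the fix is the standard PCI probe/unwind pattern, sketched below with
generic placeholder names (example_*) rather than the vboxvideo ones; it is only meant
to show where pci_enable_device() and its matching pci_disable_device() belong:

    #include <linux/pci.h>

    static int example_setup(struct pci_dev *pdev)
    {
            return 0;       /* placeholder for the real driver setup */
    }

    static int example_pci_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
    {
            int ret;

            /* Enable the device before relying on its BARs or on
             * interrupt delivery; skipping this is what caused the
             * missing-interrupt symptom described above. */
            ret = pci_enable_device(pdev);
            if (ret)
                    return ret;

            ret = example_setup(pdev);
            if (ret)
                    goto err_disable;

            return 0;

    err_disable:
            /* Unwind in reverse order of the setup steps. */
            pci_disable_device(pdev);
            return ret;
    }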
Fixes: 1daddbc8dec5 ("staging: vboxvideo: Update driver to use ...") Cc: Fabio Rafael da Rosa Signed-off-by: Hans de Goede Reviewed-by: Nicholas Mc Guire Signed-off-by: Greg Kroah-Hartman --- drivers/staging/vboxvideo/vbox_drv.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c index da92c493f157..69cc508af1bc 100644 --- a/drivers/staging/vboxvideo/vbox_drv.c +++ b/drivers/staging/vboxvideo/vbox_drv.c @@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ret = PTR_ERR(dev); goto err_drv_alloc; } + + ret = pci_enable_device(pdev); + if (ret) + goto err_pci_enable; + dev->pdev = pdev; pci_set_drvdata(pdev, dev); @@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_drv_dev_register: vbox_driver_unload(dev); err_vbox_driver_load: + pci_disable_device(pdev); + err_pci_enable: drm_dev_put(dev); err_drv_alloc: return ret; -- GitLab From 65aac17423284634169489f298169c3e3f099cc7 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 10 Sep 2018 20:30:39 +0200 Subject: [PATCH 1343/1692] staging: vboxvideo: Change address of scanout buffer on page-flip Commit 2408898e3b6c ("staging: vboxvideo: Add page-flip support") only calls vbox_crtc_do_set_base() on page-flips, but despite that function's name it only pins the new fb, unpins the old fb and sets vbox_crtc->fb_offset. It does not program the hardware to scan out at the new vbox_crtc->fb_offset value. This was causing only every other frame (assuming page-flipping between 2 buffers) to be shown since we kept scanning out of the old (now unpinned!) buffer. This commit fixes this by adding code to vbox_crtc_page_flip() to tell the hardware to scanout from the new fb_offset. Fixes: 2408898e3b6c ("staging: vboxvideo: Add page-flip support") Cc: Steve Longerbeam Signed-off-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman --- drivers/staging/vboxvideo/vbox_mode.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c index a83eac8668d0..79836c8fb909 100644 --- a/drivers/staging/vboxvideo/vbox_mode.c +++ b/drivers/staging/vboxvideo/vbox_mode.c @@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc, if (rc) return rc; + mutex_lock(&vbox->hw_mutex); + vbox_set_view(crtc); + vbox_do_modeset(crtc, &crtc->mode); + mutex_unlock(&vbox->hw_mutex); + spin_lock_irqsave(&drm->event_lock, flags); if (event) -- GitLab From 01c5f85aebaaddfd7e6051fb2ec80c1d4b463554 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 11 Sep 2018 10:59:53 -0600 Subject: [PATCH 1344/1692] blk-cgroup: increase number of supported policies After merging the iolatency policy, we potentially now have 4 policies being registered, but only support 3. This causes one of them to fail loading. Takashi reports that BFQ no longer works for him, because it fails to load due to policy registration failure. Bump to 5 policies, and also add a warning for when we have exceeded the global amount. If we have to touch this again, we should switch to a dynamic scheme instead. 
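For context, policy registration is a linear scan over a fixed-size table. The function
below is a stripped-down model of that slot search (example_find_free_slot() is a
hypothetical name, and the array is simplified to illustrate the limit only):

    static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

    static int example_find_free_slot(void)
    {
            int i;

            /* With blk-throttle, the cgroup-aware I/O schedulers (CFQ,
             * BFQ) and now iolatency all built in, a too-small
             * BLKCG_MAX_POLS makes the last policy to register fail. */
            for (i = 0; i < BLKCG_MAX_POLS; i++)
                    if (!blkcg_policy[i])
                            return i;

            pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
            return -ENOSPC;
    }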
Reported-by: Takashi Iwai Reviewed-by: Jeff Moyer Tested-by: Takashi Iwai Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 4 +++- include/linux/blkdev.h | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index c19f9078da1e..c630e02836a8 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1510,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol) for (i = 0; i < BLKCG_MAX_POLS; i++) if (!blkcg_policy[i]) break; - if (i >= BLKCG_MAX_POLS) + if (i >= BLKCG_MAX_POLS) { + pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n"); goto err_unlock; + } /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */ if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d6869e0e2b64..6980014357d4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -54,7 +54,7 @@ struct blk_stat_callback; * Maximum number of blkcg policies allowed to be registered concurrently. * Defined here to simplify include dependency. */ -#define BLKCG_MAX_POLS 3 +#define BLKCG_MAX_POLS 5 typedef void (rq_end_io_fn)(struct request *, blk_status_t); -- GitLab From 5db48a8d01319620d390bf6d9da5410be14f98e3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2018 14:10:52 -0300 Subject: [PATCH 1345/1692] tools headers uapi: Update tools's copy of linux/if_link.h To get the changes in: 3e7a50ceb11e ("net: report min and max mtu network device settings") 2756f68c3149 ("net: bridge: add support for backup port") a25717d2b604 ("xdp: support simultaneous driver and hw XDP attachment") 4f91da26c811 ("xdp: add per mode attributes for attached programs") f203b76d7809 ("xfrm: Add virtual xfrm interfaces") Silencing this libbpf build warning: Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h' Cc: Adrian Hunter Cc: Daniel Borkmann Cc: David Ahern Cc: David S. 
Miller Cc: Jakub Kicinski Cc: Jiri Olsa Cc: Namhyung Kim Cc: Nikolay Aleksandrov Cc: Steffen Klassert Cc: Stephen Hemminger Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-xd9ztioa894zemv8ag8kg64u@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/if_link.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index cf01b6824244..43391e2d1153 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -164,6 +164,8 @@ enum { IFLA_CARRIER_UP_COUNT, IFLA_CARRIER_DOWN_COUNT, IFLA_NEW_IFINDEX, + IFLA_MIN_MTU, + IFLA_MAX_MTU, __IFLA_MAX }; @@ -334,6 +336,7 @@ enum { IFLA_BRPORT_GROUP_FWD_MASK, IFLA_BRPORT_NEIGH_SUPPRESS, IFLA_BRPORT_ISOLATED, + IFLA_BRPORT_BACKUP_PORT, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -459,6 +462,16 @@ enum { #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) +/* XFRM section */ +enum { + IFLA_XFRM_UNSPEC, + IFLA_XFRM_LINK, + IFLA_XFRM_IF_ID, + __IFLA_XFRM_MAX +}; + +#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1) + enum macsec_validation_type { MACSEC_VALIDATE_DISABLED = 0, MACSEC_VALIDATE_CHECK = 1, @@ -920,6 +933,7 @@ enum { XDP_ATTACHED_DRV, XDP_ATTACHED_SKB, XDP_ATTACHED_HW, + XDP_ATTACHED_MULTI, }; enum { @@ -928,6 +942,9 @@ enum { IFLA_XDP_ATTACHED, IFLA_XDP_FLAGS, IFLA_XDP_PROG_ID, + IFLA_XDP_DRV_PROG_ID, + IFLA_XDP_SKB_PROG_ID, + IFLA_XDP_HW_PROG_ID, __IFLA_XDP_MAX, }; -- GitLab From 03db8b583d1c3c84963e08e2abf6c79081da5c31 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 7 Sep 2018 11:51:16 +0300 Subject: [PATCH 1346/1692] perf tools: Fix maps__find_symbol_by_name() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 1c5aae7710bb ("perf machine: Create maps for x86 PTI entry trampolines") revealed a problem with maps__find_symbol_by_name() that resulted in probes not being found e.g. $ sudo perf probe xsk_mmap xsk_mmap is out of .text, skip it. Probe point 'xsk_mmap' not found. Error: Failed to add events. maps__find_symbol_by_name() can optionally return the map of the found symbol. It can get the map wrong because, in fact, the symbol is found on the map's dso, not allowing for the possibility that the dso has more than one map. Fix by always checking the map contains the symbol. 
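Since the same dso can back more than one map (for example after the x86 PTI entry
trampoline maps were introduced), finding the symbol in a map's dso says nothing about
that particular map covering it. A hypothetical caller shows what the fix guarantees;
the 'maps' variable and the debug print are assumed, not taken from perf itself:

    struct map *map = NULL;
    struct symbol *sym;

    sym = maps__find_symbol_by_name(maps, "xsk_mmap", &map);
    if (sym && map)
            /* 'map' now really contains 'sym', i.e.
             * map->unmap_ip(map, sym->start) lies in [map->start, map->end). */
            pr_debug("%s found in %s\n", sym->name, map->dso->name);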
Reported-by: Björn Töpel Signed-off-by: Adrian Hunter Tested-by: Björn Töpel Cc: Jiri Olsa Cc: stable@vger.kernel.org Fixes: 1c5aae7710bb ("perf machine: Create maps for x86 PTI entry trampolines") Link: http://lkml.kernel.org/r/20180907085116.25782-1-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 36d0763311ef..6a6929f208b4 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -576,6 +576,13 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg, return NULL; } +static bool map__contains_symbol(struct map *map, struct symbol *sym) +{ + u64 ip = map->unmap_ip(map, sym->start); + + return ip >= map->start && ip < map->end; +} + struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp) { @@ -591,6 +598,10 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, if (sym == NULL) continue; + if (!map__contains_symbol(pos, sym)) { + sym = NULL; + continue; + } if (mapp != NULL) *mapp = pos; goto out; -- GitLab From b1f382178d150f256c1cf95b9341fda6eb764459 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 11 Sep 2018 13:31:16 -0400 Subject: [PATCH 1347/1692] ext4: close race between direct IO and ext4_break_layouts() If the refcount of a page is lowered between the time that it is returned by dax_busy_page() and when the refcount is again checked in ext4_break_layouts() => ___wait_var_event(), the waiting function ext4_wait_dax_page() will never be called. This means that ext4_break_layouts() will still have 'retry' set to false, so we'll stop looping and never check the refcount of other pages in this inode. Instead, always continue looping as long as dax_layout_busy_page() gives us a page which it found with an elevated refcount. 
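To make the race easier to see, here is the pre-fix control flow reduced to its
skeleton; wait_for_page_idle() is a stand-in for the ___wait_var_event() construct,
not a real function:

    do {
            retry = false;
            page = dax_layout_busy_page(inode->i_mapping);
            if (!page)
                    return 0;

            /* 'retry' is only set from ext4_wait_dax_page(), i.e. only if
             * we actually had to sleep. If the refcount already dropped
             * to 1 before the wait condition is first evaluated, 'retry'
             * stays false... */
            error = wait_for_page_idle(page, ei, &retry);
    } while (error == 0 && retry);  /* ...and we stop looping here,
                                     * never re-checking other pages. */

Dropping 'retry' and looping purely on the error code, as in the hunk below, closes
that window.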
Signed-off-by: Ross Zwisler Reviewed-by: Jan Kara Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org --- fs/ext4/inode.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 694f31364206..723058bfe43b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4195,9 +4195,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, return 0; } -static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock) +static void ext4_wait_dax_page(struct ext4_inode_info *ei) { - *did_unlock = true; up_write(&ei->i_mmap_sem); schedule(); down_write(&ei->i_mmap_sem); @@ -4207,14 +4206,12 @@ int ext4_break_layouts(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct page *page; - bool retry; int error; if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem))) return -EINVAL; do { - retry = false; page = dax_layout_busy_page(inode->i_mapping); if (!page) return 0; @@ -4222,8 +4219,8 @@ int ext4_break_layouts(struct inode *inode) error = ___wait_var_event(&page->_refcount, atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, 0, 0, - ext4_wait_dax_page(ei, &retry)); - } while (error == 0 && retry); + ext4_wait_dax_page(ei)); + } while (error == 0); return error; } -- GitLab From 7893499e3022542f6522847837487019ea83f142 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 10 Sep 2018 15:52:55 +0200 Subject: [PATCH 1348/1692] drm/amdgpu: fix error handling in amdgpu_cs_user_fence_chunk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Slowly leaking memory one page at a time :) Signed-off-by: Christian König Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 135d9d8c9506..c5cc648a1b4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -40,6 +40,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, { struct drm_gem_object *gobj; unsigned long size; + int r; gobj = drm_gem_object_lookup(p->filp, data->handle); if (gobj == NULL) @@ -51,20 +52,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, p->uf_entry.tv.shared = true; p->uf_entry.user_pages = NULL; - size = amdgpu_bo_size(p->uf_entry.robj); - if (size != PAGE_SIZE || (data->offset + 8) > size) - return -EINVAL; - - *offset = data->offset; - drm_gem_object_put_unlocked(gobj); + size = amdgpu_bo_size(p->uf_entry.robj); + if (size != PAGE_SIZE || (data->offset + 8) > size) { + r = -EINVAL; + goto error_unref; + } + if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { - amdgpu_bo_unref(&p->uf_entry.robj); - return -EINVAL; + r = -EINVAL; + goto error_unref; } + *offset = data->offset; + return 0; + +error_unref: + amdgpu_bo_unref(&p->uf_entry.robj); + return r; } static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, -- GitLab From ba79fde47b9b3a04932a5385e5530616a77d4536 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 30 Aug 2018 15:55:54 +0200 Subject: [PATCH 1349/1692] drm/amdgpu: add amdgpu_vm_pt_parent helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a function to get the parent of a PD/PT. 
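As a usage sketch (not part of this patch), the helper makes it straightforward to walk
from any page table towards the root; 'entry' and 'level' are assumed local variables:

    struct amdgpu_vm_pt *pt;
    unsigned int level = 0;

    /* Count how many levels sit above 'entry'; the walk stops once
     * amdgpu_vm_pt_parent() returns NULL at the root page directory. */
    for (pt = amdgpu_vm_pt_parent(entry); pt; pt = amdgpu_vm_pt_parent(pt))
            level++;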
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 36 ++++++++++++++++---------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f5a960079705..92c34e4290a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -337,6 +337,24 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, amdgpu_vm_bo_evicted(base); } +/** + * amdgpu_vm_pt_parent - get the parent page directory + * + * @pt: child page table + * + * Helper to get the parent entry for the child page table. NULL if we are at + * the root page directory. + */ +static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) +{ + struct amdgpu_bo *parent = pt->base.bo->parent; + + if (!parent) + return NULL; + + return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list); +} + /** * amdgpu_vm_get_pd_bo - add the VM PD to a validation list * @@ -1206,24 +1224,16 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, } while (!list_empty(&vm->relocated)) { - struct amdgpu_vm_bo_base *bo_base, *parent; struct amdgpu_vm_pt *pt, *entry; - struct amdgpu_bo *bo; - bo_base = list_first_entry(&vm->relocated, - struct amdgpu_vm_bo_base, - vm_status); - amdgpu_vm_bo_idle(bo_base); + entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt, + base.vm_status); + amdgpu_vm_bo_idle(&entry->base); - bo = bo_base->bo->parent; - if (!bo) + pt = amdgpu_vm_pt_parent(entry); + if (!pt) continue; - parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base, - bo_list); - pt = container_of(parent, struct amdgpu_vm_pt, base); - entry = container_of(bo_base, struct amdgpu_vm_pt, base); - amdgpu_vm_update_pde(¶ms, vm, pt, entry); if (!vm->use_cpu_for_update && -- GitLab From 1c860a022f65224d6e8af71cc9f1411cb779f666 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 30 Aug 2018 15:55:54 +0200 Subject: [PATCH 1350/1692] drm/amdgpu: add amdgpu_vm_update_func MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add helper to call the update function for both BO and shadow. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 +++++++++++++++++--------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 92c34e4290a8..e793a7855bb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1113,6 +1113,22 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } +/** + * amdgpu_vm_update_func - helper to call update function + * + * Calls the update function for both the given BO as well as its shadow. 
+ */ +static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params, + struct amdgpu_bo *bo, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint64_t flags) +{ + if (bo->shadow) + params->func(params, bo->shadow, pe, addr, count, incr, flags); + params->func(params, bo, pe, addr, count, incr, flags); +} + /* * amdgpu_vm_update_pde - update a single level in the hierarchy * @@ -1142,9 +1158,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params, level += params->adev->vm_manager.root_level; amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); pde = (entry - parent->entries) * 8; - if (bo->shadow) - params->func(params, bo->shadow, pde, pt, 1, 0, flags); - params->func(params, bo, pde, pt, 1, 0, flags); + amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags); } /* @@ -1351,9 +1365,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags); pde = (entry - parent->entries) * 8; - if (parent->base.bo->shadow) - p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags); - p->func(p, parent->base.bo, pde, dst, 1, 0, flags); + amdgpu_vm_update_func(p, parent->base.bo, pde, dst, 1, 0, flags); } /** @@ -1403,11 +1415,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, pt = entry->base.bo; pe_start = (addr & mask) * 8; - if (pt->shadow) - params->func(params, pt->shadow, pe_start, dst, nptes, - AMDGPU_GPU_PAGE_SIZE, flags); - params->func(params, pt, pe_start, dst, nptes, - AMDGPU_GPU_PAGE_SIZE, flags); + amdgpu_vm_update_func(params, pt, pe_start, dst, nptes, + AMDGPU_GPU_PAGE_SIZE, flags); + } return 0; -- GitLab From d8de8260a45aae8f74af77eae9a162bdc0ed48d2 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Mon, 10 Sep 2018 18:43:58 -0400 Subject: [PATCH 1351/1692] drm/amdgpu: Fix SDMA TO after GPU reset v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After GPU reset amdgpu_vm_clear_bo triggers VM flush but job->vm_pd_addr is not set causing SDMA TO. v2: Per advise by Christian König avoid flushing VM for jobs where job->vm_pd_addr wasn't explicitly set. v3: Shortcut vm_flush_needed early. Fixes cbd5285 drm/amdgpu: move setting the GART addr into TTM. 
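Reduced to its essence, the change is a sentinel-value pattern; both lines are taken
from the hunks in the diff below, just shown side by side:

    /* at job allocation time */
    (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

    /* in amdgpu_vm_flush(): only jobs that explicitly set a page
     * directory address get a VM flush emitted for them */
    vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
                       job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;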
Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 34e54d41f5ca..755f733bf0d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -73,6 +73,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, amdgpu_sync_create(&(*job)->sync); amdgpu_sync_create(&(*job)->sched_sync); (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); + (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e793a7855bb3..136b00412dc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -870,7 +870,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ } gds_switch_needed &= !!ring->funcs->emit_gds_switch; - vm_flush_needed &= !!ring->funcs->emit_vm_flush; + vm_flush_needed &= !!ring->funcs->emit_vm_flush && + job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; -- GitLab From 0165de983272d1fae0809ed9db47c46a412279bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 10 Sep 2018 15:52:55 +0200 Subject: [PATCH 1352/1692] drm/amdgpu: fix error handling in amdgpu_cs_user_fence_chunk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Slowly leaking memory one page at a time :) Signed-off-by: Christian König Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1b5a0a73d770..b31d121a876b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, { struct drm_gem_object *gobj; unsigned long size; + int r; gobj = drm_gem_object_lookup(p->filp, data->handle); if (gobj == NULL) @@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, p->uf_entry.tv.shared = true; p->uf_entry.user_pages = NULL; - size = amdgpu_bo_size(p->uf_entry.robj); - if (size != PAGE_SIZE || (data->offset + 8) > size) - return -EINVAL; - - *offset = data->offset; - drm_gem_object_put_unlocked(gobj); + size = amdgpu_bo_size(p->uf_entry.robj); + if (size != PAGE_SIZE || (data->offset + 8) > size) { + r = -EINVAL; + goto error_unref; + } + if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { - amdgpu_bo_unref(&p->uf_entry.robj); - return -EINVAL; + r = -EINVAL; + goto error_unref; } + *offset = data->offset; + return 0; + +error_unref: + amdgpu_bo_unref(&p->uf_entry.robj); + return r; } static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, -- GitLab From 6a92b11169a65b3f8cc512c75a252cbd0d096ba0 Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Tue, 11 Sep 2018 15:55:38 -0400 Subject: [PATCH 1353/1692] x86/EISA: Don't probe EISA bus for Xen PV guests For unprivileged Xen PV guests this is normal memory and ioremap will not be able to properly map it. 
While at it, since ioremap may return NULL, add a test for pointer's validity. Reported-by: Andy Smith Signed-off-by: Boris Ostrovsky Signed-off-by: Thomas Gleixner Cc: hpa@zytor.com Cc: xen-devel@lists.xenproject.org Cc: jgross@suse.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180911195538.23289-1-boris.ostrovsky@oracle.com --- arch/x86/kernel/eisa.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c index f260e452e4f8..e8c8c5d78dbd 100644 --- a/arch/x86/kernel/eisa.c +++ b/arch/x86/kernel/eisa.c @@ -7,11 +7,17 @@ #include #include +#include + static __init int eisa_bus_probe(void) { - void __iomem *p = ioremap(0x0FFFD9, 4); + void __iomem *p; + + if (xen_pv_domain() && !xen_initial_domain()) + return 0; - if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) + p = ioremap(0x0FFFD9, 4); + if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24)) EISA_bus = 1; iounmap(p); return 0; -- GitLab From 425333bf3a7743715c17e503049d0837d6c4a603 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 10 Sep 2018 18:29:07 +1000 Subject: [PATCH 1354/1692] KVM: PPC: Avoid marking DMA-mapped pages dirty in real mode At the moment the real mode handler of H_PUT_TCE calls iommu_tce_xchg_rm() which in turn reads the old TCE and if it was a valid entry, marks the physical page dirty if it was mapped for writing. Since it is in real mode, realmode_pfn_to_page() is used instead of pfn_to_page() to get the page struct. However SetPageDirty() itself reads the compound page head and returns a virtual address for the head page struct and setting dirty bit for that kills the system. This adds additional dirty bit tracking into the MM/IOMMU API for use in the real mode. Note that this does not change how VFIO and KVM (in virtual mode) set this bit. The KVM (real mode) changes include: - use the lowest bit of the cached host phys address to carry the dirty bit; - mark pages dirty when they are unpinned which happens when the preregistered memory is released which always happens in virtual mode; - add mm_iommu_ua_mark_dirty_rm() helper to set delayed dirty bit; - change iommu_tce_xchg_rm() to take the kvm struct for the mm to use in the new mm_iommu_ua_mark_dirty_rm() helper; - move iommu_tce_xchg_rm() to book3s_64_vio_hv.c (which is the only caller anyway) to reduce the real mode KVM and IOMMU knowledge across different subsystems. This removes realmode_pfn_to_page() as it is not used anymore. While we at it, remove some EXPORT_SYMBOL_GPL() as that code is for the real mode only and modules cannot call it anyway. 
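The encoding itself is compact enough to show in isolation. Because the cached host
physical addresses are at least 4K aligned, bit 0 is free to carry the delayed dirty
flag; everything below is collected from the hunks that follow:

    #define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY 0x1
    #define MM_IOMMU_TABLE_GROUP_PAGE_MASK  ~(SZ_4K - 1)

    /* real mode, when a writable TCE is replaced: remember that the
     * page was written to by flagging the cached host physical address */
    *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;

    /* the ua_to_hpa helpers strip the flag before handing the address out */
    *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

    /* virtual mode, at unpin time: transfer the flag to the struct page */
    if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
            SetPageDirty(page);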
Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/book3s/64/pgtable.h | 1 - arch/powerpc/include/asm/iommu.h | 2 - arch/powerpc/include/asm/mmu_context.h | 1 + arch/powerpc/kernel/iommu.c | 25 ---------- arch/powerpc/kvm/book3s_64_vio_hv.c | 39 ++++++++++++---- arch/powerpc/mm/init_64.c | 49 -------------------- arch/powerpc/mm/mmu_context_iommu.c | 34 ++++++++++++-- 7 files changed, 62 insertions(+), 89 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 13a688fc8cd0..2fdc865ca374 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start, return hash__vmemmap_remove_mapping(start, page_size); } #endif -struct page *realmode_pfn_to_page(unsigned long pfn); static inline pte_t pmd_pte(pmd_t pmd) { diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index ab3a4fba38e3..3d4b88cb8599 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev); extern int __init tce_iommu_bus_notifier_init(void); extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction); -extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry, - unsigned long *hpa, enum dma_data_direction *direction); #else static inline void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index b2f89b621b15..b694d6af1150 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa); +extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); #endif diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index af7a20dc6e09..19b4c628f3be 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, } EXPORT_SYMBOL_GPL(iommu_tce_xchg); -#ifdef CONFIG_PPC_BOOK3S_64 -long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry, - unsigned long *hpa, enum dma_data_direction *direction) -{ - long ret; - - ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction); - - if (!ret && ((*direction == DMA_FROM_DEVICE) || - (*direction == DMA_BIDIRECTIONAL))) { - struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT); - - if (likely(pg)) { - SetPageDirty(pg); - } else { - tbl->it_ops->exchange_rm(tbl, entry, hpa, direction); - ret = -EFAULT; - } - } - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm); -#endif - int iommu_take_ownership(struct iommu_table *tbl) { unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 506a4d400458..6821ead4b4eb 100644 
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry) +static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, + unsigned long entry, unsigned long *hpa, + enum dma_data_direction *direction) +{ + long ret; + + ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction); + + if (!ret && ((*direction == DMA_FROM_DEVICE) || + (*direction == DMA_BIDIRECTIONAL))) { + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry); + /* + * kvmppc_rm_tce_iommu_do_map() updates the UA cache after + * calling this so we still get here a valid UA. + */ + if (pua && *pua) + mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua)); + } + + return ret; +} + +static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl, + unsigned long entry) { unsigned long hpa = 0; enum dma_data_direction dir = DMA_NONE; - iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); + iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); } static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, @@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, unsigned long hpa = 0; long ret; - if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir)) + if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir)) /* * real mode xchg can fail if struct page crosses * a page boundary @@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry); if (ret) - iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); + iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); return ret; } @@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) return H_CLOSED; - ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); + ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); if (ret) { mm_iommu_mapped_dec(mem); /* @@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, return ret; WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(stit->tbl, entry); + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); } kvmppc_tce_put(stt, entry, tce); @@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, goto unlock_exit; WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(stit->tbl, entry); + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); } kvmppc_tce_put(stt, entry + i, tce); @@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, return ret; WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(stit->tbl, entry); + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); } } diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 51ce091914f9..7a9886f98b0c 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr, { } -/* - * We do not have access to the sparsemem vmemmap, so we fallback to - * walking the list of sparsemem blocks which we already maintain for - * the sake of crashdump. In the long run, we might want to maintain - * a tree if performance of that linear walk becomes a problem. 
- * - * realmode_pfn_to_page functions can fail due to: - * 1) As real sparsemem blocks do not lay in RAM continously (they - * are in virtual address space which is not available in the real mode), - * the requested page struct can be split between blocks so get_page/put_page - * may fail. - * 2) When huge pages are used, the get_page/put_page API will fail - * in real mode as the linked addresses in the page struct are virtual - * too. - */ -struct page *realmode_pfn_to_page(unsigned long pfn) -{ - struct vmemmap_backing *vmem_back; - struct page *page; - unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; - unsigned long pg_va = (unsigned long) pfn_to_page(pfn); - - for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) { - if (pg_va < vmem_back->virt_addr) - continue; - - /* After vmemmap_list entry free is possible, need check all */ - if ((pg_va + sizeof(struct page)) <= - (vmem_back->virt_addr + page_size)) { - page = (struct page *) (vmem_back->phys + pg_va - - vmem_back->virt_addr); - return page; - } - } - - /* Probably that page struct is split between real pages */ - return NULL; -} -EXPORT_SYMBOL_GPL(realmode_pfn_to_page); - -#else - -struct page *realmode_pfn_to_page(unsigned long pfn) -{ - struct page *page = pfn_to_page(pfn); - return page; -} -EXPORT_SYMBOL_GPL(realmode_pfn_to_page); - #endif /* CONFIG_SPARSEMEM_VMEMMAP */ #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index c9ee9e23845f..56c2234cc6ae 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -18,11 +18,15 @@ #include #include #include +#include #include #include static DEFINE_MUTEX(mem_list_mutex); +#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY 0x1 +#define MM_IOMMU_TABLE_GROUP_PAGE_MASK ~(SZ_4K - 1) + struct mm_iommu_table_group_mem_t { struct list_head next; struct rcu_head rcu; @@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem) if (!page) continue; + if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY) + SetPageDirty(page); + put_page(page); mem->hpas[i] = 0; } @@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm, return ret; } -EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm); struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, unsigned long ua, unsigned long entries) @@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, if (pageshift > mem->pageshift) return -EFAULT; - *hpa = *va | (ua & ~PAGE_MASK); + *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK); return 0; } @@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, if (!pa) return -EFAULT; - *hpa = *pa | (ua & ~PAGE_MASK); + *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK); return 0; } -EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm); + +extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua) +{ + struct mm_iommu_table_group_mem_t *mem; + long entry; + void *va; + unsigned long *pa; + + mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE); + if (!mem) + return; + + entry = (ua - mem->ua) >> PAGE_SHIFT; + va = &mem->hpas[entry]; + + pa = (void *) vmalloc_to_phys(va); + if (!pa) + return; + + *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY; +} long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem) { -- GitLab From 71d29f43b6332badc5598c656616a62575e83342 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 11 Sep 2018 20:48:34 
+1000 Subject: [PATCH 1355/1692] KVM: PPC: Book3S HV: Don't use compound_order to determine host mapping size THP paths can defer splitting compound pages until after the actual remap and TLB flushes to split a huge PMD/PUD. This causes radix partition scope page table mappings to get out of synch with the host qemu page table mappings. This results in random memory corruption in the guest when running with THP. The easiest way to reproduce is use KVM balloon to free up a lot of memory in the guest and then shrink the balloon to give the memory back, while some work is being done in the guest. Cc: David Gibson Cc: "Aneesh Kumar K.V" Cc: kvm-ppc@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Signed-off-by: Nicholas Piggin Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 91 +++++++++++--------------- 1 file changed, 37 insertions(+), 54 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index fd6e8c13685f..933c574e1cf7 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -525,8 +525,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr) { struct kvm *kvm = vcpu->kvm; - unsigned long mmu_seq, pte_size; - unsigned long gpa, gfn, hva, pfn; + unsigned long mmu_seq; + unsigned long gpa, gfn, hva; struct kvm_memory_slot *memslot; struct page *page = NULL; long ret; @@ -623,9 +623,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, */ hva = gfn_to_hva_memslot(memslot, gfn); if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) { - pfn = page_to_pfn(page); upgrade_write = true; } else { + unsigned long pfn; + /* Call KVM generic code to do the slow-path check */ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, writing, upgrade_p); @@ -639,63 +640,45 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } } - /* See if we can insert a 1GB or 2MB large PTE here */ - level = 0; - if (page && PageCompound(page)) { - pte_size = PAGE_SIZE << compound_order(compound_head(page)); - if (pte_size >= PUD_SIZE && - (gpa & (PUD_SIZE - PAGE_SIZE)) == - (hva & (PUD_SIZE - PAGE_SIZE))) { - level = 2; - pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1); - } else if (pte_size >= PMD_SIZE && - (gpa & (PMD_SIZE - PAGE_SIZE)) == - (hva & (PMD_SIZE - PAGE_SIZE))) { - level = 1; - pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1); - } - } - /* - * Compute the PTE value that we need to insert. + * Read the PTE from the process' radix tree and use that + * so we get the shift and attribute bits. */ - if (page) { - pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE | - _PAGE_ACCESSED; - if (writing || upgrade_write) - pgflags |= _PAGE_WRITE | _PAGE_DIRTY; - pte = pfn_pte(pfn, __pgprot(pgflags)); + local_irq_disable(); + ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); + pte = *ptep; + local_irq_enable(); + + /* Get pte level from shift/size */ + if (shift == PUD_SHIFT && + (gpa & (PUD_SIZE - PAGE_SIZE)) == + (hva & (PUD_SIZE - PAGE_SIZE))) { + level = 2; + } else if (shift == PMD_SHIFT && + (gpa & (PMD_SIZE - PAGE_SIZE)) == + (hva & (PMD_SIZE - PAGE_SIZE))) { + level = 1; } else { - /* - * Read the PTE from the process' radix tree and use that - * so we get the attribute bits. 
- */ - local_irq_disable(); - ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); - pte = *ptep; - local_irq_enable(); - if (shift == PUD_SHIFT && - (gpa & (PUD_SIZE - PAGE_SIZE)) == - (hva & (PUD_SIZE - PAGE_SIZE))) { - level = 2; - } else if (shift == PMD_SHIFT && - (gpa & (PMD_SIZE - PAGE_SIZE)) == - (hva & (PMD_SIZE - PAGE_SIZE))) { - level = 1; - } else if (shift && shift != PAGE_SHIFT) { - /* Adjust PFN */ - unsigned long mask = (1ul << shift) - PAGE_SIZE; - pte = __pte(pte_val(pte) | (hva & mask)); - } - pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED); - if (writing || upgrade_write) { - if (pte_val(pte) & _PAGE_WRITE) - pte = __pte(pte_val(pte) | _PAGE_DIRTY); - } else { - pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY)); + level = 0; + if (shift > PAGE_SHIFT) { + /* + * If the pte maps more than one page, bring over + * bits from the virtual address to get the real + * address of the specific single page we want. + */ + unsigned long rpnmask = (1ul << shift) - PAGE_SIZE; + pte = __pte(pte_val(pte) | (hva & rpnmask)); } } + pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED); + if (writing || upgrade_write) { + if (pte_val(pte) & _PAGE_WRITE) + pte = __pte(pte_val(pte) | _PAGE_DIRTY); + } else { + pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY)); + } + /* Allocate space in the tree and write the PTE */ ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); -- GitLab From 679fcae46c8b2352bba3485d521da070cfbe68e6 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 4 Sep 2018 11:47:40 -0700 Subject: [PATCH 1356/1692] scsi: iscsi: target: Don't use stack buffer for scatterlist Fedora got a bug report of a crash with iSCSI: kernel BUG at include/linux/scatterlist.h:143! ... RIP: 0010:iscsit_do_crypto_hash_buf+0x154/0x180 [iscsi_target_mod] ... Call Trace: ? iscsi_target_tx_thread+0x200/0x200 [iscsi_target_mod] iscsit_get_rx_pdu+0x4cd/0xa90 [iscsi_target_mod] ? native_sched_clock+0x3e/0xa0 ? iscsi_target_tx_thread+0x200/0x200 [iscsi_target_mod] iscsi_target_rx_thread+0x81/0xf0 [iscsi_target_mod] kthread+0x120/0x140 ? kthread_create_worker_on_cpu+0x70/0x70 ret_from_fork+0x3a/0x50 This is a BUG_ON for using a stack buffer with a scatterlist. There are two cases that trigger this bug. Switch to using a dynamically allocated buffer for one case and do not assign a NULL buffer in another case. Signed-off-by: Laura Abbott Reviewed-by: Mike Christie Signed-off-by: Martin K. 
Petersen --- drivers/target/iscsi/iscsi_target.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 9cdfccbdd06f..cc756a123fd8 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash, sg_init_table(sg, ARRAY_SIZE(sg)); sg_set_buf(sg, buf, payload_length); - sg_set_buf(sg + 1, pad_bytes, padding); + if (padding) + sg_set_buf(sg + 1, pad_bytes, padding); ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding); @@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) static void iscsit_get_rx_pdu(struct iscsi_conn *conn) { int ret; - u8 buffer[ISCSI_HDR_LEN], opcode; + u8 *buffer, opcode; u32 checksum = 0, digest = 0; struct kvec iov; + buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return; + while (!kthread_should_stop()) { /* * Ensure that both TX and RX per connection kthreads @@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) */ iscsit_thread_check_cpumask(conn, current, 0); - memset(buffer, 0, ISCSI_HDR_LEN); memset(&iov, 0, sizeof(struct kvec)); iov.iov_base = buffer; @@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); if (ret != ISCSI_HDR_LEN) { iscsit_rx_thread_wait_for_tcp(conn); - return; + break; } if (conn->conn_ops->HeaderDigest) { @@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); if (ret != ISCSI_CRC_LEN) { iscsit_rx_thread_wait_for_tcp(conn); - return; + break; } iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer, @@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) } if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) - return; + break; opcode = buffer[0] & ISCSI_OPCODE_MASK; @@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) " while in Discovery Session, rejecting.\n", opcode); iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buffer); - return; + break; } ret = iscsi_target_rx_opcode(conn, buffer); if (ret < 0) - return; + break; } + + kfree(buffer); } int iscsi_target_rx_thread(void *arg) -- GitLab From cbe3fd39d223f14b1c60c80fe9347a3dd08c2edb Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 8 Sep 2018 11:42:27 +0300 Subject: [PATCH 1357/1692] scsi: qla2xxx: Fix an endian bug in fcpcmd_is_corrupted() We should first do the le16_to_cpu endian conversion and then apply the FCP_CMD_LENGTH_MASK mask. Fixes: 5f35509db179 ("qla2xxx: Terminate exchange if corrupted") Signed-off-by: Dan Carpenter Acked-by: Quinn Tran Acked-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_target.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index fecf96f0225c..199d3ba1916d 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -374,8 +374,8 @@ struct atio_from_isp { static inline int fcpcmd_is_corrupted(struct atio *atio) { if (atio->entry_type == ATIO_TYPE7 && - (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < - FCP_CMD_LENGTH_MIN)) + ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) < + FCP_CMD_LENGTH_MIN)) return 1; else return 0; -- GitLab From d8a5281035895cdb5ff77756eff72966ec76edd0 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Wed, 5 Sep 2018 16:08:03 +0000 Subject: [PATCH 1358/1692] PCI: Fix faulty logic in pci_reset_bus() The pci_reset_bus() function calls pci_probe_reset_slot() to determine whether to call the slot or bus reset. The check has faulty logic in that it does not account for pci_probe_reset_slot() being able to return an errno. Fix by only calling the slot reset when the function returns 0. Fixes: 811c5cb37df4 ("PCI: Unify try slot and bus reset API") Signed-off-by: Dennis Dalessandro Signed-off-by: Bjorn Helgaas Reviewed-by: Michael J. Ruhl Cc: Sinan Kaya --- drivers/pci/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 29ff9619b5fa..30b260332a10 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5200,7 +5200,7 @@ static int __pci_reset_bus(struct pci_bus *bus) */ int pci_reset_bus(struct pci_dev *pdev) { - return pci_probe_reset_slot(pdev->slot) ? + return (!pci_probe_reset_slot(pdev->slot)) ? __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); } EXPORT_SYMBOL_GPL(pci_reset_bus); -- GitLab From bfc456060d0cbcf6902a436d358b60cb1534668c Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Fri, 31 Aug 2018 10:34:14 -0700 Subject: [PATCH 1359/1692] IB/hfi1,PCI: Allow bus reset while probing Calling into the new API to reset the secondary bus results in a deadlock. This occurs because the device/bus is already locked at probe time. Reverting back to the old behavior while the API is improved. Link: https://bugzilla.kernel.org/show_bug.cgi?id=200985 Fixes: c6a44ba950d1 ("PCI: Rename pci_try_reset_bus() to pci_reset_bus()") Fixes: 409888e0966e ("IB/hfi1: Use pci_try_reset_bus() for initiating PCI Secondary Bus Reset") Signed-off-by: Dennis Dalessandro Signed-off-by: Bjorn Helgaas Reviewed-by: Michael J. Ruhl Cc: Sinan Kaya --- drivers/infiniband/hw/hfi1/pcie.c | 11 ++++------- drivers/pci/pci.c | 1 + include/linux/pci.h | 3 +++ 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index eec83757d55f..6c967dde58e7 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd) } /* - * A secondary bus reset (SBR) issues a hot reset to our device. - * The following routine does a 1s wait after the reset is dropped - * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 - - * Conventional Reset, paragraph 3, line 35 also says that a 1s - * delay after a reset is required. Per spec requirements, - * the link is either working or not after that point. + * This is an end around to do an SBR during probe time. 
A new API needs + * to be implemented to have cleaner interface but this fixes the + * current brokenness */ - return pci_reset_bus(dev); + return pci_bridge_secondary_bus_reset(dev->bus->self); } /* diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 30b260332a10..1835f3a7aa8d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4547,6 +4547,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS); } +EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); static int pci_parent_bus_reset(struct pci_dev *dev, int probe) { diff --git a/include/linux/pci.h b/include/linux/pci.h index e72ca8dd6241..6925828f9f25 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1235,6 +1235,9 @@ void pci_bus_remove_resources(struct pci_bus *bus); int devm_request_pci_bus_resources(struct device *dev, struct list_head *resources); +/* Temporary until new and working PCI SBR API in place */ +int pci_bridge_secondary_bus_reset(struct pci_dev *dev); + #define pci_bus_for_each_resource(bus, res, i) \ for (i = 0; \ (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ -- GitLab From 9d27e39d309c93025ae6aa97236af15bef2a5f1f Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Mon, 10 Sep 2018 15:27:42 -0400 Subject: [PATCH 1360/1692] PCI: Fix enabling of PASID on RC integrated endpoints Set the eetlp_prefix_path on PCIE_EXP_TYPE_RC_END devices to allow PASID to be enabled on them. This fixes IOMMUv2 initialization on AMD Carrizo APUs. Link: https://bugzilla.kernel.org/show_bug.cgi?id=201079 Fixes: 7ce3f912ae ("PCI: Enable PASID only if entire path supports End-End TLP prefixes") Signed-off-by: Felix Kuehling Signed-off-by: Bjorn Helgaas --- drivers/pci/probe.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ec784009a36b..201f9e5ff55c 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev) { #ifdef CONFIG_PCI_PASID struct pci_dev *bridge; + int pcie_type; u32 cap; if (!pci_is_pcie(dev)) @@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev) if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX)) return; - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + pcie_type = pci_pcie_type(dev); + if (pcie_type == PCI_EXP_TYPE_ROOT_PORT || + pcie_type == PCI_EXP_TYPE_RC_END) dev->eetlp_prefix_path = 1; else { bridge = pci_upstream_bridge(dev); -- GitLab From 8e966fab8eeb45db9f5a570ac4521f684d9696e1 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 12 Sep 2018 13:25:19 +0900 Subject: [PATCH 1361/1692] xtensa: remove unnecessary KBUILD_SRC ifeq conditional You can always prefix variant/platform header search paths with $(srctree)/ because $(srctree) is '.' for in-tree building. 
Signed-off-by: Masahiro Yamada Signed-off-by: Max Filippov --- arch/xtensa/Makefile | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 295c120ed099..d67e30faff9c 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -64,11 +64,7 @@ endif vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y)) plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y)) -ifeq ($(KBUILD_SRC),) -KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs)) -else KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs)) -endif KBUILD_DEFCONFIG := iss_defconfig -- GitLab From 4a7f50f78c221aac7253ea7059e1986eb622b0e5 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 11 Sep 2018 22:12:59 -0700 Subject: [PATCH 1362/1692] xtensa: enable SG chaining in Kconfig Signed-off-by: Max Filippov --- arch/xtensa/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 04d038f3b6fa..b9ad83a0ee5d 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -4,6 +4,7 @@ config ZONE_DMA config XTENSA def_bool y + select ARCH_HAS_SG_CHAIN select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_COHERENT_DMA_MMAP if !MMU -- GitLab From 2d946e5bcdabc1deef72d01bc92a2801c71d6d8d Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sun, 9 Sep 2018 21:26:23 +0200 Subject: [PATCH 1363/1692] MIPS: lantiq: dma: add dev pointer dma_zalloc_coherent() now crashes if no dev pointer is given. Add a dev pointer to the ltq_dma_channel structure and fill it in the driver using it. This fixes a bug introduced in kernel 4.19. Signed-off-by: Hauke Mehrtens Signed-off-by: David S. Miller --- arch/mips/include/asm/mach-lantiq/xway/xway_dma.h | 1 + arch/mips/lantiq/xway/dma.c | 4 ++-- drivers/net/ethernet/lantiq_etop.c | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h index 4901833498f7..8441b2698e64 100644 --- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h +++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h @@ -40,6 +40,7 @@ struct ltq_dma_channel { int desc; /* the current descriptor */ struct ltq_dma_desc *desc_base; /* the descriptor base */ int phys; /* physical addr */ + struct device *dev; }; enum { diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 4b9fbb6744ad..664f2f7f55c1 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c @@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch) unsigned long flags; ch->desc = 0; - ch->desc_base = dma_zalloc_coherent(NULL, + ch->desc_base = dma_zalloc_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE, &ch->phys, GFP_ATOMIC); @@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch) if (!ch->desc_base) return; ltq_dma_close(ch); - dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, + dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE, ch->desc_base, ch->phys); } EXPORT_SYMBOL_GPL(ltq_dma_free); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 7a637b51c7d2..e08301d833e2 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev) struct ltq_etop_chan *ch = &priv->ch[i]; ch->idx = ch->dma.nr = i; + ch->dma.dev = &priv->pdev->dev; if (IS_TX(i)) { ltq_dma_alloc_tx(&ch->dma); -- GitLab From 
0297c1c2eadb5bd996a873b87597af3b91c0d4ba Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sun, 9 Sep 2018 19:12:12 -0400 Subject: [PATCH 1364/1692] tcp: rate limit synflood warnings further Convert pr_info to net_info_ratelimited to limit the total number of synflood warnings. Commit 946cedccbd73 ("tcp: Change possible SYN flooding messages") rate limits synflood warnings to one per listener. Workloads that open many listener sockets can still see a high rate of log messages. Syzkaller is one frequent example. Signed-off-by: Willem de Bruijn Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4c2dd9f863f7..4cf2f7bb2802 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6367,8 +6367,8 @@ static bool tcp_syn_flood_action(const struct sock *sk, if (!queue->synflood_warned && net->ipv4.sysctl_tcp_syncookies != 2 && xchg(&queue->synflood_warned, 1) == 0) - pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", - proto, ntohs(tcp_hdr(skb)->dest), msg); + net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", + proto, ntohs(tcp_hdr(skb)->dest), msg); return want_cookie; } -- GitLab From 5a64506b5c2c3cdb29d817723205330378075448 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Mon, 10 Sep 2018 22:19:47 +0800 Subject: [PATCH 1365/1692] erspan: return PACKET_REJECT when the appropriate tunnel is not found If erspan tunnel hasn't been established, we'd better send icmp port unreachable message after receive erspan packets. Fixes: 84e54fe0a5ea ("gre: introduce native tunnel support for ERSPAN") Cc: William Tu Signed-off-by: Haishuang Yan Acked-by: William Tu Signed-off-by: David S. Miller --- net/ipv4/ip_gre.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index ae714aecc31c..85a714d36b66 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -328,6 +328,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); return PACKET_RCVD; } + return PACKET_REJECT; + drop: kfree_skb(skb); return PACKET_RCVD; -- GitLab From 51dc63e3911fbb1f0a7a32da2fe56253e2040ea4 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Mon, 10 Sep 2018 22:19:48 +0800 Subject: [PATCH 1366/1692] erspan: fix error handling for erspan tunnel When processing icmp unreachable message for erspan tunnel, tunnel id should be erspan_net_id instead of ipgre_net_id. Fixes: 84e54fe0a5ea ("gre: introduce native tunnel support for ERSPAN") Cc: William Tu Signed-off-by: Haishuang Yan Acked-by: William Tu Signed-off-by: David S. 
Miller --- net/ipv4/ip_gre.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 85a714d36b66..8cce0e9ea08c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info, if (tpi->proto == htons(ETH_P_TEB)) itn = net_generic(net, gre_tap_net_id); + else if (tpi->proto == htons(ETH_P_ERSPAN) || + tpi->proto == htons(ETH_P_ERSPAN2)) + itn = net_generic(net, erspan_net_id); else itn = net_generic(net, ipgre_net_id); -- GitLab From 6ad569019999300afd8e614d296fdc356550b77f Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Tue, 11 Sep 2018 01:51:43 +0800 Subject: [PATCH 1367/1692] r8169: Clear RTL_FLAG_TASK_*_PENDING when clearing RTL_FLAG_TASK_ENABLED After system suspend, sometimes the r8169 doesn't work when ethernet cable gets pluggued. This issue happens because rtl_reset_work() doesn't get called from rtl8169_runtime_resume(), after system suspend. In rtl_task(), RTL_FLAG_TASK_* only gets cleared if this condition is met: if (!netif_running(dev) || !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) ... If RTL_FLAG_TASK_ENABLED was cleared during system suspend while RTL_FLAG_TASK_RESET_PENDING was set, the next rtl_schedule_task() won't schedule task as the flag is still there. So in addition to clearing RTL_FLAG_TASK_ENABLED, also clears other flags. Cc: Heiner Kallweit Signed-off-by: Kai-Heng Feng Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a1f37d58e2fe..1d8631303b53 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -631,7 +631,7 @@ struct rtl8169_tc_offsets { }; enum rtl_flag { - RTL_FLAG_TASK_ENABLED, + RTL_FLAG_TASK_ENABLED = 0, RTL_FLAG_TASK_SLOW_PENDING, RTL_FLAG_TASK_RESET_PENDING, RTL_FLAG_MAX @@ -6655,7 +6655,8 @@ static int rtl8169_close(struct net_device *dev) rtl8169_update_counters(tp); rtl_lock_work(tp); - clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); rtl8169_down(dev); rtl_unlock_work(tp); @@ -6838,7 +6839,9 @@ static void rtl8169_net_suspend(struct net_device *dev) rtl_lock_work(tp); napi_disable(&tp->napi); - clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + rtl_unlock_work(tp); rtl_pll_power_down(tp); -- GitLab From cc4dfb7f70a344f24c1c71e298deea0771dadcb2 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Mon, 10 Sep 2018 18:27:26 -0700 Subject: [PATCH 1368/1692] rds: fix two RCU related problems When a rds sock is bound, it is inserted into the bind_hash_table which is protected by RCU. But when releasing rds sock, after it is removed from this hash table, it is freed immediately without respecting RCU grace period. This could cause some use-after-free as reported by syzbot. Mark the rds sock with SOCK_RCU_FREE before inserting it into the bind_hash_table, so that it would be always freed after a RCU grace period. The other problem is in rds_find_bound(), the rds sock could be freed in between rhashtable_lookup_fast() and rds_sock_addref(), so we need to extend RCU read lock protection in rds_find_bound() to close this race condition. 
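Both halves of the fix follow the standard RCU lookup idiom: the hash lookup and the reference bump must sit inside one read-side critical section, and the object may only be freed after a grace period (which is what SOCK_RCU_FREE arranges for sockets). A minimal sketch of that idiom, using illustrative names rather than the actual rds structures:

#include <linux/rhashtable.h>
#include <linux/refcount.h>

struct obj {                            /* illustrative, not struct rds_sock */
	struct rhash_head node;
	refcount_t ref;
	struct rcu_head rcu;            /* freed via call_rcu() elsewhere */
};

static struct obj *obj_lookup_and_hold(struct rhashtable *table, const void *key,
				       const struct rhashtable_params params)
{
	struct obj *o;

	rcu_read_lock();
	o = rhashtable_lookup(table, key, params);
	/* Take the reference before leaving the read-side section; once
	 * rcu_read_unlock() runs, a pending grace period may free the object. */
	if (o && !refcount_inc_not_zero(&o->ref))
		o = NULL;
	rcu_read_unlock();

	return o;
}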
Reported-and-tested-by: syzbot+8967084bcac563795dc6@syzkaller.appspotmail.com Reported-by: syzbot+93a5839deb355537440f@syzkaller.appspotmail.com Cc: Sowmini Varadhan Cc: Santosh Shilimkar Cc: rds-devel@oss.oracle.com Signed-off-by: Cong Wang Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller --- net/rds/bind.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/rds/bind.c b/net/rds/bind.c index 3ab55784b637..762d2c6788a3 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, struct rds_sock *rs; __rds_create_bind_key(key, addr, port, scope_id); - rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms); + rcu_read_lock(); + rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) rds_sock_addref(rs); else rs = NULL; + rcu_read_unlock(); rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, ntohs(port)); @@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; } + sock_set_flag(sk, SOCK_RCU_FREE); ret = rds_add_bound(rs, binding_addr, &port, scope_id); if (ret) goto out; -- GitLab From 8d2d8935d30cc2acc57a3196dc10dfa8d5cbcdab Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Mon, 6 Aug 2018 17:47:33 +0300 Subject: [PATCH 1369/1692] mei: ignore not found client in the enumeration Some of the ME clients are available only for BIOS operation and are removed during hand off to an OS. However the removal is not instant. A client may be visible on the client list when the mei driver requests for enumeration, while the subsequent request for properties will be answered with client not found error value. The default behavior for an error is to perform client reset while this error is harmless and the link reset should be prevented. This issue started to be visible due to suspend/resume timing changes. Currently reported only on the Haswell based system. Fixes: [33.564957] mei_me 0000:00:16.0: hbm: properties response: wrong status = 1 CLIENT_NOT_FOUND [33.564978] mei_me 0000:00:16.0: mei_irq_read_handler ret = -71. 
[33.565270] mei_me 0000:00:16.0: unexpected reset: dev_state = INIT_CLIENTS fw status = 1E000255 60002306 00000200 00004401 00000000 00000010 Cc: Reported-by: Heiner Kallweit Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hbm.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 09e233d4c0de..e56f3e72d57a 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) props_res = (struct hbm_props_response *)mei_msg; - if (props_res->status) { + if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) { + dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n", + props_res->me_addr); + } else if (props_res->status) { dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", props_res->status, mei_hbm_status_str(props_res->status)); return -EPROTO; + } else { + mei_hbm_me_cl_add(dev, props_res); } - mei_hbm_me_cl_add(dev, props_res); - /* request property for the next client */ if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) return -EIO; -- GitLab From c1a214ad82d7ac6f19fe48f90b13403b40ead9dc Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Thu, 23 Aug 2018 09:16:58 +0300 Subject: [PATCH 1370/1692] mei: fix use-after-free in mei_cl_write KASAN reports a use-after-free during startup, in mei_cl_write: BUG: KASAN: use-after-free in mei_cl_write+0x601/0x870 [mei] (drivers/misc/mei/client.c:1770) This is caused by commit 98e70866aacb ("mei: add support for variable length mei headers."), which changed the return value from len, to buf->size. That ends up using a stale buf pointer, because blocking call, the cb (callback) is deleted in me_cl_complete() function. However, fortunately, len remains unchanged throughout the function (and I don't see anything else that would require re-reading buf->size either), so the fix is to simply revert the change, and return len, as before. Fixes: 98e70866aacb ("mei: add support for variable length mei headers.") CC: Arnd Bergmann CC: Greg Kroah-Hartman Signed-off-by: John Hubbard Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/client.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 4ab6251d418e..ebdcf0b450e2 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1767,7 +1767,7 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) } } - rets = buf->size; + rets = len; err: cl_dbg(dev, cl, "rpm: autosuspend\n"); pm_runtime_mark_last_busy(dev->dev); -- GitLab From 69bf5313035926b0b6a6578de4f3168a8f5c19b8 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Mon, 27 Aug 2018 22:40:15 +0300 Subject: [PATCH 1371/1692] mei: bus: fix hw module get/put balance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In case the device is not connected it doesn't 'get' hw module and hence should not 'put' it on disable. 
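Put differently, the disable path may only release what the enable path actually acquired. A hedged sketch of the balanced shape (the widget type and helpers are hypothetical, not the mei bus code):

#include <linux/module.h>

struct widget {                         /* hypothetical device wrapper */
	struct module *owner;
	bool connected;
};

static int widget_disable(struct widget *w)
{
	int err = 0;

	if (!w->connected)
		goto out;               /* enable never took the module ref: skip the put */

	err = widget_do_disconnect(w);  /* hypothetical helper */
	module_put(w->owner);           /* pairs with try_module_get() done at enable time */
out:
	widget_flush_queues(w);         /* hypothetical cleanup, safe in either case */
	return err;
}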
Cc: 4.16+ Fixes:'commit 257355a44b99 ("mei: make module referencing local to the bus.c")' Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=200455 Tested-by: Georg Müller Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 7bba62a72921..13c6c9a2248a 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -616,9 +616,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev) if (err < 0) dev_err(bus->dev, "Could not disconnect from the ME client\n"); -out: mei_cl_bus_module_put(cldev); - +out: /* Flush queues and remove any pending read */ mei_cl_flush_queues(cl, NULL); mei_cl_unlink(cl); -- GitLab From 34f1166afd67f9f48a08c52f36180048908506a4 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Mon, 27 Aug 2018 22:40:16 +0300 Subject: [PATCH 1372/1692] mei: bus: need to unlink client before freeing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In case a client fails to connect in mei_cldev_enable(), the caller won't call the mei_cldev_disable leaving the client in a linked stated. Upon driver unload the client structure will be freed in mei_cl_bus_dev_release(), leaving a stale pointer on a fail_list. This will eventually end up in crash during power down flow in mei_cl_set_disonnected(). RIP: mei_cl_set_disconnected+0x5/0x260[mei] Call trace: mei_cl_all_disconnect+0x22/0x30 mei_reset+0x194/0x250 __synchronize_hardirq+0x43/0x50 _cond_resched+0x15/0x30 mei_me_intr_clear+0x20/0x100 mei_stop+0x76/0xb0 mei_me_shutdown+0x3f/0x80 pci_device_shutdown+0x34/0x60 kernel_restart+0x0e/0x30 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=200455 Fixes: 'c110cdb17148 ("mei: bus: make a client pointer always available")' Cc: 4.10+ Tested-by: Georg Müller Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 13c6c9a2248a..fc3872fe7b25 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev) cl = cldev->cl; + mutex_lock(&bus->device_lock); if (cl->state == MEI_FILE_UNINITIALIZED) { - mutex_lock(&bus->device_lock); ret = mei_cl_link(cl); - mutex_unlock(&bus->device_lock); if (ret) - return ret; + goto out; /* update pointers */ cl->cldev = cldev; } - mutex_lock(&bus->device_lock); if (mei_cl_is_connected(cl)) { ret = 0; goto out; @@ -875,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev) mei_me_cl_put(cldev->me_cl); mei_dev_bus_put(cldev->bus); + mei_cl_unlink(cldev->cl); kfree(cldev->cl); kfree(cldev); } static const struct device_type mei_cl_device_type = { - .release = mei_cl_bus_dev_release, + .release = mei_cl_bus_dev_release, }; /** -- GitLab From da1b9564e85b1d7baf66cbfabcab27e183a1db63 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 23 Aug 2018 14:29:56 +0900 Subject: [PATCH 1373/1692] android: binder: fix the race mmap and alloc_new_buf_locked There is RaceFuzzer report like below because we have no lock to close below the race between binder_mmap and binder_alloc_new_buf_locked. To close the race, let's use memory barrier so that if someone see alloc->vma is not NULL, alloc->vma_vm_mm should be never NULL. 
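Concretely, that is the usual publish/consume barrier pairing: write the dependent field, issue smp_wmb(), then set the pointer that readers test; readers that observe the pointer issue smp_rmb() before using the dependent field. A stripped-down sketch of just that pairing (illustrative struct; the real binder_alloc helpers appear in the diff below):

#include <linux/mm.h>

struct pub {                            /* illustrative, not struct binder_alloc */
	struct mm_struct *mm;
	struct vm_area_struct *vma;     /* non-NULL also promises that ->mm is valid */
};

static void pub_set_vma(struct pub *p, struct vm_area_struct *vma)
{
	if (vma)
		p->mm = vma->vm_mm;
	smp_wmb();                      /* publish ->mm before ->vma becomes visible */
	p->vma = vma;
}

static struct vm_area_struct *pub_get_vma(struct pub *p)
{
	if (!p->vma)
		return NULL;
	smp_rmb();                      /* pairs with the smp_wmb() in pub_set_vma() */
	return p->vma;
}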
(I didn't add stable mark intentionallybecause standard android userspace libraries that interact with binder (libbinder & libhwbinder) prevent the mmap/ioctl race. - from Todd) " Thread interleaving: CPU0 (binder_alloc_mmap_handler) CPU1 (binder_alloc_new_buf_locked) ===== ===== // drivers/android/binder_alloc.c // #L718 (v4.18-rc3) alloc->vma = vma; // drivers/android/binder_alloc.c // #L346 (v4.18-rc3) if (alloc->vma == NULL) { ... // alloc->vma is not NULL at this point return ERR_PTR(-ESRCH); } ... // #L438 binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); // In binder_update_page_range() #L218 // But still alloc->vma_vm_mm is NULL here if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) alloc->vma_vm_mm = vma->vm_mm; Crash Log: ================================================================== BUG: KASAN: null-ptr-deref in __atomic_add_unless include/asm-generic/atomic-instrumented.h:89 [inline] BUG: KASAN: null-ptr-deref in atomic_add_unless include/linux/atomic.h:533 [inline] BUG: KASAN: null-ptr-deref in mmget_not_zero include/linux/sched/mm.h:75 [inline] BUG: KASAN: null-ptr-deref in binder_update_page_range+0xece/0x18e0 drivers/android/binder_alloc.c:218 Write of size 4 at addr 0000000000000058 by task syz-executor0/11184 CPU: 1 PID: 11184 Comm: syz-executor0 Not tainted 4.18.0-rc3 #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.8.2-0-g33fbe13 by qemu-project.org 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x16e/0x22c lib/dump_stack.c:113 kasan_report_error mm/kasan/report.c:352 [inline] kasan_report+0x163/0x380 mm/kasan/report.c:412 check_memory_region_inline mm/kasan/kasan.c:260 [inline] check_memory_region+0x140/0x1a0 mm/kasan/kasan.c:267 kasan_check_write+0x14/0x20 mm/kasan/kasan.c:278 __atomic_add_unless include/asm-generic/atomic-instrumented.h:89 [inline] atomic_add_unless include/linux/atomic.h:533 [inline] mmget_not_zero include/linux/sched/mm.h:75 [inline] binder_update_page_range+0xece/0x18e0 drivers/android/binder_alloc.c:218 binder_alloc_new_buf_locked drivers/android/binder_alloc.c:443 [inline] binder_alloc_new_buf+0x467/0xc30 drivers/android/binder_alloc.c:513 binder_transaction+0x125b/0x4fb0 drivers/android/binder.c:2957 binder_thread_write+0xc08/0x2770 drivers/android/binder.c:3528 binder_ioctl_write_read.isra.39+0x24f/0x8e0 drivers/android/binder.c:4456 binder_ioctl+0xa86/0xf34 drivers/android/binder.c:4596 vfs_ioctl fs/ioctl.c:46 [inline] do_vfs_ioctl+0x154/0xd40 fs/ioctl.c:686 ksys_ioctl+0x94/0xb0 fs/ioctl.c:701 __do_sys_ioctl fs/ioctl.c:708 [inline] __se_sys_ioctl fs/ioctl.c:706 [inline] __x64_sys_ioctl+0x43/0x50 fs/ioctl.c:706 do_syscall_64+0x167/0x4b0 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe " Signed-off-by: Todd Kjos Signed-off-by: Minchan Kim Reviewed-by: Martijn Coenen Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder_alloc.c | 43 +++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 3f3b7b253445..64fd96eada31 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -332,6 +332,35 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, return vma ? 
-ENOMEM : -ESRCH; } + +static inline void binder_alloc_set_vma(struct binder_alloc *alloc, + struct vm_area_struct *vma) +{ + if (vma) + alloc->vma_vm_mm = vma->vm_mm; + /* + * If we see alloc->vma is not NULL, buffer data structures set up + * completely. Look at smp_rmb side binder_alloc_get_vma. + * We also want to guarantee new alloc->vma_vm_mm is always visible + * if alloc->vma is set. + */ + smp_wmb(); + alloc->vma = vma; +} + +static inline struct vm_area_struct *binder_alloc_get_vma( + struct binder_alloc *alloc) +{ + struct vm_area_struct *vma = NULL; + + if (alloc->vma) { + /* Look at description in binder_alloc_set_vma */ + smp_rmb(); + vma = alloc->vma; + } + return vma; +} + static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_alloc *alloc, size_t data_size, @@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( size_t size, data_offsets_size; int ret; - if (alloc->vma == NULL) { + if (!binder_alloc_get_vma(alloc)) { binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: binder_alloc_buf, no vma\n", alloc->pid); @@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, buffer->free = 1; binder_insert_free_buffer(alloc, buffer); alloc->free_async_space = alloc->buffer_size / 2; - barrier(); - alloc->vma = vma; - alloc->vma_vm_mm = vma->vm_mm; + binder_alloc_set_vma(alloc, vma); mmgrab(alloc->vma_vm_mm); return 0; @@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) int buffers, page_count; struct binder_buffer *buffer; - BUG_ON(alloc->vma); - buffers = 0; mutex_lock(&alloc->mutex); + BUG_ON(alloc->vma); + while ((n = rb_first(&alloc->allocated_buffers))) { buffer = rb_entry(n, struct binder_buffer, rb_node); @@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) */ void binder_alloc_vma_close(struct binder_alloc *alloc) { - WRITE_ONCE(alloc->vma, NULL); + binder_alloc_set_vma(alloc, NULL); } /** @@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; - vma = alloc->vma; + vma = binder_alloc_get_vma(alloc); if (vma) { if (!mmget_not_zero(alloc->vma_vm_mm)) goto err_mmget; -- GitLab From c55e9318871cd06e4aa10f5023cc2dcdfbb08577 Mon Sep 17 00:00:00 2001 From: "Bryant G. Ly" Date: Mon, 6 Aug 2018 08:31:00 -0500 Subject: [PATCH 1374/1692] misc: ibmvsm: Fix wrong assignment of return code Currently the assignment is flipped and rc is always 0. Signed-off-by: Bryant G. Ly Fixes: 0eca353e7ae7 ("misc: IBM Virtual Management Channel Driver (VMC)") Reviewed-by: Bradley Warrum Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/misc/ibmvmc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index 8f82bb9d11e2..b8aaa684c397 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter) retrc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, queue->msg_token, PAGE_SIZE); - retrc = rc; + rc = retrc; if (rc == H_RESOURCE) rc = ibmvmc_reset_crq_queue(adapter); -- GitLab From 86503bd35dec0ce363e9fdbf5299927422ed3899 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Fri, 10 Aug 2018 23:06:07 +0000 Subject: [PATCH 1375/1692] Tools: hv: Fix a bug in the key delete code Fix a bug in the key delete code - the num_records range from 0 to num_records-1. Signed-off-by: K. Y. 
Srinivasan Reported-by: David Binderman Cc: Reviewed-by: Michael Kelley Signed-off-by: Greg Kroah-Hartman --- tools/hv/hv_kvp_daemon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index dbf6e8bd98ba..bbb2a8ef367c 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -286,7 +286,7 @@ static int kvp_key_delete(int pool, const __u8 *key, int key_size) * Found a match; just move the remaining * entries up. */ - if (i == num_records) { + if (i == (num_records - 1)) { kvp_file_info[pool].num_records--; kvp_update_file(pool); return 0; -- GitLab From de916736aaaadddbd6061472969f667b14204aa9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 15 Aug 2018 10:50:41 -0500 Subject: [PATCH 1376/1692] misc: hmc6352: fix potential Spectre v1 val is indirectly controlled by user-space, hence leading to a potential exploitation of the Spectre variant 1 vulnerability. This issue was detected with the help of Smatch: drivers/misc/hmc6352.c:54 compass_store() warn: potential spectre issue 'map' [r] Fix this by sanitizing val before using it to index map Notice that given that speculation windows are large, the policy is to kill the speculation on the first load and not worry if it can be completed with a dependent load/store [1]. [1] https://marc.info/?l=linux-kernel&m=152449131114778&w=2 Cc: stable@vger.kernel.org Signed-off-by: Gustavo A. R. Silva Signed-off-by: Greg Kroah-Hartman --- drivers/misc/hmc6352.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c index eeb7eef62174..38f90e179927 100644 --- a/drivers/misc/hmc6352.c +++ b/drivers/misc/hmc6352.c @@ -27,6 +27,7 @@ #include #include #include +#include static DEFINE_MUTEX(compass_mutex); @@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count, return ret; if (val >= strlen(map)) return -EINVAL; + val = array_index_nospec(val, strlen(map)); mutex_lock(&compass_mutex); ret = compass_command(c, map[val]); mutex_unlock(&compass_mutex); -- GitLab From 029d727b4f5d7c82f78e0395a0d220271c2f92b8 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 16 Aug 2018 14:42:13 -0500 Subject: [PATCH 1377/1692] fpga: dfl: fme: fix return value check in in pr_mgmt_init() In case of error, the function dfl_fme_create_region() returns ERR_PTR() and never returns NULL. The NULL test in the return value check should be replaced with IS_ERR(). 
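For reference, the ERR_PTR() convention being corrected, as a generic sketch (the thing_* names are illustrative, not the dfl API): an encoded errno such as ERR_PTR(-ENOMEM) is a non-NULL pointer, so a NULL test silently accepts it.

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int id; };               /* illustrative */

static struct thing *thing_create(int id)
{
	struct thing *t;

	if (id < 0)
		return ERR_PTR(-EINVAL);        /* failure is encoded in the pointer */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->id = id;
	return t;
}

static int thing_setup(int id)
{
	struct thing *t = thing_create(id);

	if (IS_ERR(t))                  /* correct: "if (!t)" would never trigger here */
		return PTR_ERR(t);
	/* ... use t ... */
	kfree(t);
	return 0;
}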
Fixes: 29de76240e86 ("fpga: dfl: fme: add partial reconfiguration sub feature support") Signed-off-by: Wei Yongjun Acked-by: Moritz Fischer Acked-by: Alan Tull Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/dfl-fme-pr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c index fc9fd2d0482f..0b840531ef33 100644 --- a/drivers/fpga/dfl-fme-pr.c +++ b/drivers/fpga/dfl-fme-pr.c @@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev, /* Create region for each port */ fme_region = dfl_fme_create_region(pdata, mgr, fme_br->br, i); - if (!fme_region) { + if (IS_ERR(fme_region)) { ret = PTR_ERR(fme_region); goto destroy_region; } -- GitLab From 6712cc9c22117a8af9f3df272b4a44fd2e4201cd Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Mon, 20 Aug 2018 21:16:40 +0000 Subject: [PATCH 1378/1692] vmbus: don't return values for uninitalized channels For unsupported device types, the vmbus channel ringbuffer is never initialized, and therefore reading the sysfs files will return garbage or cause a kernel OOPS. Fixes: c2e5df616e1a ("vmbus: add per-channel sysfs info") Signed-off-by: Stephen Hemminger Signed-off-by: K. Y. Srinivasan Cc: # 4.15 Signed-off-by: Greg Kroah-Hartman --- drivers/hv/vmbus_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index b1b548a21f91..c71cc857b649 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj, if (!attribute->show) return -EIO; + if (chan->state != CHANNEL_OPENED_STATE) + return -EINVAL; + return attribute->show(chan, buf); } -- GitLab From 422b3db2a5036add39a82425b1dd9fb6c96481e8 Mon Sep 17 00:00:00 2001 From: Rishabh Bhatnagar Date: Fri, 31 Aug 2018 08:43:31 -0700 Subject: [PATCH 1379/1692] firmware: Fix security issue with request_firmware_into_buf() When calling request_firmware_into_buf() with the FW_OPT_NOCACHE flag it is expected that firmware is loaded into buffer from memory. But inside alloc_lookup_fw_priv every new firmware that is loaded is added to the firmware cache (fwc) list head. So if any driver requests a firmware that is already loaded the code iterates over the above mentioned list and it can end up giving a pointer to other device driver's firmware buffer. Also the existing copy may either be modified by drivers, remote processors or even freed. This causes a potential security issue with batched requests when using request_firmware_into_buf. Fix alloc_lookup_fw_priv to not add to the fwc head list if FW_OPT_NOCACHE is set, and also don't do the lookup in the list. 
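Reduced to its essentials, the fixed path looks roughly like the sketch below (simplified: the kref taken on a shared cached copy is omitted; __lookup_fw_priv() and __allocate_fw_priv() are the helpers visible in the diff that follows):

static struct fw_priv *fw_get_priv(struct firmware_cache *fwc, const char *name,
				   void *dbuf, size_t size, bool nocache)
{
	struct fw_priv *priv = NULL;

	spin_lock(&fwc->lock);
	if (!nocache)
		priv = __lookup_fw_priv(name);          /* only cached requests may share */
	if (!priv) {
		priv = __allocate_fw_priv(name, fwc, dbuf, size);
		if (priv && !nocache)
			list_add(&priv->list, &fwc->head); /* never cache NOCACHE buffers */
	}
	spin_unlock(&fwc->lock);

	return priv;
}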
Fixes: 0e742e9275 ("firmware: provide infrastructure to make fw caching optional") [mcgrof: broken since feature introduction on v4.8] Cc: stable@vger.kernel.org # v4.8+ Signed-off-by: Vikram Mulukutla Signed-off-by: Rishabh Bhatnagar Signed-off-by: Luis Chamberlain Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_loader/main.c | 30 +++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 0943e7065e0e..b3c0498ee433 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -209,21 +209,24 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name) static int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc, struct fw_priv **fw_priv, void *dbuf, - size_t size) + size_t size, enum fw_opt opt_flags) { struct fw_priv *tmp; spin_lock(&fwc->lock); - tmp = __lookup_fw_priv(fw_name); - if (tmp) { - kref_get(&tmp->ref); - spin_unlock(&fwc->lock); - *fw_priv = tmp; - pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n"); - return 1; + if (!(opt_flags & FW_OPT_NOCACHE)) { + tmp = __lookup_fw_priv(fw_name); + if (tmp) { + kref_get(&tmp->ref); + spin_unlock(&fwc->lock); + *fw_priv = tmp; + pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n"); + return 1; + } } + tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); - if (tmp) + if (tmp && !(opt_flags & FW_OPT_NOCACHE)) list_add(&tmp->list, &fwc->head); spin_unlock(&fwc->lock); @@ -493,7 +496,8 @@ int assign_fw(struct firmware *fw, struct device *device, */ static int _request_firmware_prepare(struct firmware **firmware_p, const char *name, - struct device *device, void *dbuf, size_t size) + struct device *device, void *dbuf, size_t size, + enum fw_opt opt_flags) { struct firmware *firmware; struct fw_priv *fw_priv; @@ -511,7 +515,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, return 0; /* assigned */ } - ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size); + ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size, + opt_flags); /* * bind with 'priv' now to avoid warning in failure path @@ -571,7 +576,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, goto out; } - ret = _request_firmware_prepare(&fw, name, device, buf, size); + ret = _request_firmware_prepare(&fw, name, device, buf, size, + opt_flags); if (ret <= 0) /* error or already assigned */ goto out; -- GitLab From fa108f95c6769ec15ea59b7db00454b82afc6121 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 5 Sep 2018 07:45:11 +0200 Subject: [PATCH 1380/1692] s390/zcrypt: remove VLA usage from the AP bus The use of variable length arrays on the stack is deprecated. git commit 3d8f60d38e249f989a7fca9c2370c31c3d5487e1 "s390/zcrypt: hex string mask improvements for apmask and aqmask." added three new VLA arrays. Remove them again. 
Reviewed-by: Harald Freudenberger Signed-off-by: Martin Schwidefsky --- drivers/s390/crypto/ap_bus.c | 86 ++++++++++++++---------------------- 1 file changed, 33 insertions(+), 53 deletions(-) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index ec891bc7d10a..f039266b275d 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) if (bits & 0x07) return -EINVAL; - memset(bitmap, 0, bits / 8); - if (str[0] == '0' && str[1] == 'x') str++; if (*str == 'x') @@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits) } /* - * str2clrsetmasks() - parse bitmask argument and set the clear and - * the set bitmap mask. A concatenation (done with ',') of these terms - * is recognized: + * modify_bitmap() - parse bitmask argument and modify an existing + * bit mask accordingly. A concatenation (done with ',') of these + * terms is recognized: * +[-] or -[-] * may be any valid number (hex, decimal or octal) in the range * 0...bits-1; the leading + or - is required. Here are some examples: * +0-15,+32,-128,-0xFF * -0-255,+1-16,+0x128 * +1,+2,+3,+4,-5,-7-10 - * Returns a clear and a set bitmask. Every positive value in the string - * results in a bit set in the set mask and every negative value in the - * string results in a bit SET in the clear mask. As a bit may be touched - * more than once, the last 'operation' wins: +0-255,-128 = all but bit - * 128 set in the set mask, only bit 128 set in the clear mask. + * Returns the new bitmap after all changes have been applied. Every + * positive value in the string will set a bit and every negative value + * in the string will clear a bit. As a bit may be touched more than once, + * the last 'operation' wins: + * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be + * cleared again. All other bits are unmodified. 
*/ -static int str2clrsetmasks(const char *str, - unsigned long *clrmap, - unsigned long *setmap, - int bits) +static int modify_bitmap(const char *str, unsigned long *bitmap, int bits) { int a, i, z; char *np, sign; @@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str, if (bits & 0x07) return -EINVAL; - memset(clrmap, 0, bits / 8); - memset(setmap, 0, bits / 8); - while (*str) { sign = *str++; if (sign != '+' && sign != '-') @@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str, str = np; } for (i = a; i <= z; i++) - if (sign == '+') { - set_bit_inv(i, setmap); - clear_bit_inv(i, clrmap); - } else { - clear_bit_inv(i, setmap); - set_bit_inv(i, clrmap); - } + if (sign == '+') + set_bit_inv(i, bitmap); + else + clear_bit_inv(i, bitmap); while (*str == ',' || *str == '\n') str++; } @@ -970,44 +960,34 @@ static int process_mask_arg(const char *str, unsigned long *bitmap, int bits, struct mutex *lock) { - int i; + unsigned long *newmap, size; + int rc; /* bits needs to be a multiple of 8 */ if (bits & 0x07) return -EINVAL; + size = BITS_TO_LONGS(bits)*sizeof(unsigned long); + newmap = kmalloc(size, GFP_KERNEL); + if (!newmap) + return -ENOMEM; + if (mutex_lock_interruptible(lock)) { + kfree(newmap); + return -ERESTARTSYS; + } + if (*str == '+' || *str == '-') { - DECLARE_BITMAP(clrm, bits); - DECLARE_BITMAP(setm, bits); - - i = str2clrsetmasks(str, clrm, setm, bits); - if (i) - return i; - if (mutex_lock_interruptible(lock)) - return -ERESTARTSYS; - for (i = 0; i < bits; i++) { - if (test_bit_inv(i, clrm)) - clear_bit_inv(i, bitmap); - if (test_bit_inv(i, setm)) - set_bit_inv(i, bitmap); - } + memcpy(newmap, bitmap, size); + rc = modify_bitmap(str, newmap, bits); } else { - DECLARE_BITMAP(setm, bits); - - i = hex2bitmap(str, setm, bits); - if (i) - return i; - if (mutex_lock_interruptible(lock)) - return -ERESTARTSYS; - for (i = 0; i < bits; i++) - if (test_bit_inv(i, setm)) - set_bit_inv(i, bitmap); - else - clear_bit_inv(i, bitmap); + memset(newmap, 0, size); + rc = hex2bitmap(str, newmap, bits); } + if (rc == 0) + memcpy(bitmap, newmap, size); mutex_unlock(lock); - - return 0; + kfree(newmap); + return rc; } /* -- GitLab From 542cedec53c9e8b73f3f05bf8468823598c50489 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Tue, 11 Sep 2018 15:12:46 -0600 Subject: [PATCH 1381/1692] Revert "ASoC: Intel: Skylake: Acquire irq after RIRB allocation" This reverts commit 12eeeb4f4733bbc4481d01df35933fc15beb8b19. The patch doesn't fix accessing memory with null pointer in skl_interrupt(). There are two problems: 1) skl_init_chip() is called twice, before and after dma buffer is allocate. The first call sets bus->chip_init which prevents the second from initializing bus->corb.buf and rirb.buf from bus->rb.area. 2) snd_hdac_bus_init_chip() enables interrupt before snd_hdac_bus_init_cmd_io() initializing dma buffers. There is a small window which skl_interrupt() can be called if irq has been acquired. If so, it crashes when using null dma buffer pointers. Will fix the problems in the following patches. Also attaching the crash for future reference. [ 16.949148] general protection fault: 0000 [#1] PREEMPT SMP KASAN PTI [ 16.950903] Call Trace: [ 16.950906] [ 16.950918] skl_interrupt+0x19e/0x2d6 [snd_soc_skl] [ 16.950926] ? dma_supported+0xb5/0xb5 [snd_soc_skl] [ 16.950933] __handle_irq_event_percpu+0x27a/0x6c8 [ 16.950937] ? __irq_wake_thread+0x1d1/0x1d1 [ 16.950942] ? __do_softirq+0x57a/0x69e [ 16.950944] handle_irq_event_percpu+0x95/0x1ba [ 16.950948] ? 
_raw_spin_unlock+0x65/0xdc [ 16.950951] ? __handle_irq_event_percpu+0x6c8/0x6c8 [ 16.950953] ? _raw_spin_unlock+0x65/0xdc [ 16.950957] ? time_cpufreq_notifier+0x483/0x483 [ 16.950959] handle_irq_event+0x89/0x123 [ 16.950962] handle_fasteoi_irq+0x16f/0x425 [ 16.950965] handle_irq+0x1fe/0x28e [ 16.950969] do_IRQ+0x6e/0x12e [ 16.950972] common_interrupt+0x7a/0x7a [ 16.950974] [ 16.951031] RIP: snd_hdac_bus_update_rirb+0x19b/0x4cf [snd_hda_core] RSP: ffff88015c807c08 [ 16.951036] ---[ end trace 58bf9ece1775bc92 ]--- Fixes: 2eeeb4f4733b ("ASoC: Intel: Skylake: Acquire irq after RIRB allocation") Signed-off-by: Yu Zhao Signed-off-by: Mark Brown --- sound/soc/intel/skylake/skl.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index cf09721ca13e..dce649485649 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -838,7 +838,11 @@ static int skl_first_init(struct hdac_bus *bus) snd_hdac_bus_parse_capabilities(bus); + if (skl_acquire_irq(bus, 0) < 0) + return -EBUSY; + pci_set_master(pci); + synchronize_irq(bus->irq); gcap = snd_hdac_chip_readw(bus, GCAP); dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap); @@ -871,12 +875,6 @@ static int skl_first_init(struct hdac_bus *bus) if (err < 0) return err; - err = skl_acquire_irq(bus, 0); - if (err < 0) - return err; - - synchronize_irq(bus->irq); - /* initialize chip */ skl_init_pci(skl); -- GitLab From b61749a89f826eb61fc59794d9e4697bd246eb61 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Tue, 11 Sep 2018 15:14:04 -0600 Subject: [PATCH 1382/1692] sound: enable interrupt after dma buffer initialization In snd_hdac_bus_init_chip(), we enable interrupt before snd_hdac_bus_init_cmd_io() initializing dma buffers. If irq has been acquired and irq handler uses the dma buffer, kernel may crash when interrupt comes in. Fix the problem by postponing enabling irq after dma buffer initialization. And warn once on null dma buffer pointer during the initialization. 
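The ordering rule behind this fix is generic: shared buffers must be fully initialized before the asynchronous consumer that dereferences them is switched on. A minimal userspace sketch of that rule, with a thread standing in for the interrupt handler (illustrative only, not the snd_hdac code; the names irq_enabled and ring_buffer are made up):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static atomic_bool irq_enabled;	/* stands in for azx_int_enable() */
static int *ring_buffer;	/* stands in for the CORB/RIRB DMA area */

static void *irq_handler(void *arg)
{
	(void)arg;
	/* The handler only touches the buffer once it has been published. */
	while (!atomic_load(&irq_enabled))
		usleep(1000);
	printf("handler read %d\n", ring_buffer[0]);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, irq_handler, NULL);

	/* 1) allocate and initialize the shared buffer first ... */
	ring_buffer = calloc(256, sizeof(*ring_buffer));
	ring_buffer[0] = 42;

	/* 2) ... and only then "enable the interrupt". */
	atomic_store(&irq_enabled, true);

	pthread_join(tid, NULL);
	free(ring_buffer);
	return 0;
}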
Reviewed-by: Takashi Iwai Signed-off-by: Yu Zhao Signed-off-by: Mark Brown --- sound/hda/hdac_controller.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 560ec0986e1a..11057d9f84ec 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus) */ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus) { + WARN_ON_ONCE(!bus->rb.area); + spin_lock_irq(&bus->reg_lock); /* CORB set up */ bus->corb.addr = bus->rb.addr; @@ -479,13 +481,15 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset) /* reset controller */ azx_reset(bus, full_reset); - /* initialize interrupts */ + /* clear interrupts */ azx_int_clear(bus); - azx_int_enable(bus); /* initialize the codec command I/O */ snd_hdac_bus_init_cmd_io(bus); + /* enable interrupts after CORB/RIRB buffers are initialized above */ + azx_int_enable(bus); + /* program the position buffer */ if (bus->use_posbuf && bus->posbuf.addr) { snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr); -- GitLab From 75383f8d39d4c0fb96083dd460b7b139fbdac492 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Tue, 11 Sep 2018 15:15:16 -0600 Subject: [PATCH 1383/1692] sound: don't call skl_init_chip() to reset intel skl soc Internally, skl_init_chip() calls snd_hdac_bus_init_chip() which 1) sets bus->chip_init to prevent multiple entrances before device is stopped; 2) enables interrupt. We shouldn't use it for the purpose of resetting device only because 1) when we really want to initialize device, we won't be able to do so; 2) we are not ready to handle interrupt yet, and kernel crashes when interrupt comes in. Rename azx_reset() to snd_hdac_bus_reset_link(), and use it to reset device properly.
Fixes: 60767abcea3d ("ASoC: Intel: Skylake: Reset the controller in probe") Reviewed-by: Takashi Iwai Signed-off-by: Yu Zhao Signed-off-by: Mark Brown --- include/sound/hdaudio.h | 1 + sound/hda/hdac_controller.c | 7 ++++--- sound/soc/intel/skylake/skl.c | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index ab5ee3ef2198..207e816ce6e1 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -384,6 +384,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus); void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus); void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus); void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); +int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset); void snd_hdac_bus_update_rirb(struct hdac_bus *bus); int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 11057d9f84ec..74244d8e2909 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -385,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus) EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset); /* reset codec link */ -static int azx_reset(struct hdac_bus *bus, bool full_reset) +int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset) { if (!full_reset) goto skip_reset; @@ -410,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset) skip_reset: /* check to see if controller is ready */ if (!snd_hdac_chip_readb(bus, GCTL)) { - dev_dbg(bus->dev, "azx_reset: controller not ready!\n"); + dev_dbg(bus->dev, "controller not ready!\n"); return -EBUSY; } @@ -425,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset) return 0; } +EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link); /* enable interrupts */ static void azx_int_enable(struct hdac_bus *bus) @@ -479,7 +480,7 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset) return false; /* reset controller */ - azx_reset(bus, full_reset); + snd_hdac_bus_reset_link(bus, full_reset); /* clear interrupts */ azx_int_clear(bus); diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index dce649485649..1d17be0f78a0 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -834,7 +834,7 @@ static int skl_first_init(struct hdac_bus *bus) return -ENXIO; } - skl_init_chip(bus, true); + snd_hdac_bus_reset_link(bus, true); snd_hdac_bus_parse_capabilities(bus); -- GitLab From 1843abd03250115af6cec0892683e70cf2297c25 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Thu, 16 Aug 2018 09:02:31 +0100 Subject: [PATCH 1384/1692] s390/mm: Check for valid vma before zapping in gmap_discard Userspace could have munmapped the area before doing unmapping from the gmap. This would leave us with a valid vmaddr, but an invalid vma from which we would try to zap memory. Let's check before using the vma. 
Fixes: 1e133ab296f3 ("s390/mm: split arch/s390/mm/pgtable.c") Signed-off-by: Janosch Frank Reviewed-by: David Hildenbrand Reported-by: Dan Carpenter Message-Id: <20180816082432.78828-1-frankja@linux.ibm.com> Signed-off-by: Janosch Frank --- arch/s390/mm/gmap.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index bb44990c8212..911c7ded35f1 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to) vmaddr |= gaddr & ~PMD_MASK; /* Find vma in the parent mm */ vma = find_vma(gmap->mm, vmaddr); + if (!vma) + continue; /* * We do not discard pages that are backed by * hugetlbfs, so we don't have to refault them. */ - if (vma && is_vm_hugetlb_page(vma)) + if (is_vm_hugetlb_page(vma)) continue; size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK)); zap_page_range(vma, vmaddr, size); -- GitLab From 40ebdb8e59df36e2cc71810bd021a0808b16c956 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Wed, 1 Aug 2018 11:48:28 +0100 Subject: [PATCH 1385/1692] KVM: s390: Make huge pages unavailable in ucontrol VMs We currently do not notify all gmaps when using gmap_pmdp_xchg(), due to locking constraints. This makes ucontrol VMs, which is the only VM type that creates multiple gmaps, incompatible with huge pages. Also we would need to hold the guest_table_lock of all gmaps that have this vmaddr mapped to synchronize access to the pmd. ucontrol VMs are rather exotic and creating a new locking concept is no easy task. Hence we return EINVAL when trying to activate KVM_CAP_S390_HPAGE_1M and report it as being not available when checking for it. Fixes: a4499382 ("KVM: s390: Add huge page enablement control") Signed-off-by: Janosch Frank Reviewed-by: David Hildenbrand Reviewed-by: Claudio Imbrenda Message-Id: <20180801112508.138159-1-frankja@linux.ibm.com> Signed-off-by: Janosch Frank --- Documentation/virtual/kvm/api.txt | 3 ++- arch/s390/kvm/kvm-s390.c | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index c664064f76fb..8d8a372c8340 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits. Architectures: s390 Parameters: none Returns: 0 on success, -EINVAL if hpage module parameter was not set - or cmma is enabled + or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL + flag set With this capability the KVM support for memory backing with 1m pages through hugetlbfs can be enabled for a VM.
After the capability is diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f69333fd2fa3..ac5da6b0b862 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; case KVM_CAP_S390_HPAGE_1M: r = 0; - if (hpage) + if (hpage && !kvm_is_ucontrol(kvm)) r = 1; break; case KVM_CAP_S390_MEM_OP: @@ -691,7 +691,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) mutex_lock(&kvm->lock); if (kvm->created_vcpus) r = -EBUSY; - else if (!hpage || kvm->arch.use_cmma) + else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) r = -EINVAL; else { r = 0; -- GitLab From 6ee67e351cdae68b4c9c7df74124b9f9fb81e966 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 12 Sep 2018 03:15:30 +0000 Subject: [PATCH 1386/1692] drm/fb-helper: Remove set but not used variable 'connector_funcs' Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/drm_fb_helper.c: In function 'drm_pick_crtcs': drivers/gpu/drm/drm_fb_helper.c:2373:43: warning: variable 'connector_funcs' set but not used [-Wunused-but-set-variable] Signed-off-by: YueHaibing Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/1536722130-108819-1-git-send-email-yuehaibing@huawei.com --- drivers/gpu/drm/drm_fb_helper.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 4b0dd20bccb8..16ec93b75dbf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, { int c, o; struct drm_connector *connector; - const struct drm_connector_helper_funcs *connector_funcs; int my_score, best_score, score; struct drm_fb_helper_crtc **crtcs, *crtc; struct drm_fb_helper_connector *fb_helper_conn; @@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, if (drm_has_preferred_mode(fb_helper_conn, width, height)) my_score++; - connector_funcs = connector->helper_private; - /* * select a crtc for this connector and then attempt to configure * remaining connectors -- GitLab From 8ad8aa353524d89fa2e09522f3078166ff78ec42 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 6 Sep 2018 12:47:51 +0300 Subject: [PATCH 1387/1692] cifs: prevent integer overflow in nxt_dir_entry() The "old_entry + le32_to_cpu(pDirInfo->NextEntryOffset)" can wrap around so I have added a check for integer overflow. 
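The added check follows a common pattern for server-controlled offsets: before using base + offset, verify that the addition cannot wrap around the address space. A self-contained sketch of the same comparison, done on uintptr_t so the unsigned wrap-around is well defined in plain C (the kernel patch performs it directly on the char pointer):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true if base + offset would wrap around the address space. */
static bool offset_wraps(uintptr_t base, uintptr_t offset)
{
	return base + offset < base;	/* unsigned overflow wraps, by definition */
}

int main(void)
{
	/* Synthetic "pointer" near the top of the address space. */
	uintptr_t base = UINTPTR_MAX - 16;

	printf("offset 8:  %s\n", offset_wraps(base, 8) ? "wraps" : "ok");
	printf("offset 64: %s\n", offset_wraps(base, 64) ? "wraps" : "ok");
	return 0;
}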
Reported-by: Dr Silvio Cesare of InfoSect Reviewed-by: Ronnie Sahlberg Reviewed-by: Aurelien Aptel Signed-off-by: Dan Carpenter Signed-off-by: Steve French CC: Stable --- fs/cifs/readdir.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index eeab81c9452f..e169e1a5fd35 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -376,8 +376,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + pfData->FileNameLength; - } else - new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); + } else { + u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset); + + if (old_entry + next_offset < old_entry) { + cifs_dbg(VFS, "invalid offset %u\n", next_offset); + return NULL; + } + new_entry = old_entry + next_offset; + } cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry); /* validate that new_entry is not past end of SMB */ if (new_entry >= end_of_smb) { -- GitLab From 56446f218af1133c802dad8e9e116f07f381846c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 6 Sep 2018 12:48:22 +0300 Subject: [PATCH 1388/1692] CIFS: fix wrapping bugs in num_entries() The problem is that "entryptr + next_offset" and "entryptr + len + size" can wrap. I ended up changing the type of "entryptr" because it makes the math easier when we don't have to do so much casting. Signed-off-by: Dan Carpenter Signed-off-by: Steve French Reviewed-by: Aurelien Aptel Reviewed-by: Pavel Shilovsky CC: Stable --- fs/cifs/smb2pdu.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index c08acfc77abc..6f0e6b42599c 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -3577,33 +3577,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size) int len; unsigned int entrycount = 0; unsigned int next_offset = 0; - FILE_DIRECTORY_INFO *entryptr; + char *entryptr; + FILE_DIRECTORY_INFO *dir_info; if (bufstart == NULL) return 0; - entryptr = (FILE_DIRECTORY_INFO *)bufstart; + entryptr = bufstart; while (1) { - entryptr = (FILE_DIRECTORY_INFO *) - ((char *)entryptr + next_offset); - - if ((char *)entryptr + size > end_of_buf) { + if (entryptr + next_offset < entryptr || + entryptr + next_offset > end_of_buf || + entryptr + next_offset + size > end_of_buf) { cifs_dbg(VFS, "malformed search entry would overflow\n"); break; } - len = le32_to_cpu(entryptr->FileNameLength); - if ((char *)entryptr + len + size > end_of_buf) { + entryptr = entryptr + next_offset; + dir_info = (FILE_DIRECTORY_INFO *)entryptr; + + len = le32_to_cpu(dir_info->FileNameLength); + if (entryptr + len < entryptr || + entryptr + len > end_of_buf || + entryptr + len + size > end_of_buf) { cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", end_of_buf); break; } - *lastentry = (char *)entryptr; + *lastentry = entryptr; entrycount++; - next_offset = le32_to_cpu(entryptr->NextEntryOffset); + next_offset = le32_to_cpu(dir_info->NextEntryOffset); if (!next_offset) break; } -- GitLab From 2d204ee9d671327915260071c19350d84344e096 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 10 Sep 2018 14:12:07 +0300 Subject: [PATCH 1389/1692] cifs: integer overflow in in SMB2_ioctl() The "le32_to_cpu(rsp->OutputOffset) + *plen" addition can overflow and wrap around to a smaller value which looks like it would lead to an information leak. 
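The same concern generalizes to walking any buffer of variable-length records: every offset and length field read from the buffer must be validated against the space actually left before it is dereferenced or skipped over. A sketch of that pattern with a simplified stand-in record (the fields below are not the real FILE_DIRECTORY_INFO layout, and the bounds are checked as remaining lengths rather than raw pointer comparisons):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec {
	uint32_t next_offset;	/* offset of the next record from this one */
	uint32_t name_len;	/* payload bytes following this header */
};

static unsigned int count_records(const char *buf, size_t buf_len)
{
	const char *end = buf + buf_len;
	const char *p = buf;
	unsigned int n = 0;

	while (p + sizeof(struct rec) <= end) {
		struct rec r;

		memcpy(&r, p, sizeof(r));	/* avoid unaligned access */
		if (r.name_len > (size_t)(end - p) - sizeof(r))
			break;			/* payload would overflow the buffer */
		n++;
		if (r.next_offset == 0)
			break;			/* last record */
		if (r.next_offset > (size_t)(end - p))
			break;			/* next record would overflow the buffer */
		p += r.next_offset;
	}
	return n;
}

int main(void)
{
	char buf[64] = { 0 };
	struct rec r = { .next_offset = 16, .name_len = 4 };

	memcpy(buf, &r, sizeof(r));		/* record 0 */
	memcpy(buf + 16, &r, sizeof(r));	/* record 1 */
	r.next_offset = 0;
	memcpy(buf + 32, &r, sizeof(r));	/* record 2, terminates the chain */

	printf("records: %u\n", count_records(buf, sizeof(buf)));	/* prints 3 */
	return 0;
}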
Fixes: 4a72dafa19ba ("SMB2 FSCTL and IOCTL worker function") Signed-off-by: Dan Carpenter Signed-off-by: Steve French Reviewed-by: Aurelien Aptel CC: Stable --- fs/cifs/smb2pdu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 6f0e6b42599c..f54d07bda067 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2459,14 +2459,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, /* We check for obvious errors in the output buffer length and offset */ if (*plen == 0) goto ioctl_exit; /* server returned no data */ - else if (*plen > 0xFF00) { + else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) { cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); *plen = 0; rc = -EIO; goto ioctl_exit; } - if (rsp_iov.iov_len < le32_to_cpu(rsp->OutputOffset) + *plen) { + if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) { cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, le32_to_cpu(rsp->OutputOffset)); *plen = 0; -- GitLab From d310959365942b100f79b56bcce859968fe7ca9c Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Tue, 11 Sep 2018 13:26:38 -0700 Subject: [PATCH 1390/1692] efi/libstub/arm: default EFI_ARMSTUB_DTB_LOADER to y Default EFI_ARMSTUB_DTB_LOADER to y to allow the dtb= command line parameter to function with efi loader. Required for development purposes and to boot on existing bootloaders that do not support devicetree provided by the firmware or by the bootloader. Fixes: 3d7ee348aa41 ("efi/libstub/arm: Add opt-in Kconfig option ...") Signed-off-by: Scott Branden Signed-off-by: Ard Biesheuvel --- drivers/firmware/efi/Kconfig | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index d8e159feb573..89110dfc7127 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -90,14 +90,17 @@ config EFI_ARMSTUB config EFI_ARMSTUB_DTB_LOADER bool "Enable the DTB loader" depends on EFI_ARMSTUB + default y help Select this config option to add support for the dtb= command line parameter, allowing a device tree blob to be loaded into memory from the EFI System Partition by the stub. - The device tree is typically provided by the platform or by - the bootloader, so this option is mostly for development - purposes only. + If the device tree is provided by the platform or by + the bootloader this option may not be needed. + But, for various development reasons and to maintain existing + functionality for bootloaders that do not have such support + this option is necessary. config EFI_BOOTLOADER_CONTROL tristate "EFI Bootloader Control" -- GitLab From b1f4ff74fcb0e82664e8633cc225c2ad4234878a Mon Sep 17 00:00:00 2001 From: Paulo Zanoni Date: Mon, 10 Sep 2018 10:59:56 -0700 Subject: [PATCH 1391/1692] tracing/Makefile: Fix handling redefinition of CC_FLAGS_FTRACE As a Kernel developer, I make heavy use of "make targz-pkg" in order to locally compile and remotely install my development Kernels. The nice feature I rely on is that after a normal "make", "make targz-pkg" only generates the tarball without having to recompile everything. That was true until commit f28bc3c32c05 ("tracing: Handle CC_FLAGS_FTRACE more accurately"). After it, running "make targz-pkg" after "make" will recompile the whole Kernel tree, making my development workflow much slower. The Kernel is choosing to recompile everything because it claims the command line has changed. 
A diff of the .cmd files show a repeated -mfentry in one of the files. That is because "make targz-pkg" calls "make modules_install" and the environment is already populated with the exported variables, CC_FLAGS_FTRACE being one of them. Then, -mfentry gets duplicated because it is not protected behind an ifndef block, like -pg. To complicate the problem a little bit more, architectures can define their own version CC_FLAGS_FTRACE, so our code not only has to consider recursive Makefiles, but also architecture overrides. So in this patch we move CC_FLAGS_FTRACE up and unconditionally define it to -pg. Then we let the architecture Makefiles possibly override it, and finally append the extra options later. This ensures the variable is always fully redefined at each invocation so recursive Makefiles don't keep appending, and hopefully it maintains the intended behavior on how architectures can override the defaults.. Thanks Steven Rostedt and Vasily Gorbik for the help on this regression. Cc: Michal Marek Cc: Ingo Molnar Cc: Tvrtko Ursulin Cc: linux-kbuild@vger.kernel.org Fixes: commit f28bc3c32c05 ("tracing: Handle CC_FLAGS_FTRACE more accurately") Acked-by: Vasily Gorbik Signed-off-by: Paulo Zanoni Signed-off-by: Steven Rostedt (VMware) --- Makefile | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 4d5c883a98e5..a5ef6818157a 100644 --- a/Makefile +++ b/Makefile @@ -616,6 +616,11 @@ CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ $(call cc-disable-warning,maybe-uninitialized,) export CFLAGS_GCOV +# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later. +ifdef CONFIG_FUNCTION_TRACER + CC_FLAGS_FTRACE := -pg +endif + # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default # values of the respective KBUILD_* variables ARCH_CPPFLAGS := @@ -755,9 +760,6 @@ KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \ endif ifdef CONFIG_FUNCTION_TRACER -ifndef CC_FLAGS_FTRACE -CC_FLAGS_FTRACE := -pg -endif ifdef CONFIG_FTRACE_MCOUNT_RECORD # gcc 5 supports generating the mcount tables directly ifeq ($(call cc-option-yn,-mrecord-mcount),y) -- GitLab From 999696752db1099aba595aac4f8d881f8c7cf4e6 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Wed, 12 Sep 2018 19:41:22 +0200 Subject: [PATCH 1392/1692] x86/xen: Disable CPU0 hotplug for Xen PV Xen PV guests don't allow CPU0 hotplug, so disable it. Signed-off-by: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: boris.ostrovsky@oracle.com Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20180912174122.24282-1-jgross@suse.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/topology.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index 12cbe2b88c0f..738bf42b0218 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -111,8 +111,10 @@ int arch_register_cpu(int num) /* * Currently CPU0 is only hotpluggable on Intel platforms. Other * vendors can add hotplug support later. + * Xen PV guests don't support CPU0 hotplug at all. 
*/ - if (c->x86_vendor != X86_VENDOR_INTEL) + if (c->x86_vendor != X86_VENDOR_INTEL || + boot_cpu_has(X86_FEATURE_XENPV)) cpu0_hotpluggable = 0; /* -- GitLab From cf40361ede6cf9dc09349e4c049dc0d166ca2d8b Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 11 Sep 2018 11:18:12 -0700 Subject: [PATCH 1393/1692] x86/efi: Load fixmap GDT in efi_call_phys_epilog() before setting %cr3 Commit eeb89e2bb1ac ("x86/efi: Load fixmap GDT in efi_call_phys_epilog()") moved loading the fixmap in efi_call_phys_epilog() after load_cr3() since it was assumed to be more logical. Turns out this is incorrect: In efi_call_phys_prolog(), the gdt with its physical address is loaded first, and when the %cr3 is reloaded in _epilog from initial_page_table to swapper_pg_dir again the gdt is no longer mapped. This results in a triple fault if an interrupt occurs after load_cr3() and before load_fixmap_gdt(0). Calling load_fixmap_gdt(0) first restores the execution order prior to commit eeb89e2bb1ac and fixes the problem. Fixes: eeb89e2bb1ac ("x86/efi: Load fixmap GDT in efi_call_phys_epilog()") Signed-off-by: Guenter Roeck Signed-off-by: Thomas Gleixner Acked-by: Linus Torvalds Cc: Ard Biesheuvel Cc: linux-efi@vger.kernel.org Cc: Andy Lutomirski Cc: Joerg Roedel Link: https://lkml.kernel.org/r/1536689892-21538-1-git-send-email-linux@roeck-us.net --- arch/x86/platform/efi/efi_32.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 05ca14222463..9959657127f4 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -85,10 +85,9 @@ pgd_t * __init efi_call_phys_prolog(void) void __init efi_call_phys_epilog(pgd_t *save_pgd) { + load_fixmap_gdt(0); load_cr3(save_pgd); __flush_tlb_all(); - - load_fixmap_gdt(0); } void __init efi_runtime_update_mappings(void) -- GitLab From 4b1c5d917d34f705096bb7dd8a2bd19b0881970e Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 12 Sep 2018 10:29:11 -0700 Subject: [PATCH 1394/1692] bpf: btf: Fix end boundary calculation for type section The end boundary math for type section is incorrect in btf_check_all_metas(). It just happens that hdr->type_off is always 0 for now because there are only two sections (type and string) and string section must be at the end (ensured in btf_parse_str_sec). However, type_off may not be 0 if a new section would be added later. This patch fixes it. Fixes: f80442a4cd18 ("bpf: btf: Change how section is supported in btf_header") Reported-by: Dmitry Vyukov Signed-off-by: Martin KaFai Lau Acked-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/bpf/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2590700237c1..138f0302692e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env) hdr = &btf->hdr; cur = btf->nohdr_data + hdr->type_off; - end = btf->nohdr_data + hdr->type_len; + end = cur + hdr->type_len; env->log_type_id = 1; while (cur < end) { -- GitLab From 778b1ac737494cec156f17c80da44664c1f77cf6 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 12 Sep 2018 15:31:32 +0200 Subject: [PATCH 1395/1692] s390/qeth: indicate error when netdev allocation fails Bailing out on allocation error is nice, but we also need to tell the ccwgroup core that creating the qeth groupdev failed. 
Fixes: d3d1b205e89f ("s390/qeth: allocate netdevice early") Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 49f64eb3eab0..6b24face21d5 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5768,8 +5768,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) qeth_update_from_chp_desc(card); card->dev = qeth_alloc_netdev(card); - if (!card->dev) + if (!card->dev) { + rc = -ENOMEM; goto err_card; + } qeth_determine_capabilities(card); enforced_disc = qeth_enforce_discipline(card); -- GitLab From 04db741d0df02fdb9ea4ddca32615153407dcf7f Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 12 Sep 2018 15:31:33 +0200 Subject: [PATCH 1396/1692] s390/qeth: switch on SG by default for IQD devices Scatter-gather transmit brings a nice performance boost. Considering the rather large MTU sizes at play, it's also totally the Right Thing To Do. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 6b24face21d5..b60055e9cb1a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5706,6 +5706,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->hw_features |= NETIF_F_SG; dev->vlan_features |= NETIF_F_SG; + if (IS_IQD(card)) + dev->features |= NETIF_F_SG; } return dev; -- GitLab From aec45e857c5538664edb76a60dd452e3265f37d1 Mon Sep 17 00:00:00 2001 From: Wenjia Zhang Date: Wed, 12 Sep 2018 15:31:34 +0200 Subject: [PATCH 1397/1692] s390/qeth: use vzalloc for QUERY OAT buffer qeth_query_oat_command() currently allocates the kernel buffer for the SIOC_QETH_QUERY_OAT ioctl with kzalloc. So on systems with fragmented memory, large allocations may fail (eg. the qethqoat tool by default uses 132KB). Solve this issue by using vzalloc, backing the allocation with non-contiguous memory. Signed-off-by: Wenjia Zhang Reviewed-by: Julian Wiedmann Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index b60055e9cb1a..de8282420f96 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) priv.buffer_len = oat_data.buffer_len; priv.response_len = 0; - priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL); + priv.buffer = vzalloc(oat_data.buffer_len); if (!priv.buffer) { rc = -ENOMEM; goto out; @@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) rc = -EFAULT; out_free: - kfree(priv.buffer); + vfree(priv.buffer); out: return rc; } -- GitLab From 0ac1487c4b2de383b91ecad1be561b8f7a2c15f4 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Wed, 12 Sep 2018 15:31:35 +0200 Subject: [PATCH 1398/1692] s390/qeth: don't dump past end of unknown HW header For inbound data with an unsupported HW header format, only dump the actual HW header. 
We have no idea how much payload follows it, and what it contains. Worst case, we dump past the end of the Inbound Buffer and access whatever is located next in memory. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l2_main.c | 2 +- drivers/s390/net/qeth_l3_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 710fa74892ae..b5e38531733f 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, default: dev_kfree_skb_any(skb); QETH_CARD_TEXT(card, 3, "inbunkno"); - QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); continue; } work_done++; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 7175086677fb..ada258c01a08 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, default: dev_kfree_skb_any(skb); QETH_CARD_TEXT(card, 3, "inbunkno"); - QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); continue; } work_done++; -- GitLab From 12a78b026f870c575d3a98998b25084aac5b3c61 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 11 Sep 2018 15:12:17 -0700 Subject: [PATCH 1399/1692] tipc: check return value of __tipc_dump_start() When __tipc_dump_start() fails with running out of memory, we have no reason to continue, especially we should avoid calling tipc_dump_done(). Fixes: 8f5c5fcf3533 ("tipc: call start and done ops directly in __tipc_nl_compat_dumpit()") Reported-and-tested-by: syzbot+3f8324abccfbf8c74a9f@syzkaller.appspotmail.com Cc: Jon Maloy Cc: Ying Xue Signed-off-by: Cong Wang Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/netlink_compat.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 82f665728382..6376467e78f8 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -185,7 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, return -ENOMEM; buf->sk = msg->dst_sk; - __tipc_dump_start(&cb, msg->net); + if (__tipc_dump_start(&cb, msg->net)) { + kfree_skb(buf); + return -ENOMEM; + } do { int rem; -- GitLab From db191db813722297be36ffce2862e0f2b0e54d82 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Tue, 11 Sep 2018 06:38:44 -0700 Subject: [PATCH 1400/1692] nfp: flower: fix vlan match by checking both vlan id and vlan pcp Previously we only checked if the vlan id field is present when trying to match a vlan tag. The vlan id and vlan pcp field should be treated independently. Fixes: 5571e8c9f241 ("nfp: extend flower matching capabilities") Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 1 + drivers/net/ethernet/netronome/nfp/flower/match.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/offload.c | 11 +++++++++++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 85f8209bf007..81d941ab895c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -70,6 +70,7 @@ struct nfp_app; #define NFP_FL_FEATS_GENEVE BIT(0) #define NFP_FL_NBI_MTU_SETTING BIT(1) #define NFP_FL_FEATS_GENEVE_OPT BIT(2) +#define NFP_FL_FEATS_VLAN_PCP BIT(3) #define NFP_FL_FEATS_LAG BIT(31) struct nfp_fl_mask_id { diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index a0c72f277faa..17acb8cc6044 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame, FLOW_DISSECTOR_KEY_VLAN, target); /* Populate the tci field. */ - if (flow_vlan->vlan_id) { + if (flow_vlan->vlan_id || flow_vlan->vlan_priority) { tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, flow_vlan->vlan_priority) | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 2edab01c3beb..bd19624f10cf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, key_size += sizeof(struct nfp_flower_mac_mpls); } + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *flow_vlan; + + flow_vlan = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_VLAN, + flow->mask); + if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) && + flow_vlan->vlan_priority) + return -EOPNOTSUPP; + } + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; -- GitLab From 224de549f0beca58fb95c0b8da9cb2bfa8c6cc12 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Tue, 11 Sep 2018 06:38:45 -0700 Subject: [PATCH 1401/1692] nfp: flower: reject tunnel encap with ipv6 outer headers for offloading This fixes a bug where ipv6 tunnels would report that it is getting offloaded to hardware but would actually be rejected by hardware. Fixes: b27d6a95a70d ("nfp: compile flower vxlan tunnel set actions") Signed-off-by: Louis Peens Reviewed-by: John Hurley Reviewed-by: Simon Horman Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 9044496803e6..46ba0cf257c6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -52,6 +52,7 @@ #define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01) #define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04) #define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800) +#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ NFP_FL_TUNNEL_KEY | \ NFP_FL_TUNNEL_GENEVE_OPT) @@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, nfp_fl_push_vlan(psh_v, a); *a_len += sizeof(struct nfp_fl_push_vlan); } else if (is_tcf_tunnel_set(a)) { + struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); struct nfp_repr *repr = netdev_priv(netdev); + *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); if (*tun_type == NFP_FL_TUNNEL_NONE) return -EOPNOTSUPP; + if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) + return -EOPNOTSUPP; + /* Pre-tunnel action is required for tunnel encap. * This checks for next hop entries on NFP. * If none, the packet falls back before applying other actions. -- GitLab From 433ca054949a6c9daac0ace1be5c33b25092bffa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 7 Sep 2018 14:27:05 +0200 Subject: [PATCH 1402/1692] drm/amdgpu: try allocating VRAM as power of two MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to allocate VRAM in power of two sizes and only fallback to vram split sizes if that fails. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 52 +++++++++++++++----- 1 file changed, 40 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 9cfa8a9ada92..3f9d5d00c9b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -124,6 +124,28 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) return usage; } +/** + * amdgpu_vram_mgr_virt_start - update virtual start address + * + * @mem: ttm_mem_reg to update + * @node: just allocated node + * + * Calculate a virtual BO start address to easily check if everything is CPU + * accessible. 
+ */ +static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem, + struct drm_mm_node *node) +{ + unsigned long start; + + start = node->start + node->size; + if (start > mem->num_pages) + start -= mem->num_pages; + else + start = 0; + mem->start = max(mem->start, start); +} + /** * amdgpu_vram_mgr_new - allocate new ranges * @@ -176,10 +198,25 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, pages_left = mem->num_pages; spin_lock(&mgr->lock); - for (i = 0; i < num_nodes; ++i) { + for (i = 0; pages_left >= pages_per_node; ++i) { + unsigned long pages = rounddown_pow_of_two(pages_left); + + r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, + pages_per_node, 0, + place->fpfn, lpfn, + mode); + if (unlikely(r)) + break; + + usage += nodes[i].size << PAGE_SHIFT; + vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); + amdgpu_vram_mgr_virt_start(mem, &nodes[i]); + pages_left -= pages; + } + + for (; pages_left; ++i) { unsigned long pages = min(pages_left, pages_per_node); uint32_t alignment = mem->page_alignment; - unsigned long start; if (pages == pages_per_node) alignment = pages_per_node; @@ -193,16 +230,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, usage += nodes[i].size << PAGE_SHIFT; vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); - - /* Calculate a virtual BO start address to easily check if - * everything is CPU accessible. - */ - start = nodes[i].start + nodes[i].size; - if (start > mem->num_pages) - start -= mem->num_pages; - else - start = 0; - mem->start = max(mem->start, start); + amdgpu_vram_mgr_virt_start(mem, &nodes[i]); pages_left -= pages; } spin_unlock(&mgr->lock); -- GitLab From 7e7bf8de432db3de912050856e641458de72a7b1 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Tue, 11 Sep 2018 17:22:40 +0800 Subject: [PATCH 1403/1692] drm/amdgpu: move cs dependencies front a bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cs dependencies handling doesn't need to be done while the vm resv is held. Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index c5cc648a1b4e..1081fd00b059 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1285,6 +1285,12 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) goto out; + r = amdgpu_cs_dependencies(adev, &parser); + if (r) { + DRM_ERROR("Failed in the dependencies handling %d!\n", r); + goto out; + } + r = amdgpu_cs_parser_bos(&parser, data); if (r) { if (r == -ENOMEM) @@ -1296,12 +1302,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) reserved_buffers = true; - r = amdgpu_cs_dependencies(adev, &parser); - if (r) { - DRM_ERROR("Failed in the dependencies handling %d!\n", r); - goto out; - } - for (i = 0; i < parser.job->num_ibs; i++) trace_amdgpu_cs(&parser, i); -- GitLab From 240cd9a64226e013ac1a608ebf720a1813790196 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Wed, 5 Sep 2018 23:51:23 -0400 Subject: [PATCH 1404/1692] drm/amdgpu: Move fault hash table to amdgpu vm Instead of sharing one fault hash table per device, make it per vm. This can avoid an inter-process lock issue when the fault hash table is full.
Change-Id: I5d1281b7c41eddc8e26113e010516557588d3708 Signed-off-by: Oak Zeng Suggested-by: Christian Konig Suggested-by: Felix Kuehling Reviewed-by: Christian Konig Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 75 ------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 11 --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 102 ++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 13 ++++ drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 38 ++++----- 5 files changed, 128 insertions(+), 111 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 06373d44b3da..4ed86218cef3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -197,78 +197,3 @@ int amdgpu_ih_process(struct amdgpu_device *adev) return IRQ_HANDLED; } -/** - * amdgpu_ih_add_fault - Add a page fault record - * - * @adev: amdgpu device pointer - * @key: 64-bit encoding of PASID and address - * - * This should be called when a retry page fault interrupt is - * received. If this is a new page fault, it will be added to a hash - * table. The return value indicates whether this is a new fault, or - * a fault that was already known and is already being handled. - * - * If there are too many pending page faults, this will fail. Retry - * interrupts should be ignored in this case until there is enough - * free space. - * - * Returns 0 if the fault was added, 1 if the fault was already known, - * -ENOSPC if there are too many pending faults. - */ -int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key) -{ - unsigned long flags; - int r = -ENOSPC; - - if (WARN_ON_ONCE(!adev->irq.ih.faults)) - /* Should be allocated in _ih_sw_init on GPUs that - * support retry faults and require retry filtering. - */ - return r; - - spin_lock_irqsave(&adev->irq.ih.faults->lock, flags); - - /* Only let the hash table fill up to 50% for best performance */ - if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1))) - goto unlock_out; - - r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL); - if (!r) - adev->irq.ih.faults->count++; - - /* chash_table_copy_in should never fail unless we're losing count */ - WARN_ON_ONCE(r < 0); - -unlock_out: - spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags); - return r; -} - -/** - * amdgpu_ih_clear_fault - Remove a page fault record - * - * @adev: amdgpu device pointer - * @key: 64-bit encoding of PASID and address - * - * This should be called when a page fault has been handled. Any - * future interrupt with this key will be processed as a new - * page fault. 
- */ -void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key) -{ - unsigned long flags; - int r; - - if (!adev->irq.ih.faults) - return; - - spin_lock_irqsave(&adev->irq.ih.faults->lock, flags); - - r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL); - if (!WARN_ON_ONCE(r < 0)) { - adev->irq.ih.faults->count--; - WARN_ON_ONCE(adev->irq.ih.faults->count < 0); - } - - spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags); -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index a23e1c0bed93..0d5b3f5201d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -24,7 +24,6 @@ #ifndef __AMDGPU_IH_H__ #define __AMDGPU_IH_H__ -#include #include "soc15_ih_clientid.h" struct amdgpu_device; @@ -32,13 +31,6 @@ struct amdgpu_device; #define AMDGPU_IH_CLIENTID_LEGACY 0 #define AMDGPU_IH_CLIENTID_MAX SOC15_IH_CLIENTID_MAX -#define AMDGPU_PAGEFAULT_HASH_BITS 8 -struct amdgpu_retryfault_hashtable { - DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); - spinlock_t lock; - int count; -}; - /* * R6xx+ IH ring */ @@ -57,7 +49,6 @@ struct amdgpu_ih_ring { bool use_doorbell; bool use_bus_addr; dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */ - struct amdgpu_retryfault_hashtable *faults; }; #define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4 @@ -95,7 +86,5 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, bool use_bus_addr); void amdgpu_ih_ring_fini(struct amdgpu_device *adev); int amdgpu_ih_process(struct amdgpu_device *adev); -int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key); -void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 136b00412dc8..be1659fedf94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2717,6 +2717,22 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, adev->vm_manager.fragment_size); } +static struct amdgpu_retryfault_hashtable *init_fault_hash(void) +{ + struct amdgpu_retryfault_hashtable *fault_hash; + + fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL); + if (!fault_hash) + return fault_hash; + + INIT_CHASH_TABLE(fault_hash->hash, + AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); + spin_lock_init(&fault_hash->lock); + fault_hash->count = 0; + + return fault_hash; +} + /** * amdgpu_vm_init - initialize a vm instance * @@ -2805,6 +2821,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->pasid = pasid; } + vm->fault_hash = init_fault_hash(); + if (!vm->fault_hash) { + r = -ENOMEM; + goto error_free_root; + } + INIT_KFIFO(vm->faults); vm->fault_credit = 16; @@ -2998,7 +3020,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) /* Clear pending page faults from IH when the VM is destroyed */ while (kfifo_get(&vm->faults, &fault)) - amdgpu_ih_clear_fault(adev, fault); + amdgpu_vm_clear_fault(vm->fault_hash, fault); if (vm->pasid) { unsigned long flags; @@ -3008,6 +3030,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } + kfree(vm->fault_hash); + vm->fault_hash = NULL; + drm_sched_entity_destroy(&vm->entity); if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { @@ -3208,3 +3233,78 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) } } } + +/** + * amdgpu_vm_add_fault - Add a page fault record to fault hash table + * + * 
@fault_hash: fault hash table + * @key: 64-bit encoding of PASID and address + * + * This should be called when a retry page fault interrupt is + * received. If this is a new page fault, it will be added to a hash + * table. The return value indicates whether this is a new fault, or + * a fault that was already known and is already being handled. + * + * If there are too many pending page faults, this will fail. Retry + * interrupts should be ignored in this case until there is enough + * free space. + * + * Returns 0 if the fault was added, 1 if the fault was already known, + * -ENOSPC if there are too many pending faults. + */ +int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key) +{ + unsigned long flags; + int r = -ENOSPC; + + if (WARN_ON_ONCE(!fault_hash)) + /* Should be allocated in amdgpu_vm_init + */ + return r; + + spin_lock_irqsave(&fault_hash->lock, flags); + + /* Only let the hash table fill up to 50% for best performance */ + if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1))) + goto unlock_out; + + r = chash_table_copy_in(&fault_hash->hash, key, NULL); + if (!r) + fault_hash->count++; + + /* chash_table_copy_in should never fail unless we're losing count */ + WARN_ON_ONCE(r < 0); + +unlock_out: + spin_unlock_irqrestore(&fault_hash->lock, flags); + return r; +} + +/** + * amdgpu_vm_clear_fault - Remove a page fault record + * + * @fault_hash: fault hash table + * @key: 64-bit encoding of PASID and address + * + * This should be called when a page fault has been handled. Any + * future interrupt with this key will be processed as a new + * page fault. + */ +void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key) +{ + unsigned long flags; + int r; + + if (!fault_hash) + return; + + spin_lock_irqsave(&fault_hash->lock, flags); + + r = chash_table_remove(&fault_hash->hash, key, NULL); + if (!WARN_ON_ONCE(r < 0)) { + fault_hash->count--; + WARN_ON_ONCE(fault_hash->count < 0); + } + + spin_unlock_irqrestore(&fault_hash->lock, flags); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index e275ee7c1bc1..12d21eec4568 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -30,6 +30,7 @@ #include #include #include +#include #include "amdgpu_sync.h" #include "amdgpu_ring.h" @@ -178,6 +179,13 @@ struct amdgpu_task_info { pid_t tgid; }; +#define AMDGPU_PAGEFAULT_HASH_BITS 8 +struct amdgpu_retryfault_hashtable { + DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); + spinlock_t lock; + int count; +}; + struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; @@ -240,6 +248,7 @@ struct amdgpu_vm { struct ttm_lru_bulk_move lru_bulk_move; /* mark whether can do the bulk move */ bool bulk_moveable; + struct amdgpu_retryfault_hashtable *fault_hash; }; struct amdgpu_vm_manager { @@ -355,4 +364,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key); + +void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 5ae5ed2e62d6..acbe5a770207 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -265,35 +265,36 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device 
*adev) return true; } - addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12); - key = AMDGPU_VM_FAULT(pasid, addr); - r = amdgpu_ih_add_fault(adev, key); - - /* Hash table is full or the fault is already being processed, - * ignore further page faults - */ - if (r != 0) - goto ignore_iv; - /* Track retry faults in per-VM fault FIFO. */ spin_lock(&adev->vm_manager.pasid_lock); vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12); + key = AMDGPU_VM_FAULT(pasid, addr); if (!vm) { /* VM not found, process it normally */ spin_unlock(&adev->vm_manager.pasid_lock); return true; + } else { + r = amdgpu_vm_add_fault(vm->fault_hash, key); + + /* Hash table is full or the fault is already being processed, + * ignore further page faults + */ + if (r != 0) { + spin_unlock(&adev->vm_manager.pasid_lock); + goto ignore_iv; + } } /* No locking required with single writer and single reader */ r = kfifo_put(&vm->faults, key); if (!r) { /* FIFO is full. Ignore it until there is space */ + amdgpu_vm_clear_fault(vm->fault_hash, key); spin_unlock(&adev->vm_manager.pasid_lock); - amdgpu_ih_clear_fault(adev, key); goto ignore_iv; } - spin_unlock(&adev->vm_manager.pasid_lock); + spin_unlock(&adev->vm_manager.pasid_lock); /* It's the first fault for this address, process it normally */ return true; @@ -386,14 +387,6 @@ static int vega10_ih_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1; - adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL); - if (!adev->irq.ih.faults) - return -ENOMEM; - INIT_CHASH_TABLE(adev->irq.ih.faults->hash, - AMDGPU_PAGEFAULT_HASH_BITS, 8, 0); - spin_lock_init(&adev->irq.ih.faults->lock); - adev->irq.ih.faults->count = 0; - r = amdgpu_irq_init(adev); return r; @@ -406,9 +399,6 @@ static int vega10_ih_sw_fini(void *handle) amdgpu_irq_fini(adev); amdgpu_ih_ring_fini(adev); - kfree(adev->irq.ih.faults); - adev->irq.ih.faults = NULL; - return 0; } -- GitLab From 01fcfc83fe07ae42af707c3217f533fb350d4c19 Mon Sep 17 00:00:00 2001 From: David Francis Date: Tue, 11 Sep 2018 13:41:01 -0400 Subject: [PATCH 1405/1692] drm/amd: Add ucode DMCU support DMCU (Display Microcontroller Unit) is a GPU chip involved in eDP features like Adaptive Backlight Modulation and Panel Self Refresh. DMCU has two pieces of firmware: the ERAM and the interrupt vectors, which must be loaded separately. To this end, the DMCU firmware has a custom header and parsing logic similar to MEC, to extract the two ucodes from a single struct firmware.
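The split itself is plain header arithmetic: the ERAM image is the ucode payload minus the interrupt vectors, and the vectors start intv_offset_bytes past the end of the header. A small sketch of that calculation (the struct is a simplified stand-in for dmcu_firmware_header_v1_0 without the common header fields, and the sample values are made up, not taken from a real raven_dmcu.bin):

#include <stdint.h>
#include <stdio.h>

struct dmcu_header {
	uint32_t ucode_array_offset_bytes;	/* start of the ucode payload in the blob */
	uint32_t ucode_size_bytes;		/* ERAM plus interrupt vectors */
	uint32_t intv_offset_bytes;		/* vectors offset from end of header */
	uint32_t intv_size_bytes;		/* size of the interrupt vectors */
};

int main(void)
{
	struct dmcu_header hdr = {
		.ucode_array_offset_bytes = 256,
		.ucode_size_bytes         = 0x8000,
		.intv_offset_bytes        = 0x6000,
		.intv_size_bytes          = 0x2000,
	};
	uint32_t eram_offset = hdr.ucode_array_offset_bytes;
	uint32_t eram_size   = hdr.ucode_size_bytes - hdr.intv_size_bytes;
	uint32_t intv_offset = hdr.ucode_array_offset_bytes + hdr.intv_offset_bytes;
	uint32_t intv_size   = hdr.intv_size_bytes;

	printf("ERAM: offset 0x%x, size 0x%x\n", (unsigned)eram_offset, (unsigned)eram_size);
	printf("INTV: offset 0x%x, size 0x%x\n", (unsigned)intv_offset, (unsigned)intv_size);
	return 0;
}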
Signed-off-by: David Francis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 21 +++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 10 ++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index a942fd28dae8..1fa8bc337859 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -322,6 +322,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, { const struct common_firmware_header *header = NULL; const struct gfx_firmware_header_v1_0 *cp_hdr = NULL; + const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL; if (NULL == ucode->fw) return 0; @@ -333,8 +334,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, return 0; header = (const struct common_firmware_header *)ucode->fw->data; - cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; + dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP || (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && @@ -343,7 +344,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT && ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL && ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) { + ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM && + ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM && + ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) { ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + @@ -365,6 +368,20 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, le32_to_cpu(header->ucode_array_offset_bytes) + le32_to_cpu(cp_hdr->jt_offset) * 4), ucode->ucode_size); + } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) { + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - + le32_to_cpu(dmcu_hdr->intv_size_bytes); + + memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes)), + ucode->ucode_size); + } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) { + ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes); + + memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes) + + le32_to_cpu(dmcu_hdr->intv_offset_bytes)), + ucode->ucode_size); } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) { ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index b358e7519987..8f3f1117728c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -157,6 +157,13 @@ struct gpu_info_firmware_header_v1_0 { uint16_t version_minor; /* version */ }; +/* version_major=1, version_minor=0 */ +struct dmcu_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t intv_offset_bytes; /* interrupt vectors offset from end of header, in bytes */ + uint32_t intv_size_bytes; /* size of interrupt vectors, in bytes */ +}; + /* header is fixed size */ union amdgpu_firmware_header { struct common_firmware_header common; @@ -170,6 +177,7 @@ union 
amdgpu_firmware_header { struct sdma_firmware_header_v1_0 sdma; struct sdma_firmware_header_v1_1 sdma_v1_1; struct gpu_info_firmware_header_v1_0 gpu_info; + struct dmcu_firmware_header_v1_0 dmcu; uint8_t raw[0x100]; }; @@ -196,6 +204,8 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_UVD1, AMDGPU_UCODE_ID_VCE, AMDGPU_UCODE_ID_VCN, + AMDGPU_UCODE_ID_DMCU_ERAM, + AMDGPU_UCODE_ID_DMCU_INTV, AMDGPU_UCODE_ID_MAXIMUM, }; -- GitLab From 6b7eab2ce60d2363b0e4bfea6667439b926bcf54 Mon Sep 17 00:00:00 2001 From: David Francis Date: Tue, 11 Sep 2018 13:46:41 -0400 Subject: [PATCH 1406/1692] drm/amd: Add PSP DMCU support DMCU (Display Microcontroller Unit) is a GPU chip involved in eDP features like Adaptive Backlight Modulation and Panel Self Refresh. PSP is already equipped to handle DMCU firmware loading, all that is needed is to translate between the new DMCU ucode ID and the equivalent psp_gfx_fw_type. Signed-off-by: David Francis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 02be34e72ed9..240dc8c85867 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -91,6 +91,12 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type * case AMDGPU_UCODE_ID_VCN: *type = GFX_FW_TYPE_VCN; break; + case AMDGPU_UCODE_ID_DMCU_ERAM: + *type = GFX_FW_TYPE_DMCU_ERAM; + break; + case AMDGPU_UCODE_ID_DMCU_INTV: + *type = GFX_FW_TYPE_DMCU_ISR; + break; case AMDGPU_UCODE_ID_MAXIMUM: default: return -EINVAL; -- GitLab From a94d5569b23209306220fabb2a8d42d0f966d318 Mon Sep 17 00:00:00 2001 From: David Francis Date: Tue, 11 Sep 2018 13:49:49 -0400 Subject: [PATCH 1407/1692] drm/amd: Add DM DMCU support DMCU (Display Microcontroller Unit) is a GPU chip involved in eDP features like Adaptive Backlight Modulation and Panel Self Refresh. DC is already fully equipped to initialize DMCU as long as the firmware is loaded. At the moment only the raven firmware is available. A single .bin file is loaded by the kernel's loading mechanism and split into two ucodes according to the header. DMCU is optional, so if the firmware is not found, no error or warning is raised. 
Signed-off-by: David Francis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 91 ++++++++++++++++++- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 + 2 files changed, 92 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9fd583c616e0..eccae63d3ef1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -30,6 +30,7 @@ #include "vid.h" #include "amdgpu.h" #include "amdgpu_display.h" +#include "amdgpu_ucode.h" #include "atom.h" #include "amdgpu_dm.h" #include "amdgpu_pm.h" @@ -50,6 +51,7 @@ #include #include #include +#include #include #include @@ -71,6 +73,9 @@ #include "modules/inc/mod_freesync.h" +#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" +MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); + /* basic init/fini API */ static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); @@ -514,13 +519,97 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) return; } -static int dm_sw_init(void *handle) +static int load_dmcu_fw(struct amdgpu_device *adev) { + const char *fw_name_dmcu; + int r; + const struct dmcu_firmware_header_v1_0 *hdr; + + switch(adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + case CHIP_POLARIS12: + case CHIP_VEGAM: + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_VEGA20: + return 0; + case CHIP_RAVEN: + fw_name_dmcu = FIRMWARE_RAVEN_DMCU; + break; + default: + DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); + return -1; + } + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); + return 0; + } + + r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); + if (r == -ENOENT) { + /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ + DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); + adev->dm.fw_dmcu = NULL; + return 0; + } + if (r) { + dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", + fw_name_dmcu); + return r; + } + + r = amdgpu_ucode_validate(adev->dm.fw_dmcu); + if (r) { + dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", + fw_name_dmcu); + release_firmware(adev->dm.fw_dmcu); + adev->dm.fw_dmcu = NULL; + return r; + } + + hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); + + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); + + DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); + return 0; } +static int dm_sw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return load_dmcu_fw(adev); +} + static int dm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if(adev->dm.fw_dmcu) { + 
release_firmware(adev->dm.fw_dmcu); + adev->dm.fw_dmcu = NULL; + } + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index c159584c04f7..9a57c654943a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -129,6 +129,8 @@ struct amdgpu_display_manager { struct drm_atomic_state *cached_state; struct dm_comressor_info compressor; + + const struct firmware *fw_dmcu; }; struct amdgpu_dm_connector { -- GitLab From 8901a65f080ad6f4d7c3ef9f23c6f3a0e3e194aa Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Wed, 12 Sep 2018 08:59:07 +0800 Subject: [PATCH 1408/1692] drm/amd/display: fix ptr_ret.cocci warnings drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c:771:1-3: WARNING: PTR_ERR_OR_ZERO can be used Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Generated by: scripts/coccinelle/api/ptr_ret.cocci Fixes: e498eb713604 ("drm/amd/display: Add support for hw_state logging via debugfs") CC: Nicholas Kazlauskas Signed-off-by: kbuild test robot Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 35ca732f7ffe..0ef4a40d2247 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -801,8 +801,5 @@ int dtn_debugfs_init(struct amdgpu_device *adev) adev, &dtn_log_fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); - - return 0; + return PTR_ERR_OR_ZERO(ent); } -- GitLab From dd066823db2ac4e22f721ec85190817b58059a54 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 12 Sep 2018 14:06:10 -0700 Subject: [PATCH 1409/1692] bpf/verifier: disallow pointer subtraction Subtraction of pointers was accidentally allowed for unpriv programs by commit 82abbf8d2fc4. Revert that part of commit. Fixes: 82abbf8d2fc4 ("bpf: do not allow root to mangle valid pointers") Reported-by: Jann Horn Acked-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 92246117d2b0..bb07e74b34a2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3163,7 +3163,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, * an arbitrary scalar. Disallow all math except * pointer subtraction */ - if (opcode == BPF_SUB){ + if (opcode == BPF_SUB && env->allow_ptr_leaks) { mark_reg_unknown(env, regs, insn->dst_reg); return 0; } -- GitLab From 4c3d795cb012a378855543a775408fba1ccff6f2 Mon Sep 17 00:00:00 2001 From: Tushar Dave Date: Wed, 12 Sep 2018 22:15:29 +0200 Subject: [PATCH 1410/1692] bpf: use __GFP_COMP while allocating page Helper bpg_msg_pull_data() can allocate multiple pages while linearizing multiple scatterlist elements into one shared page. However, if the shared page has size > PAGE_SIZE, using copy_page_to_iter() causes below warning. e.g. [ 6367.019832] WARNING: CPU: 2 PID: 7410 at lib/iov_iter.c:825 page_copy_sane.part.8+0x0/0x8 To avoid above warning, use __GFP_COMP while allocating multiple contiguous pages. 
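[Illustration, not part of the patch: a minimal sketch of the allocation pattern the fix relies on. Any order > 0 allocation whose pages are later treated as one unit (for example by copy_page_to_iter()) should be a compound page, hence __GFP_COMP. Helper name is hypothetical.]

#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_linear_buf(size_t len, struct page **pagep)
{
	struct page *page;

	/* compound allocation so the whole area can be handled via one page */
	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
			   get_order(len));
	if (!page)
		return NULL;

	*pagep = page;
	return page_address(page);
}
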
Fixes: 015632bb30da ("bpf: sk_msg program helper bpf_sk_msg_pull_data") Signed-off-by: Tushar Dave Signed-off-by: Daniel Borkmann --- net/core/filter.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/filter.c b/net/core/filter.c index aecdeba052d3..5e00f2b85a56 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2344,7 +2344,8 @@ BPF_CALL_4(bpf_msg_pull_data, if (unlikely(bytes_sg_total > copy)) return -EINVAL; - page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); + page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, + get_order(copy)); if (unlikely(!page)) return -ENOMEM; p = page_address(page); -- GitLab From 097f5863b1a0c9901f180bbd56ae7d630655faaa Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 6 Sep 2018 12:47:01 +0300 Subject: [PATCH 1411/1692] cifs: read overflow in is_valid_oplock_break() We need to verify that the "data_offset" is within bounds. Reported-by: Dr Silvio Cesare of InfoSect Signed-off-by: Dan Carpenter Signed-off-by: Steve French Reviewed-by: Aurelien Aptel --- fs/cifs/misc.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index dacb2c05674c..6926685e513c 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -402,9 +402,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) (struct smb_com_transaction_change_notify_rsp *)buf; struct file_notify_information *pnotify; __u32 data_offset = 0; + size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length); + if (get_bcc(buf) > sizeof(struct file_notify_information)) { data_offset = le32_to_cpu(pSMBr->DataOffset); + if (data_offset > + len - sizeof(struct file_notify_information)) { + cifs_dbg(FYI, "invalid data_offset %u\n", + data_offset); + return true; + } pnotify = (struct file_notify_information *) ((char *)&pSMBr->hdr.Protocol + data_offset); cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n", -- GitLab From b228ba1cb95afbaeeb86cf06cd9fd6f6369c3b14 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 12 Sep 2018 18:21:11 -0600 Subject: [PATCH 1412/1692] null_blk: fix zoned support for non-rq based operation The supported added for zones in null_blk seem to assume that only rq based operation is possible. But this depends on the queue_mode setting, if this is set to 0, then cmd->bio is what we need to be operating on. Right now any attempt to load null_blk with queue_mode=0 will insta-crash, since cmd->rq is NULL and null_handle_cmd() assumes it to always be set. Make the zoned code deal with bio's instead, or pass in the appropriate sector/nr_sectors instead. 
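[Illustration, not part of the patch: a generic helper with a hypothetical name showing the accessor pairing the fix uses — derive op, start sector and length from whichever representation the configured queue_mode actually populates.]

#include <linux/blkdev.h>

static void cmd_geometry(struct bio *bio, struct request *rq, bool bio_based,
			 unsigned int *op, sector_t *sector,
			 unsigned int *nr_sectors)
{
	if (bio_based) {			/* queue_mode == 0: only the bio is valid */
		*op = bio_op(bio);
		*sector = bio->bi_iter.bi_sector;
		*nr_sectors = bio->bi_iter.bi_size >> 9;
	} else {				/* rq based operation */
		*op = req_op(rq);
		*sector = blk_rq_pos(rq);
		*nr_sectors = blk_rq_sectors(rq);
	}
}
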
Fixes: ca4b2a011948 ("null_blk: add zone support") Tested-by: Omar Sandoval Signed-off-by: Jens Axboe --- drivers/block/null_blk.h | 17 +++++++------ drivers/block/null_blk_main.c | 45 ++++++++++++++++++++++++++++------ drivers/block/null_blk_zoned.c | 34 +++++++++++-------------- 3 files changed, 62 insertions(+), 34 deletions(-) diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h index d81781f22dba..34e0030f0592 100644 --- a/drivers/block/null_blk.h +++ b/drivers/block/null_blk.h @@ -87,10 +87,10 @@ struct nullb { #ifdef CONFIG_BLK_DEV_ZONED int null_zone_init(struct nullb_device *dev); void null_zone_exit(struct nullb_device *dev); -blk_status_t null_zone_report(struct nullb *nullb, - struct nullb_cmd *cmd); -void null_zone_write(struct nullb_cmd *cmd); -void null_zone_reset(struct nullb_cmd *cmd); +blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio); +void null_zone_write(struct nullb_cmd *cmd, sector_t sector, + unsigned int nr_sectors); +void null_zone_reset(struct nullb_cmd *cmd, sector_t sector); #else static inline int null_zone_init(struct nullb_device *dev) { @@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev) } static inline void null_zone_exit(struct nullb_device *dev) {} static inline blk_status_t null_zone_report(struct nullb *nullb, - struct nullb_cmd *cmd) + struct bio *bio) { return BLK_STS_NOTSUPP; } -static inline void null_zone_write(struct nullb_cmd *cmd) {} -static inline void null_zone_reset(struct nullb_cmd *cmd) {} +static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector, + unsigned int nr_sectors) +{ +} +static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {} #endif /* CONFIG_BLK_DEV_ZONED */ #endif /* __NULL_BLK_H */ diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 6127e3ff7b4b..093b614d6524 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb) } } +static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd) +{ + struct nullb_device *dev = cmd->nq->dev; + + if (dev->queue_mode == NULL_Q_BIO) { + if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) { + cmd->error = null_zone_report(nullb, cmd->bio); + return true; + } + } else { + if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) { + cmd->error = null_zone_report(nullb, cmd->rq->bio); + return true; + } + } + + return false; +} + static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) { struct nullb_device *dev = cmd->nq->dev; struct nullb *nullb = dev->nullb; int err = 0; - if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) { - cmd->error = null_zone_report(nullb, cmd); + if (cmd_report_zone(nullb, cmd)) goto out; - } if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { struct request *rq = cmd->rq; @@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) cmd->error = errno_to_blk_status(err); if (!cmd->error && dev->zoned) { - if (req_op(cmd->rq) == REQ_OP_WRITE) - null_zone_write(cmd); - else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET) - null_zone_reset(cmd); + sector_t sector; + unsigned int nr_sectors; + int op; + + if (dev->queue_mode == NULL_Q_BIO) { + op = bio_op(cmd->bio); + sector = cmd->bio->bi_iter.bi_sector; + nr_sectors = cmd->bio->bi_iter.bi_size >> 9; + } else { + op = req_op(cmd->rq); + sector = blk_rq_pos(cmd->rq); + nr_sectors = blk_rq_sectors(cmd->rq); + } + + if (op == REQ_OP_WRITE) + null_zone_write(cmd, sector, nr_sectors); + else if (op == 
REQ_OP_ZONE_RESET) + null_zone_reset(cmd, sector); } out: /* Complete IO by inline, softirq or timer */ diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index a979ca00d7be..7c6b86d98700 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev) kvfree(dev->zones); } -static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, - unsigned int zno, unsigned int nr_zones) +static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio, + unsigned int zno, unsigned int nr_zones) { struct blk_zone_report_hdr *hdr = NULL; struct bio_vec bvec; @@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, void *addr; unsigned int zones_to_cpy; - bio_for_each_segment(bvec, rq->bio, iter) { + bio_for_each_segment(bvec, bio, iter) { addr = kmap_atomic(bvec.bv_page); zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone); @@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, } } -blk_status_t null_zone_report(struct nullb *nullb, - struct nullb_cmd *cmd) +blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio) { struct nullb_device *dev = nullb->dev; - struct request *rq = cmd->rq; - unsigned int zno = null_zone_no(dev, blk_rq_pos(rq)); + unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector); unsigned int nr_zones = dev->nr_zones - zno; - unsigned int max_zones = (blk_rq_bytes(rq) / - sizeof(struct blk_zone)) - 1; + unsigned int max_zones; + max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1; nr_zones = min_t(unsigned int, nr_zones, max_zones); - - null_zone_fill_rq(nullb->dev, rq, zno, nr_zones); + null_zone_fill_bio(nullb->dev, bio, zno, nr_zones); return BLK_STS_OK; } -void null_zone_write(struct nullb_cmd *cmd) +void null_zone_write(struct nullb_cmd *cmd, sector_t sector, + unsigned int nr_sectors) { struct nullb_device *dev = cmd->nq->dev; - struct request *rq = cmd->rq; - sector_t sector = blk_rq_pos(rq); - unsigned int rq_sectors = blk_rq_sectors(rq); unsigned int zno = null_zone_no(dev, sector); struct blk_zone *zone = &dev->zones[zno]; @@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd) case BLK_ZONE_COND_EMPTY: case BLK_ZONE_COND_IMP_OPEN: /* Writes must be at the write pointer position */ - if (blk_rq_pos(rq) != zone->wp) { + if (sector != zone->wp) { cmd->error = BLK_STS_IOERR; break; } @@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd) if (zone->cond == BLK_ZONE_COND_EMPTY) zone->cond = BLK_ZONE_COND_IMP_OPEN; - zone->wp += rq_sectors; + zone->wp += nr_sectors; if (zone->wp == zone->start + zone->len) zone->cond = BLK_ZONE_COND_FULL; break; @@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd) } } -void null_zone_reset(struct nullb_cmd *cmd) +void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) { struct nullb_device *dev = cmd->nq->dev; - struct request *rq = cmd->rq; - unsigned int zno = null_zone_no(dev, blk_rq_pos(rq)); + unsigned int zno = null_zone_no(dev, sector); struct blk_zone *zone = &dev->zones[zno]; zone->cond = BLK_ZONE_COND_EMPTY; -- GitLab From 3483f08106fcd0e8edad2b9f2fc4726d25177799 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 13 Sep 2018 10:56:38 +1000 Subject: [PATCH 1413/1692] drm/nouveau/devinit: fix warning when PMU/PRE_OS is missing Messed up when sending pull request and sent an outdated version of previous patch, this fixes it up to remove warnings. 
Signed-off-by: Ben Skeggs --- .../drm/nouveau/nvkm/subdev/devinit/gm200.c | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c index d65959ef0564..17235e940ca9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c @@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post, struct nvkm_bios *bios = subdev->device->bios; struct nvbios_pmuR pmu; - if (!nvbios_pmuRm(bios, type, &pmu)) { - nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type); + if (!nvbios_pmuRm(bios, type, &pmu)) return -EINVAL; - } if (!post) return 0; @@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post) return -EINVAL; } + /* Upload DEVINIT application from VBIOS onto PMU. */ ret = pmu_load(init, 0x04, post, &exec, &args); - if (ret) + if (ret) { + nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n"); return ret; + } - /* upload first chunk of init data */ + /* Upload tables required by opcodes in boot scripts. */ if (post) { - // devinit tables u32 pmu = pmu_args(init, args + 0x08, 0x08); u32 img = nvbios_rd16(bios, bit_I.offset + 0x14); u32 len = nvbios_rd16(bios, bit_I.offset + 0x16); pmu_data(init, pmu, img, len); } - /* upload second chunk of init data */ + /* Upload boot scripts. */ if (post) { - // devinit boot scripts u32 pmu = pmu_args(init, args + 0x08, 0x10); u32 img = nvbios_rd16(bios, bit_I.offset + 0x18); u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a); pmu_data(init, pmu, img, len); } - /* execute init tables */ + /* Execute DEVINIT. */ if (post) { nvkm_wr32(device, 0x10a040, 0x00005000); pmu_exec(init, exec); @@ -157,7 +156,9 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post) return -ETIMEDOUT; } - /* load and execute some other ucode image (bios therm?) */ + /* Optional: Execute PRE_OS application on PMU, which should at + * least take care of fans until a full PMU has been loaded. + */ pmu_load(init, 0x01, post, NULL, NULL); return 0; } -- GitLab From 3702a0585e64d70d5bf73bf3e943b8d6005b72c1 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Wed, 15 Aug 2018 16:11:25 -0500 Subject: [PATCH 1414/1692] crypto: ccp - add timeout support in the SEV command Currently, the CCP driver assumes that the SEV command issued to the PSP will always return (i.e. it will never hang). But recently, firmware bugs have shown that a command can hang. Since of the SEV commands are used in probe routines, this can cause boot hangs and/or loss of virtualization capabilities. To protect against firmware bugs, add a timeout in the SEV command execution flow. If a command does not complete within the specified timeout then return -ETIMEOUT and stop the driver from executing any further commands since the state of the SEV firmware is unknown. 
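[Illustration, not part of the patch: the generic form of the pattern the change introduces, with hypothetical names — bound the completion wait and latch a "dead" flag so later callers fail fast instead of issuing commands to firmware in an unknown state.]

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static bool dev_dead;

static int wait_for_cmd(wait_queue_head_t *wq, bool *done,
			unsigned int timeout_s)
{
	if (dev_dead)
		return -EBUSY;		/* refuse further commands */

	/* wait_event_timeout() returns 0 if the condition never became true */
	if (!wait_event_timeout(*wq, *done, timeout_s * HZ)) {
		dev_dead = true;	/* firmware state unknown: stop here */
		return -ETIMEDOUT;
	}

	return 0;
}
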
Cc: Tom Lendacky Cc: Gary Hook Cc: Herbert Xu Cc: linux-kernel@vger.kernel.org Signed-off-by: Brijesh Singh Signed-off-by: Herbert Xu --- drivers/crypto/ccp/psp-dev.c | 46 ++++++++++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 218739b961fe..72790d88236d 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; static struct psp_device *psp_master; +static int psp_cmd_timeout = 100; +module_param(psp_cmd_timeout, int, 0644); +MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); + +static int psp_probe_timeout = 5; +module_param(psp_probe_timeout, int, 0644); +MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); + +static bool psp_dead; +static int psp_timeout; + static struct psp_device *psp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; @@ -82,10 +93,19 @@ static irqreturn_t psp_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) +static int sev_wait_cmd_ioc(struct psp_device *psp, + unsigned int *reg, unsigned int timeout) { - wait_event(psp->sev_int_queue, psp->sev_int_rcvd); + int ret; + + ret = wait_event_timeout(psp->sev_int_queue, + psp->sev_int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); + + return 0; } static int sev_cmd_buffer_len(int cmd) @@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) if (!psp) return -ENODEV; + if (psp_dead) + return -EBUSY; + /* Get the physical address of the command buffer */ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; - dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n", - cmd, phys_msb, phys_lsb); + dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, sev_cmd_buffer_len(cmd), false); @@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); /* wait for command completion */ - sev_wait_cmd_ioc(psp, ®); + ret = sev_wait_cmd_ioc(psp, ®, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; if (psp_ret) *psp_ret = reg & PSP_CMDRESP_ERR_MASK; @@ -888,6 +922,8 @@ void psp_pci_init(void) psp_master = sp->psp_data; + psp_timeout = psp_probe_timeout; + if (sev_get_api_version()) goto err; -- GitLab From a49a83ab05e34edd6c71a4fbd062c9a7ba6d18aa Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Sep 2018 21:30:34 +0900 Subject: [PATCH 1415/1692] ALSA: firewire-digi00x: fix memory leak of private data Although private data of sound card instance is usually allocated in the tail of the instance, drivers in ALSA firewire stack allocate the private data before allocating the instance. In this case, the private data should be released explicitly at .private_free callback of the instance. This commit fixes memory leak following to the above design. 
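[Illustration, not part of the patch: the general shape of the fix for this and the following two firewire patches, using a hypothetical "foo" driver — private data that was kmalloc()ed separately, rather than via the extra_size argument of snd_card_new(), must be kfree()d explicitly from the card's private_free callback.]

#include <sound/core.h>
#include <linux/slab.h>

struct foo_private {
	int opened;			/* device state, allocated before the card */
};

static void foo_card_free(struct snd_card *card)
{
	struct foo_private *foo = card->private_data;

	/* release device resources held by foo here ... */
	kfree(foo);			/* explicit free of the separate allocation */
}

/* at probe time, after snd_card_new() and allocating foo:
 *	card->private_data = foo;
 *	card->private_free = foo_card_free;
 */
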
Fixes: 86c8dd7f4da3 ('ALSA: firewire-digi00x: delayed registration of sound card') Cc: # v4.7+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/digi00x/digi00x.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index 1f5e1d23f31a..ef689997d6a5 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x) fw_unit_put(dg00x->unit); mutex_destroy(&dg00x->mutex); + kfree(dg00x); } static void dg00x_card_free(struct snd_card *card) -- GitLab From 8d28277c065a974873c6781d44b7bcdcd8fb4e8a Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Sep 2018 21:31:05 +0900 Subject: [PATCH 1416/1692] ALSA: firewire-tascam: fix memory leak of private data Although private data of sound card instance is usually allocated in the tail of the instance, drivers in ALSA firewire stack allocate the private data before allocating the instance. In this case, the private data should be released explicitly at .private_free callback of the instance. This commit fixes memory leak following to the above design. Fixes: b610386c8afb ('ALSA: firewire-tascam: deleyed registration of sound card') Cc: # v4.7+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/tascam/tascam.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index 44ad41fb7374..d3fdc463a884 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm) fw_unit_put(tscm->unit); mutex_destroy(&tscm->mutex); + kfree(tscm); } static void tscm_card_free(struct snd_card *card) -- GitLab From 498fe23aad8e3b5a9554f55719c537603b4476ea Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Sep 2018 21:31:18 +0900 Subject: [PATCH 1417/1692] ALSA: oxfw: fix memory leak of private data Although private data of sound card instance is usually allocated in the tail of the instance, drivers in ALSA firewire stack allocate the private data before allocating the instance. In this case, the private data should be released explicitly at .private_free callback of the instance. This commit fixes memory leak following to the above design. Fixes: 6c29230e2a5f ('ALSA: oxfw: delayed registration of sound card') Cc: # v4.7+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/oxfw/oxfw.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 1e5b2c802635..fd34ef2ac679 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw) kfree(oxfw->spec); mutex_destroy(&oxfw->mutex); + kfree(oxfw); } /* -- GitLab From 22d0bd82cc7cec7d9ed4bd5913f3ab65643364be Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 11 Sep 2018 14:33:58 +0800 Subject: [PATCH 1418/1692] ipv6: use rt6_info members when dst is set in rt6_fill_node In inet6_rtm_getroute, since Commit 93531c674315 ("net/ipv6: separate handling of FIB entries from dst based routes"), it has used rt->from to dump route info instead of rt. However for some route like cache, some of its information like flags or gateway is not the same as that of the 'from' one. It caused 'ip route get' to dump the wrong route information. 
In Jianlin's testing, the output information even lost the expiration time for a pmtu route cache due to the wrong fib6_flags. So change to use rt6_info members for dst addr, src addr, flags and gateway when it tries to dump a route entry without fibmatch set. v1->v2: - not use rt6i_prefsrc. - also fix the gw dump issue. Fixes: 93531c674315 ("net/ipv6: separate handling of FIB entries from dst based routes") Reported-by: Jianlin Shi Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/ipv6/route.c | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 18e00ce1719a..3eed045c65a5 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -4670,20 +4670,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, int iif, int type, u32 portid, u32 seq, unsigned int flags) { - struct rtmsg *rtm; + struct rt6_info *rt6 = (struct rt6_info *)dst; + struct rt6key *rt6_dst, *rt6_src; + u32 *pmetrics, table, rt6_flags; struct nlmsghdr *nlh; + struct rtmsg *rtm; long expires = 0; - u32 *pmetrics; - u32 table; nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); if (!nlh) return -EMSGSIZE; + if (rt6) { + rt6_dst = &rt6->rt6i_dst; + rt6_src = &rt6->rt6i_src; + rt6_flags = rt6->rt6i_flags; + } else { + rt6_dst = &rt->fib6_dst; + rt6_src = &rt->fib6_src; + rt6_flags = rt->fib6_flags; + } + rtm = nlmsg_data(nlh); rtm->rtm_family = AF_INET6; - rtm->rtm_dst_len = rt->fib6_dst.plen; - rtm->rtm_src_len = rt->fib6_src.plen; + rtm->rtm_dst_len = rt6_dst->plen; + rtm->rtm_src_len = rt6_src->plen; rtm->rtm_tos = 0; if (rt->fib6_table) table = rt->fib6_table->tb6_id; @@ -4698,7 +4709,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = rt->fib6_protocol; - if (rt->fib6_flags & RTF_CACHE) + if (rt6_flags & RTF_CACHE) rtm->rtm_flags |= RTM_F_CLONED; if (dest) { @@ -4706,7 +4717,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; rtm->rtm_dst_len = 128; } else if (rtm->rtm_dst_len) - if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr)) + if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr)) goto nla_put_failure; #ifdef CONFIG_IPV6_SUBTREES if (src) { @@ -4714,12 +4725,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; rtm->rtm_src_len = 128; } else if (rtm->rtm_src_len && - nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr)) + nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr)) goto nla_put_failure; #endif if (iif) { #ifdef CONFIG_IPV6_MROUTE - if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) { + if (ipv6_addr_is_multicast(&rt6_dst->addr)) { int err = ip6mr_get_route(net, skb, rtm, portid); if (err == 0) @@ -4754,7 +4765,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, /* For multipath routes, walk the siblings list and add * each as a nexthop within RTA_MULTIPATH. */ - if (rt->fib6_nsiblings) { + if (rt6) { + if (rt6_flags & RTF_GATEWAY && + nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway)) + goto nla_put_failure; + + if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex)) + goto nla_put_failure; + } else if (rt->fib6_nsiblings) { struct fib6_info *sibling, *next_sibling; struct nlattr *mp; @@ -4777,7 +4795,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; } - if (rt->fib6_flags & RTF_EXPIRES) { + if (rt6_flags & RTF_EXPIRES) { expires = dst ? 
dst->expires : rt->expires; expires -= jiffies; } @@ -4785,7 +4803,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) goto nla_put_failure; - if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags))) + if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags))) goto nla_put_failure; -- GitLab From ad4f15dc2c70b1de5e0a64d27335962fbc9cf71c Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 11 Sep 2018 09:04:48 +0200 Subject: [PATCH 1419/1692] xen/netfront: don't bug in case of too many frags Commit 57f230ab04d291 ("xen/netfront: raise max number of slots in xennet_get_responses()") raised the max number of allowed slots by one. This seems to be problematic in some configurations with netback using a larger MAX_SKB_FRAGS value (e.g. old Linux kernel with MAX_SKB_FRAGS defined as 18 instead of nowadays 17). Instead of BUG_ON() in this case just fall back to retransmission. Fixes: 57f230ab04d291 ("xen/netfront: raise max number of slots in xennet_get_responses()") Cc: stable@vger.kernel.org Signed-off-by: Juergen Gross Signed-off-by: David S. Miller --- drivers/net/xen-netfront.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 9407acbd19a9..f17f602e6171 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -908,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } - BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); + if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { + queue->rx.rsp_cons = ++cons; + kfree_skb(nskb); + return ~0U; + } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(nfrag), @@ -1045,6 +1049,8 @@ static int xennet_poll(struct napi_struct *napi, int budget) skb->len += rx->status; i = xennet_fill_frags(queue, skb, &tmpq); + if (unlikely(i == ~0U)) + goto err; if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; -- GitLab From 37a3a98ef601f89100e3bb657fb0e190b857028c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 10 Sep 2018 16:20:25 +0200 Subject: [PATCH 1420/1692] ALSA: hda - Enable runtime PM only for discrete GPU The recent change of vga_switcheroo allowed the runtime PM for HD-audio on AMD GPUs, but this also resulted in a regression. When the HD-audio controller driver gets runtime-suspended, HD-audio link is turned off, and the hotplug notification is ignored. This leads to the inconsistent audio state (the connection isn't notified and ELD is ignored). The best fix would be to implement the proper ELD notification via the audio component, but it's still not ready. As a quick workaround, this patch adds the check of runtime_idle and allows the runtime suspend only when the vga_switcheroo is bound with discrete GPU. That is, a system with a single GPU and APU would be again without runtime PM to keep the HD-audio link for the hotplug notification and ELD read out. Also, the codec->auto_runtime_pm flag is set only for the discrete GPU at the time GPU gets bound via vga_switcheroo (i.e. only dGPU is forcibly runtime-PM enabled), so that APU can still get the ELD notification. For identifying which GPU is bound, a new vga_switcheroo client callback, gpu_bound, is implemented. The vga_switcheroo simply calls this when GPU is bound, and tells whether it's dGPU or APU. 
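[Illustration, not part of the patch: a sketch of the new callback from the point of view of a hypothetical "foo" audio client — at bind time it learns whether it sits on the discrete GPU and can relax or tighten its runtime-PM policy accordingly.]

#include <linux/vga_switcheroo.h>
#include <linux/pci.h>

static void foo_set_gpu_state(struct pci_dev *pdev,
			      enum vga_switcheroo_state state)
{
	/* power the audio function up/down together with the GPU */
}

static bool foo_can_switch(struct pci_dev *pdev)
{
	return true;			/* no open device files in this sketch */
}

static void foo_gpu_bound(struct pci_dev *pdev,
			  enum vga_switcheroo_client_id id)
{
	if (id == VGA_SWITCHEROO_DIS) {
		/* discrete GPU: the HD-audio link may runtime suspend */
	} else {
		/* APU: keep the link up so ELD/hotplug notification works */
	}
}

static const struct vga_switcheroo_client_ops foo_vs_ops = {
	.set_gpu_state	= foo_set_gpu_state,
	.can_switch	= foo_can_switch,
	.gpu_bound	= foo_gpu_bound,
};
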
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=200945 Fixes: 07f4f97d7b4b ("vga_switcheroo: Use device link for HDA controller") Reported-by: Jian-Hong Pan Tested-by: Jian-Hong Pan Acked-by: Lukas Wunner Signed-off-by: Takashi Iwai --- drivers/gpu/vga/vga_switcheroo.c | 2 + include/linux/vga_switcheroo.h | 3 ++ sound/pci/hda/hda_intel.c | 86 +++++++++++++++++++++++--------- sound/pci/hda/hda_intel.h | 1 + 4 files changed, 69 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index a96bf46bc483..cf2a18571d48 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void) return; client->id = ret | ID_BIT_AUDIO; + if (client->ops->gpu_bound) + client->ops->gpu_bound(client->pdev, ret); } vga_switcheroo_debugfs_init(&vgasr_priv); diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index a34539b7f750..7e6ac0114d55 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -133,15 +133,18 @@ struct vga_switcheroo_handler { * @can_switch: check if the device is in a position to switch now. * Mandatory. The client should return false if a user space process * has one of its device files open + * @gpu_bound: notify the client id to audio client when the GPU is bound. * * Client callbacks. A client can be either a GPU or an audio device on a GPU. * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be * set to NULL. For audio clients, the @reprobe member is bogus. + * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients. */ struct vga_switcheroo_client_ops { void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); void (*reprobe)(struct pci_dev *dev); bool (*can_switch)(struct pci_dev *dev); + void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id); }; #if defined(CONFIG_VGA_SWITCHEROO) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1b2ce304152a..aa4c672dbaf7 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -365,8 +365,10 @@ enum { */ #ifdef SUPPORT_VGA_SWITCHEROO #define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo) +#define needs_eld_notify_link(chip) ((chip)->need_eld_notify_link) #else #define use_vga_switcheroo(chip) 0 +#define needs_eld_notify_link(chip) false #endif #define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ @@ -453,6 +455,7 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, #endif static int azx_acquire_irq(struct azx *chip, int do_disconnect); +static void set_default_power_save(struct azx *chip); /* * initialize the PCI registers @@ -1201,6 +1204,10 @@ static int azx_runtime_idle(struct device *dev) azx_bus(chip)->codec_powered || !chip->running) return -EBUSY; + /* ELD notification gets broken when HD-audio bus is off */ + if (needs_eld_notify_link(hda)) + return -EBUSY; + return 0; } @@ -1298,6 +1305,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci) return true; } +/* + * The discrete GPU cannot power down unless the HDA controller runtime + * suspends, so activate runtime PM on codecs even if power_save == 0. 
+ */ +static void setup_vga_switcheroo_runtime_pm(struct azx *chip) +{ + struct hda_intel *hda = container_of(chip, struct hda_intel, chip); + struct hda_codec *codec; + + if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) { + list_for_each_codec(codec, &chip->bus) + codec->auto_runtime_pm = 1; + /* reset the power save setup */ + if (chip->running) + set_default_power_save(chip); + } +} + +static void azx_vs_gpu_bound(struct pci_dev *pci, + enum vga_switcheroo_client_id client_id) +{ + struct snd_card *card = pci_get_drvdata(pci); + struct azx *chip = card->private_data; + struct hda_intel *hda = container_of(chip, struct hda_intel, chip); + + if (client_id == VGA_SWITCHEROO_DIS) + hda->need_eld_notify_link = 0; + setup_vga_switcheroo_runtime_pm(chip); +} + static void init_vga_switcheroo(struct azx *chip) { struct hda_intel *hda = container_of(chip, struct hda_intel, chip); @@ -1306,6 +1343,7 @@ static void init_vga_switcheroo(struct azx *chip) dev_info(chip->card->dev, "Handle vga_switcheroo audio client\n"); hda->use_vga_switcheroo = 1; + hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */ chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; pci_dev_put(p); } @@ -1314,6 +1352,7 @@ static void init_vga_switcheroo(struct azx *chip) static const struct vga_switcheroo_client_ops azx_vs_ops = { .set_gpu_state = azx_vs_set_state, .can_switch = azx_vs_can_switch, + .gpu_bound = azx_vs_gpu_bound, }; static int register_vga_switcheroo(struct azx *chip) @@ -1339,6 +1378,7 @@ static int register_vga_switcheroo(struct azx *chip) #define init_vga_switcheroo(chip) /* NOP */ #define register_vga_switcheroo(chip) 0 #define check_hdmi_disabled(pci) false +#define setup_vga_switcheroo_runtime_pm(chip) /* NOP */ #endif /* SUPPORT_VGA_SWITCHER */ /* @@ -1352,6 +1392,7 @@ static int azx_free(struct azx *chip) if (azx_has_pm_runtime(chip) && chip->running) pm_runtime_get_noresume(&pci->dev); + chip->running = 0; azx_del_card_list(chip); @@ -2230,6 +2271,25 @@ static struct snd_pci_quirk power_save_blacklist[] = { }; #endif /* CONFIG_PM */ +static void set_default_power_save(struct azx *chip) +{ + int val = power_save; + +#ifdef CONFIG_PM + if (pm_blacklist) { + const struct snd_pci_quirk *q; + + q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); + if (q && val) { + dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", + q->subvendor, q->subdevice); + val = 0; + } + } +#endif /* CONFIG_PM */ + snd_hda_set_power_save(&chip->bus, val * 1000); +} + /* number of codec slots for each chipset: 0 = default slots (i.e. 
4) */ static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { [AZX_DRIVER_NVIDIA] = 8, @@ -2241,9 +2301,7 @@ static int azx_probe_continue(struct azx *chip) struct hda_intel *hda = container_of(chip, struct hda_intel, chip); struct hdac_bus *bus = azx_bus(chip); struct pci_dev *pci = chip->pci; - struct hda_codec *codec; int dev = chip->dev_index; - int val; int err; hda->probe_continued = 1; @@ -2322,31 +2380,13 @@ static int azx_probe_continue(struct azx *chip) if (err < 0) goto out_free; + setup_vga_switcheroo_runtime_pm(chip); + chip->running = 1; azx_add_card_list(chip); - val = power_save; -#ifdef CONFIG_PM - if (pm_blacklist) { - const struct snd_pci_quirk *q; - - q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); - if (q && val) { - dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", - q->subvendor, q->subdevice); - val = 0; - } - } -#endif /* CONFIG_PM */ - /* - * The discrete GPU cannot power down unless the HDA controller runtime - * suspends, so activate runtime PM on codecs even if power_save == 0. - */ - if (use_vga_switcheroo(hda)) - list_for_each_codec(codec, &chip->bus) - codec->auto_runtime_pm = 1; + set_default_power_save(chip); - snd_hda_set_power_save(&chip->bus, val * 1000); if (azx_has_pm_runtime(chip)) pm_runtime_put_autosuspend(&pci->dev); diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h index e3a3d318d2e5..f59719e06b91 100644 --- a/sound/pci/hda/hda_intel.h +++ b/sound/pci/hda/hda_intel.h @@ -37,6 +37,7 @@ struct hda_intel { /* vga_switcheroo setup */ unsigned int use_vga_switcheroo:1; + unsigned int need_eld_notify_link:1; unsigned int vga_switcheroo_registered:1; unsigned int init_failed:1; /* delayed init failed */ -- GitLab From f5b9bac7451cfc962970cb3fa3a7027ffa69e369 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 11 Sep 2018 14:22:23 -0700 Subject: [PATCH 1421/1692] net_sched: notify filter deletion when deleting a chain When we delete a chain of filters, we need to notify user-space we are deleting each filters in this chain too. Fixes: 32a4f5ecd738 ("net: sched: introduce chain object to uapi") Cc: Jiri Pirko Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/cls_api.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 1a67af8a6e8c..0a75cb2e5e7b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1902,6 +1902,8 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, RTM_NEWCHAIN, false); break; case RTM_DELCHAIN: + tfilter_notify_chain(net, skb, block, q, parent, n, + chain, RTM_DELTFILTER); /* Flush the chain first as the user requested chain removal. */ tcf_chain_flush(chain); /* In case the chain was successfully deleted, put a reference -- GitLab From 831b624df1b420c8f9281ed1307a8db23afb72df Mon Sep 17 00:00:00 2001 From: Bin Yang Date: Wed, 12 Sep 2018 03:36:34 +0000 Subject: [PATCH 1422/1692] pstore: Fix incorrect persistent ram buffer mapping persistent_ram_vmap() returns the page start vaddr. persistent_ram_iomap() supports non-page-aligned mapping. persistent_ram_buffer_map() always adds offset-in-page to the vaddr returned from these two functions, which causes incorrect mapping of non-page-aligned persistent ram buffer. By default ftrace_size is 4096 and max_ftrace_cnt is nr_cpu_ids. Without this patch, the zone_sz in ramoops_init_przs() is 4096/nr_cpu_ids which might not be page aligned. If the offset-in-page > 2048, the vaddr will be in next page. 
If the next page is not mapped, it will cause kernel panic: [ 0.074231] BUG: unable to handle kernel paging request at ffffa19e0081b000 ... [ 0.075000] RIP: 0010:persistent_ram_new+0x1f8/0x39f ... [ 0.075000] Call Trace: [ 0.075000] ramoops_init_przs.part.10.constprop.15+0x105/0x260 [ 0.075000] ramoops_probe+0x232/0x3a0 [ 0.075000] platform_drv_probe+0x3e/0xa0 [ 0.075000] driver_probe_device+0x2cd/0x400 [ 0.075000] __driver_attach+0xe4/0x110 [ 0.075000] ? driver_probe_device+0x400/0x400 [ 0.075000] bus_for_each_dev+0x70/0xa0 [ 0.075000] driver_attach+0x1e/0x20 [ 0.075000] bus_add_driver+0x159/0x230 [ 0.075000] ? do_early_param+0x95/0x95 [ 0.075000] driver_register+0x70/0xc0 [ 0.075000] ? init_pstore_fs+0x4d/0x4d [ 0.075000] __platform_driver_register+0x36/0x40 [ 0.075000] ramoops_init+0x12f/0x131 [ 0.075000] do_one_initcall+0x4d/0x12c [ 0.075000] ? do_early_param+0x95/0x95 [ 0.075000] kernel_init_freeable+0x19b/0x222 [ 0.075000] ? rest_init+0xbb/0xbb [ 0.075000] kernel_init+0xe/0xfc [ 0.075000] ret_from_fork+0x3a/0x50 Signed-off-by: Bin Yang [kees: add comments describing the mapping differences, updated commit log] Fixes: 24c3d2f342ed ("staging: android: persistent_ram: Make it possible to use memory outside of bootmem") Cc: stable@vger.kernel.org Signed-off-by: Kees Cook --- fs/pstore/ram_core.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index 951a14edcf51..0792595ebcfb 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -429,7 +429,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size, vaddr = vmap(pages, page_count, VM_MAP, prot); kfree(pages); - return vaddr; + /* + * Since vmap() uses page granularity, we must add the offset + * into the page here, to get the byte granularity address + * into the mapping to represent the actual "start" location. + */ + return vaddr + offset_in_page(start); } static void *persistent_ram_iomap(phys_addr_t start, size_t size, @@ -448,6 +453,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size, else va = ioremap_wc(start, size); + /* + * Since request_mem_region() and ioremap() are byte-granularity + * there is no need handle anything special like we do when the + * vmap() case in persistent_ram_vmap() above. + */ return va; } @@ -468,7 +478,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, return -ENOMEM; } - prz->buffer = prz->vaddr + offset_in_page(start); + prz->buffer = prz->vaddr; prz->buffer_size = size - sizeof(struct persistent_ram_buffer); return 0; @@ -515,7 +525,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz) if (prz->vaddr) { if (pfn_valid(prz->paddr >> PAGE_SHIFT)) { - vunmap(prz->vaddr); + /* We must vunmap() at page-granularity. */ + vunmap(prz->vaddr - offset_in_page(prz->paddr)); } else { iounmap(prz->vaddr); release_mem_region(prz->paddr, prz->size); -- GitLab From 018349d70f28a78d5343b3660cb66e1667005f8a Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 13 Sep 2018 08:03:43 -0700 Subject: [PATCH 1423/1692] hv_netvsc: fix schedule in RCU context When netvsc device is removed it can call reschedule in RCU context. This happens because canceling the subchannel setup work could (in theory) cause a reschedule when manipulating the timer. To reproduce, run with lockdep enabled kernel and unbind a network device from hv_netvsc (via sysfs). 
[ 160.682011] WARNING: suspicious RCU usage [ 160.707466] 4.19.0-rc3-uio+ #2 Not tainted [ 160.709937] ----------------------------- [ 160.712352] ./include/linux/rcupdate.h:302 Illegal context switch in RCU read-side critical section! [ 160.723691] [ 160.723691] other info that might help us debug this: [ 160.723691] [ 160.730955] [ 160.730955] rcu_scheduler_active = 2, debug_locks = 1 [ 160.762813] 5 locks held by rebind-eth.sh/1812: [ 160.766851] #0: 000000008befa37a (sb_writers#6){.+.+}, at: vfs_write+0x184/0x1b0 [ 160.773416] #1: 00000000b097f236 (&of->mutex){+.+.}, at: kernfs_fop_write+0xe2/0x1a0 [ 160.783766] #2: 0000000041ee6889 (kn->count#3){++++}, at: kernfs_fop_write+0xeb/0x1a0 [ 160.787465] #3: 0000000056d92a74 (&dev->mutex){....}, at: device_release_driver_internal+0x39/0x250 [ 160.816987] #4: 0000000030f6031e (rcu_read_lock){....}, at: netvsc_remove+0x1e/0x250 [hv_netvsc] [ 160.828629] [ 160.828629] stack backtrace: [ 160.831966] CPU: 1 PID: 1812 Comm: rebind-eth.sh Not tainted 4.19.0-rc3-uio+ #2 [ 160.832952] Hardware name: Microsoft Corporation Virtual Machine/Virtual Machine, BIOS Hyper-V UEFI Release v1.0 11/26/2012 [ 160.832952] Call Trace: [ 160.832952] dump_stack+0x85/0xcb [ 160.832952] ___might_sleep+0x1a3/0x240 [ 160.832952] __flush_work+0x57/0x2e0 [ 160.832952] ? __mutex_lock+0x83/0x990 [ 160.832952] ? __kernfs_remove+0x24f/0x2e0 [ 160.832952] ? __kernfs_remove+0x1b2/0x2e0 [ 160.832952] ? mark_held_locks+0x50/0x80 [ 160.832952] ? get_work_pool+0x90/0x90 [ 160.832952] __cancel_work_timer+0x13c/0x1e0 [ 160.832952] ? netvsc_remove+0x1e/0x250 [hv_netvsc] [ 160.832952] ? __lock_is_held+0x55/0x90 [ 160.832952] netvsc_remove+0x9a/0x250 [hv_netvsc] [ 160.832952] vmbus_remove+0x26/0x30 [ 160.832952] device_release_driver_internal+0x18a/0x250 [ 160.832952] unbind_store+0xb4/0x180 [ 160.832952] kernfs_fop_write+0x113/0x1a0 [ 160.832952] __vfs_write+0x36/0x1a0 [ 160.832952] ? rcu_read_lock_sched_held+0x6b/0x80 [ 160.832952] ? rcu_sync_lockdep_assert+0x2e/0x60 [ 160.832952] ? __sb_start_write+0x141/0x1a0 [ 160.832952] ? vfs_write+0x184/0x1b0 [ 160.832952] vfs_write+0xbe/0x1b0 [ 160.832952] ksys_write+0x55/0xc0 [ 160.832952] do_syscall_64+0x60/0x1b0 [ 160.832952] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 160.832952] RIP: 0033:0x7fe48f4c8154 Resolve this by getting RTNL earlier. This is safe because the subchannel work queue does trylock on RTNL and will detect the race. Fixes: 7b2ee50c0cd5 ("hv_netvsc: common detach logic") Signed-off-by: Stephen Hemminger Reviewed-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 70921bbe0e28..915fbd66a02b 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2272,17 +2272,15 @@ static int netvsc_remove(struct hv_device *dev) cancel_delayed_work_sync(&ndev_ctx->dwork); - rcu_read_lock(); - nvdev = rcu_dereference(ndev_ctx->nvdev); - - if (nvdev) + rtnl_lock(); + nvdev = rtnl_dereference(ndev_ctx->nvdev); + if (nvdev) cancel_work_sync(&nvdev->subchan_work); /* * Call to the vsc driver to let it know that the device is being * removed. Also blocks mtu and channel changes. 
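[Illustration, not part of the patch: a generic sketch of the locking rule the fix follows — anything that may sleep, such as cancel_work_sync(), must not run inside an rcu_read_lock() section; take a sleepable lock (here the RTNL) and use the matching dereference helper instead. Names are hypothetical.]

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>

struct foo {
	struct work_struct work;
};

static struct foo __rcu *foo_ptr;

static void foo_teardown(void)
{
	struct foo *f;

	rtnl_lock();
	f = rtnl_dereference(foo_ptr);	/* protected by RTNL, sleeping is allowed */
	if (f)
		cancel_work_sync(&f->work);
	rtnl_unlock();
}
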
*/ - rtnl_lock(); vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); if (vf_netdev) netvsc_unregister_vf(vf_netdev); @@ -2294,7 +2292,6 @@ static int netvsc_remove(struct hv_device *dev) list_del(&ndev_ctx->list); rtnl_unlock(); - rcu_read_unlock(); hv_set_drvdata(dev, NULL); -- GitLab From 9824dfae5741275473a23a7ed5756c7b6efacc9d Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Wed, 12 Sep 2018 07:36:35 +0200 Subject: [PATCH 1424/1692] net/appletalk: fix minor pointer leak to userspace in SIOCFINDIPDDPRT Fields ->dev and ->next of struct ipddp_route may be copied to userspace on the SIOCFINDIPDDPRT ioctl. This is only accessible to CAP_NET_ADMIN though. Let's manually copy the relevant fields instead of using memcpy(). BugLink: http://blog.infosectcbr.com.au/2018/09/linux-kernel-infoleaks.html Cc: Jann Horn Signed-off-by: Willy Tarreau Signed-off-by: David S. Miller --- drivers/net/appletalk/ipddp.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 9375cef22420..3d27616d9c85 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCFINDIPDDPRT: spin_lock_bh(&ipddp_route_lock); rp = __ipddp_find_route(&rcp); - if (rp) - memcpy(&rcp2, rp, sizeof(rcp2)); + if (rp) { + memset(&rcp2, 0, sizeof(rcp2)); + rcp2.ip = rp->ip; + rcp2.at = rp->at; + rcp2.flags = rp->flags; + } spin_unlock_bh(&ipddp_route_lock); if (rp) { -- GitLab From 56a49d7048703f5ffdb84d3a0ee034108fba6850 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Wed, 12 Sep 2018 13:21:48 -0700 Subject: [PATCH 1425/1692] net: rtnl_configure_link: fix dev flags changes arg to __dev_notify_flags This fix addresses https://bugzilla.kernel.org/show_bug.cgi?id=201071 Commit 5025f7f7d506 wrongly relied on __dev_change_flags to notify users of dev flag changes in the case when dev->rtnl_link_state = RTNL_LINK_INITIALIZED. Fix it by indicating flag changes explicitly to __dev_notify_flags. Fixes: 5025f7f7d506 ("rtnetlink: add rtnl_link_state check in rtnl_configure_link") Reported-By: Liam mcbirnie Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 60c928894a78..63ce2283a456 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2810,7 +2810,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) } if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { - __dev_notify_flags(dev, old_flags, 0U); + __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); } else { dev->rtnl_link_state = RTNL_LINK_INITIALIZED; __dev_notify_flags(dev, old_flags, ~0U); -- GitLab From f0e0d04413fcce9bc76388839099aee93cd0d33b Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Thu, 13 Sep 2018 11:12:03 -0700 Subject: [PATCH 1426/1692] neighbour: confirm neigh entries when ARP packet is received Update 'confirmed' timestamp when ARP packet is received. It shouldn't affect locktime logic and anyway entry can be confirmed by any higher-layer protocol. Thus it makes sense to confirm it when ARP packet is received. Fixes: 77d7123342dc ("neighbour: update neigh timestamps iff update is effective") Signed-off-by: Vasily Khoruzhick Signed-off-by: David S. 
Miller --- net/core/neighbour.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index aa19d86937af..91592fceeaad 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1180,6 +1180,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, lladdr = neigh->ha; } + /* Update confirmed timestamp for neighbour entry after we + * received ARP packet even if it doesn't change IP to MAC binding. + */ + if (new & NUD_CONNECTED) + neigh->confirmed = jiffies; + /* If entry was valid and address is not changed, do not change entry state, if new one is STALE. */ @@ -1201,15 +1207,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, } } - /* Update timestamps only once we know we will make a change to the + /* Update timestamp only once we know we will make a change to the * neighbour entry. Otherwise we risk to move the locktime window with * noop updates and ignore relevant ARP updates. */ - if (new != old || lladdr != neigh->ha) { - if (new & NUD_CONNECTED) - neigh->confirmed = jiffies; + if (new != old || lladdr != neigh->ha) neigh->updated = jiffies; - } if (new != old) { neigh_del_timer(neigh); -- GitLab From 7cba09c6d5bc73ebbd25a353742d9ddb7a713b95 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Wed, 12 Sep 2018 17:44:41 +0200 Subject: [PATCH 1427/1692] tls: don't copy the key out of tls12_crypto_info_aes_gcm_128 There's no need to copy the key to an on-stack buffer before calling crypto_aead_setkey(). Fixes: 3c4d7559159b ("tls: kernel TLS support") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index e28a6ff25d96..f29b7c49cbf2 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1136,7 +1136,6 @@ void tls_sw_free_resources_rx(struct sock *sk) int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) { - char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE]; struct tls_crypto_info *crypto_info; struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; struct tls_sw_context_tx *sw_ctx_tx = NULL; @@ -1265,9 +1264,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) ctx->push_pending_record = tls_sw_push_pending_record; - memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); - - rc = crypto_aead_setkey(*aead, keyval, + rc = crypto_aead_setkey(*aead, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); if (rc) goto free_aead; -- GitLab From 86029d10af18381814881d6cce2dd6872163b59f Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Wed, 12 Sep 2018 17:44:42 +0200 Subject: [PATCH 1428/1692] tls: zero the crypto information from tls_context before freeing This contains key material in crypto_send_aes_gcm_128 and crypto_recv_aes_gcm_128. Introduce union tls_crypto_context, and replace the two identical unions directly embedded in struct tls_context with it. We can then use this union to clean up the memory in the new tls_ctx_free() function. Fixes: 3c4d7559159b ("tls: kernel TLS support") Signed-off-by: Sabrina Dubroca Signed-off-by: David S. 
Miller --- include/net/tls.h | 19 +++++++++---------- net/tls/tls_device.c | 6 +++--- net/tls/tls_device_fallback.c | 2 +- net/tls/tls_main.c | 20 +++++++++++++++----- net/tls/tls_sw.c | 8 ++++---- 5 files changed, 32 insertions(+), 23 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index d5c683e8bb22..0a769cf2f5f3 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -171,15 +171,14 @@ struct cipher_context { char *rec_seq; }; +union tls_crypto_context { + struct tls_crypto_info info; + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; +}; + struct tls_context { - union { - struct tls_crypto_info crypto_send; - struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128; - }; - union { - struct tls_crypto_info crypto_recv; - struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128; - }; + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; struct list_head list; struct net_device *netdev; @@ -367,8 +366,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx, * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE */ buf[0] = record_type; - buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version); - buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version); + buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version); + buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version); /* we can use IV for nonce explicit according to spec */ buf[3] = pkt_len >> 8; buf[4] = pkt_len & 0xFF; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 292742e50bfa..961b07d4d41c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) goto free_marker_record; } - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; @@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) ctx->priv_ctx_tx = offload_ctx; rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, - &ctx->crypto_send, + &ctx->crypto_send.info, tcp_sk(sk)->write_seq); if (rc) goto release_netdev; @@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) goto release_ctx; rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, - &ctx->crypto_recv, + &ctx->crypto_recv.info, tcp_sk(sk)->copied_seq); if (rc) { pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 6102169239d1..450a6dbc5a88 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, goto free_req; iv = buf; - memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt, + memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE; diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 180b6640e531..737b3865be1b 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -241,6 +241,16 @@ static void tls_write_space(struct sock *sk) ctx->sk_write_space(sk); } +static void tls_ctx_free(struct tls_context *ctx) +{ + if (!ctx) + return; + + memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); + memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv)); + kfree(ctx); +} + static void 
tls_sk_proto_close(struct sock *sk, long timeout) { struct tls_context *ctx = tls_get_ctx(sk); @@ -294,7 +304,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) #else { #endif - kfree(ctx); + tls_ctx_free(ctx); ctx = NULL; } @@ -305,7 +315,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) * for sk->sk_prot->unhash [tls_hw_unhash] */ if (free_ctx) - kfree(ctx); + tls_ctx_free(ctx); } static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, @@ -330,7 +340,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, } /* get user crypto info */ - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; if (!TLS_CRYPTO_INFO_READY(crypto_info)) { rc = -EBUSY; @@ -417,9 +427,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, } if (tx) - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; else - crypto_info = &ctx->crypto_recv; + crypto_info = &ctx->crypto_recv.info; /* Currently we don't support set crypto info more than one time */ if (TLS_CRYPTO_INFO_READY(crypto_info)) { diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index f29b7c49cbf2..9e918489f4fb 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1055,8 +1055,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) goto read_failure; } - if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) || - header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) { + if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) || + header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) { ret = -EINVAL; goto read_failure; } @@ -1180,12 +1180,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) if (tx) { crypto_init_wait(&sw_ctx_tx->async_wait); - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; cctx = &ctx->tx; aead = &sw_ctx_tx->aead_send; } else { crypto_init_wait(&sw_ctx_rx->async_wait); - crypto_info = &ctx->crypto_recv; + crypto_info = &ctx->crypto_recv.info; cctx = &ctx->rx; aead = &sw_ctx_rx->aead_recv; } -- GitLab From c844eb46b7d43c2cf760169df5ae1d5b033af338 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Wed, 12 Sep 2018 17:44:43 +0200 Subject: [PATCH 1429/1692] tls: clear key material from kernel memory when do_tls_setsockopt_conf fails Fixes: 3c4d7559159b ("tls: kernel TLS support") Signed-off-by: Sabrina Dubroca Signed-off-by: Sabrina Dubroca Signed-off-by: David S. 
Miller --- net/tls/tls_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 737b3865be1b..523622dc74f8 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -509,7 +509,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, goto out; err_crypto_info: - memset(crypto_info, 0, sizeof(*crypto_info)); + memzero_explicit(crypto_info, sizeof(union tls_crypto_context)); out: return rc; } -- GitLab From c56cae23c6b167acc68043c683c4573b80cbcc2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Thu, 13 Sep 2018 16:43:07 +0200 Subject: [PATCH 1430/1692] gso_segment: Reset skb->mac_len after modifying network header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When splitting a GSO segment that consists of encapsulated packets, the skb->mac_len of the segments can end up being set wrong, causing packet drops in particular when using act_mirred and ifb interfaces in combination with a qdisc that splits GSO packets. This happens because at the time skb_segment() is called, network_header will point to the inner header, throwing off the calculation in skb_reset_mac_len(). The network_header is subsequently adjust by the outer IP gso_segment handlers, but they don't set the mac_len. Fix this by adding skb_reset_mac_len() calls to both the IPv4 and IPv6 gso_segment handlers, after they modify the network_header. Many thanks to Eric Dumazet for his help in identifying the cause of the bug. Acked-by: Dave Taht Reviewed-by: Eric Dumazet Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. Miller --- net/ipv4/af_inet.c | 1 + net/ipv6/ip6_offload.c | 1 + 2 files changed, 2 insertions(+) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 20fda8fb8ffd..1fbe2f815474 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, if (encap) skb_reset_inner_headers(skb); skb->network_header = (u8 *)iph - skb->head; + skb_reset_mac_len(skb); } while ((skb = skb->next)); out: diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 37ff4805b20c..c7e495f12011 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, payload_len = skb->len - nhoff - sizeof(*ipv6h); ipv6h->payload_len = htons(payload_len); skb->network_header = (u8 *)ipv6h - skb->head; + skb_reset_mac_len(skb); if (udpfrag) { int err = ip6_find_1stfragopt(skb, &prevhdr); -- GitLab From 1194c4154662ac60312c164e9eaab0f8dd0dd36f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Sep 2018 12:16:36 -0700 Subject: [PATCH 1431/1692] MAINTAINERS: Make Dennis the percpu tree maintainer Dennis rewrote a significant portion of the percpu allocator and has shown that he can respond in a timely and helpful manner when issues are reported against percpu allocator. Let's make Dennis the percpu tree maintainer. 
Signed-off-by: Tejun Heo Cc: Dennis Zhou Cc: Christoph Lameter --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 4130acc2e152..bb65f0c1861c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11360,10 +11360,10 @@ S: Maintained F: drivers/platform/x86/peaq-wmi.c PER-CPU MEMORY ALLOCATOR +M: Dennis Zhou M: Tejun Heo M: Christoph Lameter -M: Dennis Zhou -T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git S: Maintained F: include/linux/percpu*.h F: mm/percpu*.c -- GitLab From 52e211c1f04f7544bf3d3cf5ed3939708d5988d2 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 10 Sep 2018 12:53:25 -0400 Subject: [PATCH 1432/1692] drm/amdgpu:Add error message when register failed to reach expected value Add error message when register failed to reach expected value, It will help discover potential issue. Signed-off-by: James Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 0942f492d2e1..f5d602540673 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -56,6 +56,8 @@ tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ loop--; \ if (!loop) { \ + DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ + inst, #reg, expected_value, (tmp_ & (mask))); \ ret = -ETIMEDOUT; \ break; \ } \ -- GitLab From 73633e3223e6e19bd22775b4ad725fdc65d5d8ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sat, 1 Sep 2018 10:36:48 +0200 Subject: [PATCH 1433/1692] drm/amdgpu: add some VM PD/PT iterators v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both a leaf as well as dfs iterator to walk over all the PDs/PTs. v2: update comments and fix for_each_amdgpu_vm_pt_dfs_safe Signed-off-by: Christian König Reviewed-by: Huang Rui Reviewed-by: Junwei Zhang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 224 +++++++++++++++++++++++++ 1 file changed, 224 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index be1659fedf94..06a173e843ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -355,6 +355,230 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list); } +/** + * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt + */ +struct amdgpu_vm_pt_cursor { + uint64_t pfn; + struct amdgpu_vm_pt *parent; + struct amdgpu_vm_pt *entry; + unsigned level; +}; + +/** + * amdgpu_vm_pt_start - start PD/PT walk + * + * @adev: amdgpu_device pointer + * @vm: amdgpu_vm structure + * @start: start address of the walk + * @cursor: state to initialize + * + * Initialize a amdgpu_vm_pt_cursor to start a walk. 
+ */ +static void amdgpu_vm_pt_start(struct amdgpu_device *adev, + struct amdgpu_vm *vm, uint64_t start, + struct amdgpu_vm_pt_cursor *cursor) +{ + cursor->pfn = start; + cursor->parent = NULL; + cursor->entry = &vm->root; + cursor->level = adev->vm_manager.root_level; +} + +/** + * amdgpu_vm_pt_descendant - go to child node + * + * @adev: amdgpu_device pointer + * @cursor: current state + * + * Walk to the child node of the current node. + * Returns: + * True if the walk was possible, false otherwise. + */ +static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev, + struct amdgpu_vm_pt_cursor *cursor) +{ + unsigned num_entries, shift, idx; + + if (!cursor->entry->entries) + return false; + + BUG_ON(!cursor->entry->base.bo); + num_entries = amdgpu_vm_num_entries(adev, cursor->level); + shift = amdgpu_vm_level_shift(adev, cursor->level); + + ++cursor->level; + idx = (cursor->pfn >> shift) % num_entries; + cursor->parent = cursor->entry; + cursor->entry = &cursor->entry->entries[idx]; + return true; +} + +/** + * amdgpu_vm_pt_sibling - go to sibling node + * + * @adev: amdgpu_device pointer + * @cursor: current state + * + * Walk to the sibling node of the current node. + * Returns: + * True if the walk was possible, false otherwise. + */ +static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev, + struct amdgpu_vm_pt_cursor *cursor) +{ + unsigned shift, num_entries; + + /* Root doesn't have a sibling */ + if (!cursor->parent) + return false; + + /* Go to our parents and see if we got a sibling */ + shift = amdgpu_vm_level_shift(adev, cursor->level - 1); + num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1); + + if (cursor->entry == &cursor->parent->entries[num_entries - 1]) + return false; + + cursor->pfn += 1ULL << shift; + cursor->pfn &= ~((1ULL << shift) - 1); + ++cursor->entry; + return true; +} + +/** + * amdgpu_vm_pt_ancestor - go to parent node + * + * @cursor: current state + * + * Walk to the parent node of the current node. + * Returns: + * True if the walk was possible, false otherwise. + */ +static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor) +{ + if (!cursor->parent) + return false; + + --cursor->level; + cursor->entry = cursor->parent; + cursor->parent = amdgpu_vm_pt_parent(cursor->parent); + return true; +} + +/** + * amdgpu_vm_pt_next - get next PD/PT in hieratchy + * + * @adev: amdgpu_device pointer + * @cursor: current state + * + * Walk the PD/PT tree to the next node. + */ +static void amdgpu_vm_pt_next(struct amdgpu_device *adev, + struct amdgpu_vm_pt_cursor *cursor) +{ + /* First try a newborn child */ + if (amdgpu_vm_pt_descendant(adev, cursor)) + return; + + /* If that didn't worked try to find a sibling */ + while (!amdgpu_vm_pt_sibling(adev, cursor)) { + /* No sibling, go to our parents and grandparents */ + if (!amdgpu_vm_pt_ancestor(cursor)) { + cursor->pfn = ~0ll; + return; + } + } +} + +/** + * amdgpu_vm_pt_first_leaf - get first leaf PD/PT + * + * @adev: amdgpu_device pointer + * @vm: amdgpu_vm structure + * @start: start addr of the walk + * @cursor: state to initialize + * + * Start a walk and go directly to the leaf node. 
+ */ +static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev, + struct amdgpu_vm *vm, uint64_t start, + struct amdgpu_vm_pt_cursor *cursor) +{ + amdgpu_vm_pt_start(adev, vm, start, cursor); + while (amdgpu_vm_pt_descendant(adev, cursor)); +} + +/** + * amdgpu_vm_pt_next_leaf - get next leaf PD/PT + * + * @adev: amdgpu_device pointer + * @cursor: current state + * + * Walk the PD/PT tree to the next leaf node. + */ +static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev, + struct amdgpu_vm_pt_cursor *cursor) +{ + amdgpu_vm_pt_next(adev, cursor); + while (amdgpu_vm_pt_descendant(adev, cursor)); +} + +/** + * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy + */ +#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) \ + for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor)); \ + (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor))) + +/** + * amdgpu_vm_pt_first_dfs - start a deep first search + * + * @adev: amdgpu_device structure + * @vm: amdgpu_vm structure + * @cursor: state to initialize + * + * Starts a deep first traversal of the PD/PT tree. + */ +static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct amdgpu_vm_pt_cursor *cursor) +{ + amdgpu_vm_pt_start(adev, vm, 0, cursor); + while (amdgpu_vm_pt_descendant(adev, cursor)); +} + +/** + * amdgpu_vm_pt_next_dfs - get the next node for a deep first search + * + * @adev: amdgpu_device structure + * @cursor: current state + * + * Move the cursor to the next node in a deep first search. + */ +static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev, + struct amdgpu_vm_pt_cursor *cursor) +{ + if (!cursor->entry) + return; + + if (!cursor->parent) + cursor->entry = NULL; + else if (amdgpu_vm_pt_sibling(adev, cursor)) + while (amdgpu_vm_pt_descendant(adev, cursor)); + else + amdgpu_vm_pt_ancestor(cursor); +} + +/** + * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs + */ +#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) \ + for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)), \ + (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\ + (entry); (entry) = (cursor).entry, \ + amdgpu_vm_pt_next_dfs((adev), &(cursor))) + /** * amdgpu_vm_get_pd_bo - add the VM PD to a validation list * -- GitLab From d72a6887eef880271c0d48ed427329ccc0d547f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sat, 1 Sep 2018 12:03:37 +0200 Subject: [PATCH 1434/1692] drm/amdgpu: use leaf iterator for allocating PD/PT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Less code and allows for easier error handling. 
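As a minimal illustration of the cursor pattern the iterators above rely on (all names and structures below are invented for the sketch, not taken from the driver), a pre-order tree walk can be done without recursion by trying a child first, then a sibling, and climbing back up otherwise:

/*
 * Minimal sketch, not driver code: walk a fixed-fanout tree in pre-order
 * with a small cursor instead of recursion - the same descend/sibling/
 * ancestor shape used by the PD/PT iterators above.
 */
#include <stdio.h>
#include <stdlib.h>

#define FANOUT 4

struct node {
        struct node *parent;
        struct node *child[FANOUT];
        int idx;                        /* position inside parent->child[] */
        int value;
};

struct cursor {
        struct node *entry;
};

/* go to the first child, if there is one */
static int descend(struct cursor *c)
{
        if (!c->entry->child[0])
                return 0;
        c->entry = c->entry->child[0];
        return 1;
}

/* go to the next sibling, if there is one */
static int sibling(struct cursor *c)
{
        struct node *p = c->entry->parent;

        if (!p || c->entry->idx + 1 >= FANOUT || !p->child[c->entry->idx + 1])
                return 0;
        c->entry = p->child[c->entry->idx + 1];
        return 1;
}

/* climb back to the parent, if there is one */
static int ancestor(struct cursor *c)
{
        if (!c->entry->parent)
                return 0;
        c->entry = c->entry->parent;
        return 1;
}

/* next node in pre-order: child first, then sibling, else climb and retry */
static struct node *next_node(struct cursor *c)
{
        if (descend(c))
                return c->entry;
        while (!sibling(c)) {
                if (!ancestor(c))
                        return NULL;
        }
        return c->entry;
}

static struct node *new_node(struct node *parent, int idx, int value)
{
        struct node *n = calloc(1, sizeof(*n));

        n->parent = parent;
        n->idx = idx;
        n->value = value;
        if (parent)
                parent->child[idx] = n;
        return n;
}

int main(void)
{
        struct node *root = new_node(NULL, 0, 0);
        struct cursor c = { .entry = root };
        struct node *n;
        int i;

        for (i = 0; i < FANOUT; i++)
                new_node(new_node(root, i, i + 1), 0, 10 * (i + 1));

        /* prints 0 1 10 2 20 3 30 4 40 */
        for (n = root; n; n = next_node(&c))
                printf("%d ", n->value);
        printf("\n");
        return 0;
}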
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Junwei Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 156 +++++++++---------------- 1 file changed, 55 insertions(+), 101 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 06a173e843ce..60fa4f455b52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -844,103 +844,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp->resv = vm->root.base.bo->tbo.resv; } -/** - * amdgpu_vm_alloc_levels - allocate the PD/PT levels - * - * @adev: amdgpu_device pointer - * @vm: requested vm - * @parent: parent PT - * @saddr: start of the address range - * @eaddr: end of the address range - * @level: VMPT level - * @ats: indicate ATS support from PTE - * - * Make sure the page directories and page tables are allocated - * - * Returns: - * 0 on success, errno otherwise. - */ -static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_vm_pt *parent, - uint64_t saddr, uint64_t eaddr, - unsigned level, bool ats) -{ - unsigned shift = amdgpu_vm_level_shift(adev, level); - struct amdgpu_bo_param bp; - unsigned pt_idx, from, to; - int r; - - if (!parent->entries) { - unsigned num_entries = amdgpu_vm_num_entries(adev, level); - - parent->entries = kvmalloc_array(num_entries, - sizeof(struct amdgpu_vm_pt), - GFP_KERNEL | __GFP_ZERO); - if (!parent->entries) - return -ENOMEM; - } - - from = saddr >> shift; - to = eaddr >> shift; - if (from >= amdgpu_vm_num_entries(adev, level) || - to >= amdgpu_vm_num_entries(adev, level)) - return -EINVAL; - - ++level; - saddr = saddr & ((1 << shift) - 1); - eaddr = eaddr & ((1 << shift) - 1); - - amdgpu_vm_bo_param(adev, vm, level, &bp); - - /* walk over the address space and allocate the page tables */ - for (pt_idx = from; pt_idx <= to; ++pt_idx) { - struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; - struct amdgpu_bo *pt; - - if (!entry->base.bo) { - r = amdgpu_bo_create(adev, &bp, &pt); - if (r) - return r; - - r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats); - if (r) { - amdgpu_bo_unref(&pt->shadow); - amdgpu_bo_unref(&pt); - return r; - } - - if (vm->use_cpu_for_update) { - r = amdgpu_bo_kmap(pt, NULL); - if (r) { - amdgpu_bo_unref(&pt->shadow); - amdgpu_bo_unref(&pt); - return r; - } - } - - /* Keep a reference to the root directory to avoid - * freeing them up in the wrong order. - */ - pt->parent = amdgpu_bo_ref(parent->base.bo); - - amdgpu_vm_bo_base_init(&entry->base, vm, pt); - } - - if (level < AMDGPU_VM_PTB) { - uint64_t sub_saddr = (pt_idx == from) ? saddr : 0; - uint64_t sub_eaddr = (pt_idx == to) ? eaddr : - ((1 << shift) - 1); - r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr, - sub_eaddr, level, ats); - if (r) - return r; - } - } - - return 0; -} - /** * amdgpu_vm_alloc_pts - Allocate page tables. * @@ -949,7 +852,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, * @saddr: Start address which needs to be allocated * @size: Size from start address we need. * - * Make sure the page tables are allocated. + * Make sure the page directories and page tables are allocated * * Returns: * 0 on success, errno otherwise. 
@@ -958,8 +861,11 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size) { - uint64_t eaddr; + struct amdgpu_vm_pt_cursor cursor; + struct amdgpu_bo *pt; bool ats = false; + uint64_t eaddr; + int r; /* validate the parameters */ if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK) @@ -979,8 +885,56 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, return -EINVAL; } - return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, - adev->vm_manager.root_level, ats); + for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) { + struct amdgpu_vm_pt *entry = cursor.entry; + struct amdgpu_bo_param bp; + + if (cursor.level < AMDGPU_VM_PTB) { + unsigned num_entries; + + num_entries = amdgpu_vm_num_entries(adev, cursor.level); + entry->entries = kvmalloc_array(num_entries, + sizeof(*entry->entries), + GFP_KERNEL | + __GFP_ZERO); + if (!entry->entries) + return -ENOMEM; + } + + + if (entry->base.bo) + continue; + + amdgpu_vm_bo_param(adev, vm, cursor.level, &bp); + + r = amdgpu_bo_create(adev, &bp, &pt); + if (r) + return r; + + r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats); + if (r) + goto error_free_pt; + + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(pt, NULL); + if (r) + goto error_free_pt; + } + + /* Keep a reference to the root directory to avoid + * freeing them up in the wrong order. + */ + pt->parent = amdgpu_bo_ref(cursor.parent->base.bo); + + amdgpu_vm_bo_base_init(&entry->base, vm, pt); + } + + return 0; + +error_free_pt: + amdgpu_bo_unref(&pt->shadow); + amdgpu_bo_unref(&pt); + return r; } /** -- GitLab From 229a37f83454c59a9c8742c811119da4a33d619b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 6 Sep 2018 15:35:13 +0200 Subject: [PATCH 1435/1692] drm/amdgpu: use dfs iterator to free PDs/PTs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allows us to free all PDs/PTs without recursion. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Junwei Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 62 +++++++++++++------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 60fa4f455b52..139bd6347fc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -937,6 +937,35 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, return r; } +/** + * amdgpu_vm_free_pts - free PD/PT levels + * + * @adev: amdgpu device structure + * @parent: PD/PT starting level to free + * @level: level of parent structure + * + * Free the page directory or page table level and all sub levels. 
+ */ +static void amdgpu_vm_free_pts(struct amdgpu_device *adev, + struct amdgpu_vm *vm) +{ + struct amdgpu_vm_pt_cursor cursor; + struct amdgpu_vm_pt *entry; + + for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) { + + if (entry->base.bo) { + list_del(&entry->base.bo_list); + list_del(&entry->base.vm_status); + amdgpu_bo_unref(&entry->base.bo->shadow); + amdgpu_bo_unref(&entry->base.bo); + } + kvfree(entry->entries); + } + + BUG_ON(vm->root.base.bo); +} + /** * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug * @@ -3147,36 +3176,6 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->pasid = 0; } -/** - * amdgpu_vm_free_levels - free PD/PT levels - * - * @adev: amdgpu device structure - * @parent: PD/PT starting level to free - * @level: level of parent structure - * - * Free the page directory or page table level and all sub levels. - */ -static void amdgpu_vm_free_levels(struct amdgpu_device *adev, - struct amdgpu_vm_pt *parent, - unsigned level) -{ - unsigned i, num_entries = amdgpu_vm_num_entries(adev, level); - - if (parent->base.bo) { - list_del(&parent->base.bo_list); - list_del(&parent->base.vm_status); - amdgpu_bo_unref(&parent->base.bo->shadow); - amdgpu_bo_unref(&parent->base.bo); - } - - if (parent->entries) - for (i = 0; i < num_entries; i++) - amdgpu_vm_free_levels(adev, &parent->entries[i], - level + 1); - - kvfree(parent->entries); -} - /** * amdgpu_vm_fini - tear down a vm instance * @@ -3237,8 +3236,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) if (r) { dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); } else { - amdgpu_vm_free_levels(adev, &vm->root, - adev->vm_manager.root_level); + amdgpu_vm_free_pts(adev, vm); amdgpu_bo_unreserve(root); } amdgpu_bo_unref(&root); -- GitLab From d4085ea9bc8dc44eb4bfd696474f9ef26ec9e0cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sat, 8 Sep 2018 13:05:34 +0200 Subject: [PATCH 1436/1692] drm/amdgpu: use the DFS iterator in amdgpu_vm_invalidate_pds v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Less code and easier to maintain. v2: rename the function as well Signed-off-by: Christian König Reviewed-by: Huang Rui Reviewed-by: Junwei Zhang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 +++++++------------------- 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 139bd6347fc4..cca8fc931bbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1370,37 +1370,22 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params, } /* - * amdgpu_vm_invalidate_level - mark all PD levels as invalid + * amdgpu_vm_invalidate_pds - mark all PDs as invalid * * @adev: amdgpu_device pointer * @vm: related vm - * @parent: parent PD - * @level: VMPT level * * Mark all PD level as invalid after an error. */ -static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_vm_pt *parent, - unsigned level) +static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, + struct amdgpu_vm *vm) { - unsigned pt_idx, num_entries; - - /* - * Recurse into the subdirectories. This recursion is harmless because - * we only have a maximum of 5 layers. 
- */ - num_entries = amdgpu_vm_num_entries(adev, level); - for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) { - struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; - - if (!entry->base.bo) - continue; + struct amdgpu_vm_pt_cursor cursor; + struct amdgpu_vm_pt *entry; - if (!entry->base.moved) + for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) + if (entry->base.bo && !entry->base.moved) amdgpu_vm_bo_relocated(&entry->base); - amdgpu_vm_invalidate_level(adev, vm, entry, level + 1); - } } /* @@ -1497,8 +1482,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, return 0; error: - amdgpu_vm_invalidate_level(adev, vm, &vm->root, - adev->vm_manager.root_level); + amdgpu_vm_invalidate_pds(adev, vm); amdgpu_job_free(job); return r; } -- GitLab From dfa70550f5b77252d99ba9c410b9fbe6e4088e51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 3 Sep 2018 14:34:51 +0200 Subject: [PATCH 1437/1692] drm/amdgpu: use leaf iterator for filling PTs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Less overhead and is the starting point for further cleanups and improvements. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Junwei Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 62 +++++++------------------- 1 file changed, 15 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index cca8fc931bbb..e873bbb2f0c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1487,36 +1487,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, return r; } -/** - * amdgpu_vm_find_entry - find the entry for an address - * - * @p: see amdgpu_pte_update_params definition - * @addr: virtual address in question - * @entry: resulting entry or NULL - * @parent: parent entry - * - * Find the vm_pt entry and it's parent for the given address. 
- */ -void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr, - struct amdgpu_vm_pt **entry, - struct amdgpu_vm_pt **parent) -{ - unsigned level = p->adev->vm_manager.root_level; - - *parent = NULL; - *entry = &p->vm->root; - while ((*entry)->entries) { - unsigned shift = amdgpu_vm_level_shift(p->adev, level++); - - *parent = *entry; - *entry = &(*entry)->entries[addr >> shift]; - addr &= (1ULL << shift) - 1; - } - - if (level != AMDGPU_VM_PTB) - *entry = NULL; -} - /** * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages * @@ -1580,36 +1550,34 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, { struct amdgpu_device *adev = params->adev; const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1; - - uint64_t addr, pe_start; - struct amdgpu_bo *pt; - unsigned nptes; + struct amdgpu_vm_pt_cursor cursor; /* walk over the address space and update the page tables */ - for (addr = start; addr < end; addr += nptes, - dst += nptes * AMDGPU_GPU_PAGE_SIZE) { - struct amdgpu_vm_pt *entry, *parent; + for_each_amdgpu_vm_pt_leaf(adev, params->vm, start, end - 1, cursor) { + struct amdgpu_bo *pt = cursor.entry->base.bo; + uint64_t pe_start; + unsigned nptes; - amdgpu_vm_get_entry(params, addr, &entry, &parent); - if (!entry) + if (!pt || cursor.level != AMDGPU_VM_PTB) return -ENOENT; - if ((addr & ~mask) == (end & ~mask)) - nptes = end - addr; + if ((cursor.pfn & ~mask) == (end & ~mask)) + nptes = end - cursor.pfn; else - nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); + nptes = AMDGPU_VM_PTE_COUNT(adev) - (cursor.pfn & mask); - amdgpu_vm_handle_huge_pages(params, entry, parent, + amdgpu_vm_handle_huge_pages(params, cursor.entry, cursor.parent, nptes, dst, flags); /* We don't need to update PTEs for huge pages */ - if (entry->huge) + if (cursor.entry->huge) { + dst += nptes * AMDGPU_GPU_PAGE_SIZE; continue; + } - pt = entry->base.bo; - pe_start = (addr & mask) * 8; + pe_start = (cursor.pfn & mask) * 8; amdgpu_vm_update_func(params, pt, pe_start, dst, nptes, AMDGPU_GPU_PAGE_SIZE, flags); - + dst += nptes * AMDGPU_GPU_PAGE_SIZE; } return 0; -- GitLab From dfcd99f6273e7ae9aae10eafacc5521018bee143 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 6 Sep 2018 17:13:06 +0200 Subject: [PATCH 1438/1692] drm/amdgpu: meld together VM fragment and huge page handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This optimizes the generating of PTEs by walking the hierarchy only once for a range and making changes as necessary. It allows for both huge (2MB) as well giant (1GB) pages to be used on Vega and Raven. 
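The fragment selection that this patch folds into the page-table walk can be shown with a small standalone sketch (helper names are invented here, not the driver's): the fragment is the largest power of two that is both aligned to the start PFN and no larger than the remaining range, clamped to the hardware maximum; with a 4KB base page, frag 9 corresponds to a 2MB block and frag 18 to a 1GB block.

/*
 * Standalone sketch (invented helper names): pick a PTE fragment size the
 * way the fragment helper above does - the largest power of two that is
 * aligned to the start PFN and fits in the remaining range, clamped to
 * max_frag.
 */
#include <stdio.h>
#include <stdint.h>

/* index of the lowest set bit plus one, 0 if no bit is set */
static unsigned int ffs64_(uint64_t x)
{
        unsigned int r = 1;

        if (!x)
                return 0;
        while (!(x & 1)) {
                x >>= 1;
                r++;
        }
        return r;
}

/* index of the highest set bit plus one, 0 if no bit is set */
static unsigned int fls64_(uint64_t x)
{
        unsigned int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

/* start/end are page frame numbers, end is exclusive and greater than start */
static unsigned int fragment(uint64_t start, uint64_t end, unsigned int max_frag)
{
        /* wraps around to UINT_MAX when start == 0, like the driver code */
        unsigned int frag = ffs64_(start) - 1;
        unsigned int len = fls64_(end - start) - 1;

        if (frag > len)
                frag = len;
        return frag > max_frag ? max_frag : frag;
}

int main(void)
{
        /* with a 4KB base page, frag 9 is a 2MB block and frag 18 a 1GB block */
        printf("%u\n", fragment(0x200, 0x400, 31));   /* 9  */
        printf("%u\n", fragment(0, 0x40000, 31));     /* 18 */
        printf("%u\n", fragment(0x3, 0x200, 31));     /* 0  */
        return 0;
}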
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Huang Rui Acked-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 267 ++++++++++++++----------- 1 file changed, 147 insertions(+), 120 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e873bbb2f0c7..45343501c1f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1488,46 +1488,76 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, } /** - * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages + * amdgpu_vm_update_huge - figure out parameters for PTE updates * - * @p: see amdgpu_pte_update_params definition - * @entry: vm_pt entry to check - * @parent: parent entry - * @nptes: number of PTEs updated with this operation - * @dst: destination address where the PTEs should point to - * @flags: access flags fro the PTEs - * - * Check if we can update the PD with a huge page. + * Make sure to set the right flags for the PTEs at the desired level. */ -static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, - struct amdgpu_vm_pt *entry, - struct amdgpu_vm_pt *parent, - unsigned nptes, uint64_t dst, - uint64_t flags) -{ - uint64_t pde; +static void amdgpu_vm_update_huge(struct amdgpu_pte_update_params *params, + struct amdgpu_bo *bo, unsigned level, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint64_t flags) - /* In the case of a mixed PT the PDE must point to it*/ - if (p->adev->asic_type >= CHIP_VEGA10 && !p->src && - nptes == AMDGPU_VM_PTE_COUNT(p->adev)) { - /* Set the huge page flag to stop scanning at this PDE */ +{ + if (level != AMDGPU_VM_PTB) { flags |= AMDGPU_PDE_PTE; + amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags); } - if (!(flags & AMDGPU_PDE_PTE)) { - if (entry->huge) { - /* Add the entry to the relocated list to update it. */ - entry->huge = false; - amdgpu_vm_bo_relocated(&entry->base); - } + amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags); +} + +/** + * amdgpu_vm_fragment - get fragment for PTEs + * + * @params: see amdgpu_pte_update_params definition + * @start: first PTE to handle + * @end: last PTE to handle + * @flags: hw mapping flags + * @frag: resulting fragment size + * @frag_end: end of this fragment + * + * Returns the first possible fragment for the start and end address. + */ +static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params, + uint64_t start, uint64_t end, uint64_t flags, + unsigned int *frag, uint64_t *frag_end) +{ + /** + * The MC L1 TLB supports variable sized pages, based on a fragment + * field in the PTE. When this field is set to a non-zero value, page + * granularity is increased from 4KB to (1 << (12 + frag)). The PTE + * flags are considered valid for all PTEs within the fragment range + * and corresponding mappings are assumed to be physically contiguous. + * + * The L1 TLB can store a single PTE for the whole fragment, + * significantly increasing the space available for translation + * caching. This leads to large improvements in throughput when the + * TLB is under pressure. + * + * The L2 TLB distributes small and large fragments into two + * asymmetric partitions. The large fragment cache is significantly + * larger. Thus, we try to use large fragments wherever possible. + * Userspace can support this by aligning virtual base address and + * allocation size to the fragment size. 
+ */ + unsigned max_frag = params->adev->vm_manager.fragment_size; + + /* system pages are non continuously */ + if (params->src || !(flags & AMDGPU_PTE_VALID)) { + *frag = 0; + *frag_end = end; return; } - entry->huge = true; - amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags); - - pde = (entry - parent->entries) * 8; - amdgpu_vm_update_func(p, parent->base.bo, pde, dst, 1, 0, flags); + /* This intentionally wraps around if no bit is set */ + *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1); + if (*frag >= max_frag) { + *frag = max_frag; + *frag_end = end & ~((1ULL << max_frag) - 1); + } else { + *frag_end = start + (1 << *frag); + } } /** @@ -1545,108 +1575,105 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, * 0 for success, -EINVAL for failure. */ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, - uint64_t start, uint64_t end, - uint64_t dst, uint64_t flags) + uint64_t start, uint64_t end, + uint64_t dst, uint64_t flags) { struct amdgpu_device *adev = params->adev; - const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1; struct amdgpu_vm_pt_cursor cursor; + uint64_t frag_start = start, frag_end; + unsigned int frag; - /* walk over the address space and update the page tables */ - for_each_amdgpu_vm_pt_leaf(adev, params->vm, start, end - 1, cursor) { + /* figure out the initial fragment */ + amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end); + + /* walk over the address space and update the PTs */ + amdgpu_vm_pt_start(adev, params->vm, start, &cursor); + while (cursor.pfn < end) { struct amdgpu_bo *pt = cursor.entry->base.bo; - uint64_t pe_start; - unsigned nptes; + unsigned shift, parent_shift, num_entries; + uint64_t incr, entry_end, pe_start; - if (!pt || cursor.level != AMDGPU_VM_PTB) + if (!pt) return -ENOENT; - if ((cursor.pfn & ~mask) == (end & ~mask)) - nptes = end - cursor.pfn; - else - nptes = AMDGPU_VM_PTE_COUNT(adev) - (cursor.pfn & mask); - - amdgpu_vm_handle_huge_pages(params, cursor.entry, cursor.parent, - nptes, dst, flags); - /* We don't need to update PTEs for huge pages */ - if (cursor.entry->huge) { - dst += nptes * AMDGPU_GPU_PAGE_SIZE; + /* The root level can't be a huge page */ + if (cursor.level == adev->vm_manager.root_level) { + if (!amdgpu_vm_pt_descendant(adev, &cursor)) + return -ENOENT; continue; } - pe_start = (cursor.pfn & mask) * 8; - amdgpu_vm_update_func(params, pt, pe_start, dst, nptes, - AMDGPU_GPU_PAGE_SIZE, flags); - dst += nptes * AMDGPU_GPU_PAGE_SIZE; - } - - return 0; -} + /* First check if the entry is already handled */ + if (cursor.pfn < frag_start) { + cursor.entry->huge = true; + amdgpu_vm_pt_next(adev, &cursor); + continue; + } -/* - * amdgpu_vm_frag_ptes - add fragment information to PTEs - * - * @params: see amdgpu_pte_update_params definition - * @vm: requested vm - * @start: first PTE to handle - * @end: last PTE to handle - * @dst: addr those PTEs should point to - * @flags: hw mapping flags - * - * Returns: - * 0 for success, -EINVAL for failure. - */ -static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, - uint64_t start, uint64_t end, - uint64_t dst, uint64_t flags) -{ - /** - * The MC L1 TLB supports variable sized pages, based on a fragment - * field in the PTE. When this field is set to a non-zero value, page - * granularity is increased from 4KB to (1 << (12 + frag)). 
The PTE - * flags are considered valid for all PTEs within the fragment range - * and corresponding mappings are assumed to be physically contiguous. - * - * The L1 TLB can store a single PTE for the whole fragment, - * significantly increasing the space available for translation - * caching. This leads to large improvements in throughput when the - * TLB is under pressure. - * - * The L2 TLB distributes small and large fragments into two - * asymmetric partitions. The large fragment cache is significantly - * larger. Thus, we try to use large fragments wherever possible. - * Userspace can support this by aligning virtual base address and - * allocation size to the fragment size. - */ - unsigned max_frag = params->adev->vm_manager.fragment_size; - int r; + /* If it isn't already handled it can't be a huge page */ + if (cursor.entry->huge) { + /* Add the entry to the relocated list to update it. */ + cursor.entry->huge = false; + amdgpu_vm_bo_relocated(&cursor.entry->base); + } - /* system pages are non continuously */ - if (params->src || !(flags & AMDGPU_PTE_VALID)) - return amdgpu_vm_update_ptes(params, start, end, dst, flags); - - while (start != end) { - uint64_t frag_flags, frag_end; - unsigned frag; - - /* This intentionally wraps around if no bit is set */ - frag = min((unsigned)ffs(start) - 1, - (unsigned)fls64(end - start) - 1); - if (frag >= max_frag) { - frag_flags = AMDGPU_PTE_FRAG(max_frag); - frag_end = end & ~((1ULL << max_frag) - 1); - } else { - frag_flags = AMDGPU_PTE_FRAG(frag); - frag_end = start + (1 << frag); + shift = amdgpu_vm_level_shift(adev, cursor.level); + parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1); + if (adev->asic_type < CHIP_VEGA10) { + /* No huge page support before GMC v9 */ + if (cursor.level != AMDGPU_VM_PTB) { + if (!amdgpu_vm_pt_descendant(adev, &cursor)) + return -ENOENT; + continue; + } + } else if (frag < shift) { + /* We can't use this level when the fragment size is + * smaller than the address shift. Go to the next + * child entry and try again. + */ + if (!amdgpu_vm_pt_descendant(adev, &cursor)) + return -ENOENT; + continue; + } else if (frag >= parent_shift) { + /* If the fragment size is even larger than the parent + * shift we should go up one level and check it again. 
+ */ + if (!amdgpu_vm_pt_ancestor(&cursor)) + return -ENOENT; + continue; } - r = amdgpu_vm_update_ptes(params, start, frag_end, dst, - flags | frag_flags); - if (r) - return r; + /* Looks good so far, calculate parameters for the update */ + incr = AMDGPU_GPU_PAGE_SIZE << shift; + num_entries = amdgpu_vm_num_entries(adev, cursor.level); + pe_start = ((cursor.pfn >> shift) & (num_entries - 1)) * 8; + entry_end = num_entries << shift; + entry_end += cursor.pfn & ~(entry_end - 1); + entry_end = min(entry_end, end); + + do { + uint64_t upd_end = min(entry_end, frag_end); + unsigned nptes = (upd_end - frag_start) >> shift; + + amdgpu_vm_update_huge(params, pt, cursor.level, + pe_start, dst, nptes, incr, + flags | AMDGPU_PTE_FRAG(frag)); + + pe_start += nptes * 8; + dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift; + + frag_start = upd_end; + if (frag_start >= frag_end) { + /* figure out the next fragment */ + amdgpu_vm_fragment(params, frag_start, end, + flags, &frag, &frag_end); + if (frag < shift) + break; + } + } while (frag_start < entry_end); - dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE; - start = frag_end; + if (frag >= shift) + amdgpu_vm_pt_next(adev, &cursor); } return 0; @@ -1708,8 +1735,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.func = amdgpu_vm_cpu_set_ptes; params.pages_addr = pages_addr; - return amdgpu_vm_frag_ptes(¶ms, start, last + 1, - addr, flags); + return amdgpu_vm_update_ptes(¶ms, start, last + 1, + addr, flags); } ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched); @@ -1788,7 +1815,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - r = amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); + r = amdgpu_vm_update_ptes(¶ms, start, last + 1, addr, flags); if (r) goto error_free; -- GitLab From 1b1d5c43db58d236d4a6c9700ef9395b3fc129fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 7 Sep 2018 14:21:15 +0200 Subject: [PATCH 1439/1692] drm/amdgpu: use the maximum possible fragment size on Vega/Raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fragment size controls only the L1 on Vega/Raven and we now don't have any extra overhead any more because of larger fragments. Signed-off-by: Christian König Reviewed-by: Huang Rui Reviewed-by: Felix Kuehling Acked-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 45343501c1f3..2d2d6197c3dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1540,8 +1540,16 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params, * larger. Thus, we try to use large fragments wherever possible. * Userspace can support this by aligning virtual base address and * allocation size to the fragment size. + * + * Starting with Vega10 the fragment size only controls the L1. The L2 + * is now directly feed with small/huge/giant pages from the walker. 
*/ - unsigned max_frag = params->adev->vm_manager.fragment_size; + unsigned max_frag; + + if (params->adev->asic_type < CHIP_VEGA10) + max_frag = params->adev->vm_manager.fragment_size; + else + max_frag = 31; /* system pages are non continuously */ if (params->src || !(flags & AMDGPU_PTE_VALID)) { -- GitLab From 0c70dd4985b3ad440ec6c51f34f2b47877edf8fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 7 Sep 2018 20:34:17 +0200 Subject: [PATCH 1440/1692] drm/amdgpu: allow fragment processing for invalid PTEs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That should improve the PRT performance on Vega quite a bit. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Reviewed-by: Huang Rui Acked-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2d2d6197c3dc..dd5a0cdd67bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1552,7 +1552,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params, max_frag = 31; /* system pages are non continuously */ - if (params->src || !(flags & AMDGPU_PTE_VALID)) { + if (params->src) { *frag = 0; *frag_end = end; return; -- GitLab From c37e2d29f0fbc933508637d7129a8f079b06ab40 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Wed, 12 Sep 2018 16:38:57 -0400 Subject: [PATCH 1441/1692] drm/amd/display: Fix pflip IRQ status after gpu reset. Problem: After GPU reset pflip completion IRQ is disabled and hence any subsequent mode set or plane update leads to hang. Fix: Unless acrtc->otg_inst is initialized to -1 during display block initializtion then durng resume from GPU reset amdgpu_irq_gpu_reset_resume_helper will override CRTC 0 pflip IRQ value with whatever value was on every other unused CRTC because dm_irq_state will do irq_source = dal_irq_type + acrtc->otg_inst where acrtc->otg_inst will be 0 for every unused CRTC. Reviewed-by: Harry Wentland Signed-off-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index eccae63d3ef1..23ddf54b7dee 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3424,6 +3424,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, acrtc->crtc_id = crtc_index; acrtc->base.enabled = false; + acrtc->otg_inst = -1; dm->adev->mode_info.crtcs[crtc_index] = acrtc; drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, -- GitLab From 03651735fbded39f608163718f816ab9cf14fba7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 12 Sep 2018 21:19:57 +0200 Subject: [PATCH 1442/1692] drm/ttm: once more fix ttm_bo_bulk_move_lru_tail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While cutting the lists we sometimes accidentally added a list_head from the stack to the LRUs, effectively corrupting the list. Remove the list cutting and use explicit list manipulation instead. 
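The explicit list manipulation can be illustrated with a minimal, self-contained sketch (list_head-style nodes, no TTM types involved): moving the run first..last to the tail only rewrites the six pointers involved, so nothing allocated on the stack can end up linked into the LRU.

/*
 * Standalone sketch of a bulk tail move on a minimal circular doubly
 * linked list (same shape as struct list_head). Illustrative only.
 */
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
        h->next = h;
        h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

/*
 * Move the run first..last (inclusive, already linked inside some list)
 * to the tail of @list. Only the six affected pointers are touched, so
 * no on-stack list_head can ever be spliced in by accident.
 */
static void list_move_bulk_tail(struct list_head *list,
                                struct list_head *first,
                                struct list_head *last)
{
        /* unlink the run from wherever it currently lives */
        first->prev->next = last->next;
        last->next->prev = first->prev;

        /* splice it in right before the head, i.e. at the tail */
        list->prev->next = first;
        first->prev = list->prev;
        last->next = list;
        list->prev = last;
}

struct item {
        struct list_head lru;           /* must stay the first member */
        int id;
};

int main(void)
{
        struct item items[5];
        struct list_head lru, *pos;
        int i;

        list_init(&lru);
        for (i = 0; i < 5; i++) {
                items[i].id = i;
                list_add_tail(&items[i].lru, &lru);
        }

        /* move items 1..3 to the tail: expect 0 4 1 2 3 */
        list_move_bulk_tail(&lru, &items[1].lru, &items[3].lru);

        for (pos = lru.next; pos != &lru; pos = pos->next)
                printf("%d ", ((struct item *)pos)->id);
        printf("\n");
        return 0;
}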
Signed-off-by: Christian König Reviewed-and-Tested: Huang Rui Tested-by: Mike Lothian Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 51 +++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 138c98902033..b2a33bf1ef10 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -247,23 +247,18 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); -static void ttm_bo_bulk_move_helper(struct ttm_lru_bulk_move_pos *pos, - struct list_head *lru, bool is_swap) +static void ttm_list_move_bulk_tail(struct list_head *list, + struct list_head *first, + struct list_head *last) { - struct list_head *list; - LIST_HEAD(entries); - LIST_HEAD(before); + first->prev->next = last->next; + last->next->prev = first->prev; - reservation_object_assert_held(pos->last->resv); - list = is_swap ? &pos->last->swap : &pos->last->lru; - list_cut_position(&entries, lru, list); + list->prev->next = first; + first->prev = list->prev; - reservation_object_assert_held(pos->first->resv); - list = is_swap ? pos->first->swap.prev : pos->first->lru.prev; - list_cut_position(&before, &entries, list); - - list_splice(&before, lru); - list_splice_tail(&entries, lru); + last->next = list; + list->prev = last; } void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) @@ -271,23 +266,33 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) unsigned i; for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i]; struct ttm_mem_type_manager *man; - if (!bulk->tt[i].first) + if (!pos->first) continue; - man = &bulk->tt[i].first->bdev->man[TTM_PL_TT]; - ttm_bo_bulk_move_helper(&bulk->tt[i], &man->lru[i], false); + reservation_object_assert_held(pos->first->resv); + reservation_object_assert_held(pos->last->resv); + + man = &pos->first->bdev->man[TTM_PL_TT]; + ttm_list_move_bulk_tail(&man->lru[i], &pos->first->lru, + &pos->last->lru); } for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i]; struct ttm_mem_type_manager *man; - if (!bulk->vram[i].first) + if (!pos->first) continue; - man = &bulk->vram[i].first->bdev->man[TTM_PL_VRAM]; - ttm_bo_bulk_move_helper(&bulk->vram[i], &man->lru[i], false); + reservation_object_assert_held(pos->first->resv); + reservation_object_assert_held(pos->last->resv); + + man = &pos->first->bdev->man[TTM_PL_VRAM]; + ttm_list_move_bulk_tail(&man->lru[i], &pos->first->lru, + &pos->last->lru); } for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { @@ -297,8 +302,12 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) if (!pos->first) continue; + reservation_object_assert_held(pos->first->resv); + reservation_object_assert_held(pos->last->resv); + lru = &pos->first->bdev->glob->swap_lru[i]; - ttm_bo_bulk_move_helper(&bulk->swap[i], lru, true); + ttm_list_move_bulk_tail(lru, &pos->first->swap, + &pos->last->swap); } } EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); -- GitLab From e83dfe4d869358549bb259ab581ae4f0450c6580 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 10 Sep 2018 16:07:57 +0200 Subject: [PATCH 1443/1692] drm/amdgpu: remove amdgpu_bo_list_entry.robj (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can get that just by casting tv.bo. 
v2: squash in kfd fix (Alex) Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 - drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 42 ++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 58 +++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +- 5 files changed, 58 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index e7ceae05d517..6ee9dc476c86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -675,7 +675,6 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, if (!ctx->vm_pd) return -ENOMEM; - ctx->kfd_bo.robj = bo; ctx->kfd_bo.priority = 0; ctx->kfd_bo.tv.bo = &bo->tbo; ctx->kfd_bo.tv.shared = true; @@ -740,7 +739,6 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, return -ENOMEM; } - ctx->kfd_bo.robj = bo; ctx->kfd_bo.priority = 0; ctx->kfd_bo.tv.bo = &bo->tbo; ctx->kfd_bo.tv.shared = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index b80243d3972e..14d2982a47cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -49,8 +49,11 @@ static void amdgpu_bo_list_free(struct kref *ref) refcount); struct amdgpu_bo_list_entry *e; - amdgpu_bo_list_for_each_entry(e, list) - amdgpu_bo_unref(&e->robj); + amdgpu_bo_list_for_each_entry(e, list) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + + amdgpu_bo_unref(&bo); + } call_rcu(&list->rhead, amdgpu_bo_list_free_rcu); } @@ -112,21 +115,20 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, entry = &array[last_entry++]; } - entry->robj = bo; entry->priority = min(info[i].bo_priority, AMDGPU_BO_LIST_MAX_PRIORITY); - entry->tv.bo = &entry->robj->tbo; - entry->tv.shared = !entry->robj->prime_shared_count; - - if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) - list->gds_obj = entry->robj; - if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS) - list->gws_obj = entry->robj; - if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA) - list->oa_obj = entry->robj; - - total_size += amdgpu_bo_size(entry->robj); - trace_amdgpu_bo_list_set(list, entry->robj); + entry->tv.bo = &bo->tbo; + entry->tv.shared = !bo->prime_shared_count; + + if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) + list->gds_obj = bo; + if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS) + list->gws_obj = bo; + if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA) + list->oa_obj = bo; + + total_size += amdgpu_bo_size(bo); + trace_amdgpu_bo_list_set(list, bo); } list->first_userptr = first_userptr; @@ -138,8 +140,11 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, return 0; error_free: - while (i--) - amdgpu_bo_unref(&array[i].robj); + while (i--) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo); + + amdgpu_bo_unref(&bo); + } kvfree(list); return r; @@ -191,9 +196,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, * with the same priority, i.e. it must be stable. 
*/ amdgpu_bo_list_for_each_entry(e, list) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); unsigned priority = e->priority; - if (!e->robj->parent) + if (!bo->parent) list_add_tail(&e->tv.head, &bucket[priority]); e->user_pages = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 61b089768e1c..7c5f5d1601e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -32,7 +32,6 @@ struct amdgpu_bo_va; struct amdgpu_fpriv; struct amdgpu_bo_list_entry { - struct amdgpu_bo *robj; struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; uint32_t priority; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1081fd00b059..d762d78e5102 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, uint32_t *offset) { struct drm_gem_object *gobj; + struct amdgpu_bo *bo; unsigned long size; int r; @@ -46,21 +47,21 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, if (gobj == NULL) return -EINVAL; - p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); + bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.priority = 0; - p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; + p->uf_entry.tv.bo = &bo->tbo; p->uf_entry.tv.shared = true; p->uf_entry.user_pages = NULL; drm_gem_object_put_unlocked(gobj); - size = amdgpu_bo_size(p->uf_entry.robj); + size = amdgpu_bo_size(bo); if (size != PAGE_SIZE || (data->offset + 8) > size) { r = -EINVAL; goto error_unref; } - if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { r = -EINVAL; goto error_unref; } @@ -70,7 +71,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, return 0; error_unref: - amdgpu_bo_unref(&p->uf_entry.robj); + amdgpu_bo_unref(&bo); return r; } @@ -229,7 +230,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs goto free_all_kdata; } - if (p->uf_entry.robj) + if (p->uf_entry.tv.bo) p->job->uf_addr = uf_offset; kfree(chunk_array); @@ -458,13 +459,13 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, p->evictable = list_prev_entry(p->evictable, tv.head)) { struct amdgpu_bo_list_entry *candidate = p->evictable; - struct amdgpu_bo *bo = candidate->robj; + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); bool update_bytes_moved_vis; uint32_t other; /* If we reached our current BO we can forget it */ - if (candidate->robj == validated) + if (bo == validated) break; /* We can't move pinned BOs here */ @@ -529,7 +530,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, int r; list_for_each_entry(lobj, validated, tv.head) { - struct amdgpu_bo *bo = lobj->robj; + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo); bool binding_userptr = false; struct mm_struct *usermm; @@ -604,7 +605,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); - if (p->uf_entry.robj && !p->uf_entry.robj->parent) + if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent) list_add(&p->uf_entry.tv.head, &p->validated); while (1) { @@ -620,7 +621,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, INIT_LIST_HEAD(&need_pages); 
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct amdgpu_bo *bo = e->robj; + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm, &e->user_invalidated) && e->user_pages) { @@ -639,7 +640,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, list_del(&e->tv.head); list_add(&e->tv.head, &need_pages); - amdgpu_bo_unreserve(e->robj); + amdgpu_bo_unreserve(bo); } } @@ -658,7 +659,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, /* Fill the page arrays for all userptrs. */ list_for_each_entry(e, &need_pages, tv.head) { - struct ttm_tt *ttm = e->robj->tbo.ttm; + struct ttm_tt *ttm = e->tv.bo->ttm; e->user_pages = kvmalloc_array(ttm->num_pages, sizeof(struct page*), @@ -717,7 +718,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, oa = p->bo_list->oa_obj; amdgpu_bo_list_for_each_entry(e, p->bo_list) - e->bo_va = amdgpu_vm_bo_find(vm, e->robj); + e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo)); if (gds) { p->job->gds_base = amdgpu_bo_gpu_offset(gds); @@ -732,8 +733,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->job->oa_size = amdgpu_bo_size(oa); } - if (!r && p->uf_entry.robj) { - struct amdgpu_bo *uf = p->uf_entry.robj; + if (!r && p->uf_entry.tv.bo) { + struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo); r = amdgpu_ttm_alloc_gart(&uf->tbo); p->job->uf_addr += amdgpu_bo_gpu_offset(uf); @@ -749,8 +750,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, if (!e->user_pages) continue; - release_pages(e->user_pages, - e->robj->tbo.ttm->num_pages); + release_pages(e->user_pages, e->tv.bo->ttm->num_pages); kvfree(e->user_pages); } @@ -763,9 +763,11 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) int r; list_for_each_entry(e, &p->validated, tv.head) { - struct reservation_object *resv = e->robj->tbo.resv; + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + struct reservation_object *resv = bo->tbo.resv; + r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, - amdgpu_bo_explicit_sync(e->robj)); + amdgpu_bo_explicit_sync(bo)); if (r) return r; @@ -808,7 +810,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, kfree(parser->chunks); if (parser->job) amdgpu_job_free(parser->job); - amdgpu_bo_unref(&parser->uf_entry.robj); + if (parser->uf_entry.tv.bo) { + struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo); + + amdgpu_bo_unref(&uf); + } } static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) @@ -919,7 +925,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) struct dma_fence *f; /* ignore duplicates */ - bo = e->robj; + bo = ttm_to_amdgpu_bo(e->tv.bo); if (!bo) continue; @@ -958,11 +964,13 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (amdgpu_vm_debug) { /* Invalidate all BOs to test for userspace bugs */ amdgpu_bo_list_for_each_entry(e, p->bo_list) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + /* ignore duplicates */ - if (!e->robj) + if (!bo) continue; - amdgpu_vm_bo_invalidate(adev, e->robj, false); + amdgpu_vm_bo_invalidate(adev, bo, false); } } @@ -1211,7 +1219,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, /* No memory allocation is allowed while holding the mn lock */ amdgpu_mn_lock(p->mn); amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct amdgpu_bo *bo = e->robj; + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { r = -ERESTARTSYS; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index dd5a0cdd67bc..234764ac58cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -593,9 +593,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry) { - entry->robj = vm->root.base.bo; entry->priority = 0; - entry->tv.bo = &entry->robj->tbo; + entry->tv.bo = &vm->root.base.bo->tbo; entry->tv.shared = true; entry->user_pages = NULL; list_add(&entry->tv.head, validated); -- GitLab From 646b90259842faa8341b076a3488a227927d84a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 10 Sep 2018 20:02:46 +0200 Subject: [PATCH 1444/1692] drm/amdgpu: use a single linked list for amdgpu_vm_bo_base MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of the double linked list. Gets the size of amdgpu_vm_pt down to 64 bytes again. We could even reduce it down to 32 bytes, but that would require some rather extreme hacks. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 38 ++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 4 files changed, 29 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index de990bdcdd6c..e6909252aefa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -448,7 +448,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, return -ENOMEM; drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); INIT_LIST_HEAD(&bo->shadow_list); - INIT_LIST_HEAD(&bo->va); + bo->vm_bo = NULL; bo->preferred_domains = bp->preferred_domain ? 
bp->preferred_domain : bp->domain; bo->allowed_domains = bo->preferred_domains; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 907fdf46d895..64337ff2ad63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -89,8 +89,8 @@ struct amdgpu_bo { void *metadata; u32 metadata_size; unsigned prime_shared_count; - /* list of all virtual address to which this bo is associated to */ - struct list_head va; + /* per VM structure for page tables and with virtual addresses */ + struct amdgpu_vm_bo_base *vm_bo; /* Constant after initialization */ struct drm_gem_object gem_base; struct amdgpu_bo *parent; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 234764ac58cf..a7f9aaa47c49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -309,12 +309,13 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, { base->vm = vm; base->bo = bo; - INIT_LIST_HEAD(&base->bo_list); + base->next = NULL; INIT_LIST_HEAD(&base->vm_status); if (!bo) return; - list_add_tail(&base->bo_list, &bo->va); + base->next = bo->vm_bo; + bo->vm_bo = base; if (bo->tbo.resv != vm->root.base.bo->tbo.resv) return; @@ -352,7 +353,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) if (!parent) return NULL; - return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list); + return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); } /** @@ -954,7 +955,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev, for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) { if (entry->base.bo) { - list_del(&entry->base.bo_list); + entry->base.bo->vm_bo = NULL; list_del(&entry->base.vm_status); amdgpu_bo_unref(&entry->base.bo->shadow); amdgpu_bo_unref(&entry->base.bo); @@ -1162,12 +1163,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, struct amdgpu_bo *bo) { - struct amdgpu_bo_va *bo_va; + struct amdgpu_vm_bo_base *base; - list_for_each_entry(bo_va, &bo->va, base.bo_list) { - if (bo_va->base.vm == vm) { - return bo_va; - } + for (base = bo->vm_bo; base; base = base->next) { + if (base->vm != vm) + continue; + + return container_of(base, struct amdgpu_bo_va, base); } return NULL; } @@ -2728,11 +2730,21 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping, *next; struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; + struct amdgpu_vm_bo_base **base; - if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) - vm->bulk_moveable = false; + if (bo) { + if (bo->tbo.resv == vm->root.base.bo->tbo.resv) + vm->bulk_moveable = false; - list_del(&bo_va->base.bo_list); + for (base = &bo_va->base.bo->vm_bo; *base; + base = &(*base)->next) { + if (*base != &bo_va->base) + continue; + + *base = bo_va->base.next; + break; + } + } spin_lock(&vm->invalidated_lock); list_del(&bo_va->base.vm_status); @@ -2774,7 +2786,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, if (bo->parent && bo->parent->shadow == bo) bo = bo->parent; - list_for_each_entry(bo_base, &bo->va, bo_list) { + for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 
12d21eec4568..2a8898d19c8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -129,7 +129,7 @@ struct amdgpu_vm_bo_base { struct amdgpu_bo *bo; /* protected by bo being reserved */ - struct list_head bo_list; + struct amdgpu_vm_bo_base *next; /* protected by spinlock */ struct list_head vm_status; -- GitLab From 1cebf8f143c21eb422cd0f4e27ab2ae366eb4d04 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 13 Sep 2018 14:40:55 +0200 Subject: [PATCH 1445/1692] socket: fix struct ifreq size in compat ioctl As reported by Robert O'Callahan, since Viro's commit to kill dev_ifsioc() we attempt to copy too much data in compat mode, which may lead to EFAULT when the 32-bit version of struct ifreq sits at/near the end of a page boundary, and the next page isn't mapped. Fix this by passing the appropriate compat/non-compat size to copy and using that, as before the dev_ifsioc() removal. This works because only the embedded "struct ifmap" has different size, and this is only used in SIOCGIFMAP/SIOCSIFMAP which has a different handler. All other parts of the union are naturally compatible. This fixes https://bugzilla.kernel.org/show_bug.cgi?id=199469. Fixes: bf4405737f9f ("kill dev_ifsioc()") Reported-by: Robert O'Callahan Signed-off-by: Johannes Berg Signed-off-by: David S. Miller --- net/socket.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/net/socket.c b/net/socket.c index e6945e318f02..01f3f8f32d6f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) EXPORT_SYMBOL(dlci_ioctl_set); static long sock_do_ioctl(struct net *net, struct socket *sock, - unsigned int cmd, unsigned long arg) + unsigned int cmd, unsigned long arg, + unsigned int ifreq_size) { int err; void __user *argp = (void __user *)arg; @@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, } else { struct ifreq ifr; bool need_copyout; - if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) + if (copy_from_user(&ifr, argp, ifreq_size)) return -EFAULT; err = dev_ioctl(net, cmd, &ifr, &need_copyout); if (!err && need_copyout) - if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) + if (copy_to_user(argp, &ifr, ifreq_size)) return -EFAULT; } return err; @@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) err = open_related_ns(&net->ns, get_net_ns); break; default: - err = sock_do_ioctl(net, sock, cmd, arg); + err = sock_do_ioctl(net, sock, cmd, arg, + sizeof(struct ifreq)); break; } return err; @@ -2750,7 +2752,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock, int err; set_fs(KERNEL_DS); - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, + sizeof(struct compat_ifreq)); set_fs(old_fs); if (!err) err = compat_put_timeval(&ktv, up); @@ -2766,7 +2769,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock, int err; set_fs(KERNEL_DS); - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, + sizeof(struct compat_ifreq)); set_fs(old_fs); if (!err) err = compat_put_timespec(&kts, up); @@ -3072,7 +3076,8 @@ static int routing_ioctl(struct net *net, struct socket *sock, } set_fs(KERNEL_DS); - ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); + ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, + sizeof(struct compat_ifreq));
set_fs(old_fs); out: @@ -3185,7 +3190,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: case SIOCGIFNAME: - return sock_do_ioctl(net, sock, cmd, arg); + return sock_do_ioctl(net, sock, cmd, arg, + sizeof(struct compat_ifreq)); } return -ENOIOCTLCMD; -- GitLab From 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 12 Sep 2018 23:57:48 -1000 Subject: [PATCH 1446/1692] mm: get rid of vmacache_flush_all() entirely Jann Horn points out that the vmacache_flush_all() function is not only potentially expensive, it's buggy too. It also happens to be entirely unnecessary, because the sequence number overflow case can be avoided by simply making the sequence number be 64-bit. That doesn't even grow the data structures in question, because the other adjacent fields are already 64-bit. So simplify the whole thing by just making the sequence number overflow case go away entirely, which gets rid of all the complications and makes the code faster too. Win-win. [ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics also just goes away entirely with this ] Reported-by: Jann Horn Suggested-by: Will Deacon Acked-by: Davidlohr Bueso Cc: Oleg Nesterov Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 2 +- include/linux/mm_types_task.h | 2 +- include/linux/vm_event_item.h | 1 - include/linux/vmacache.h | 5 ----- mm/debug.c | 4 ++-- mm/vmacache.c | 38 ----------------------------------- 6 files changed, 4 insertions(+), 48 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index cd2bc939efd0..5ed8f6292a53 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -341,7 +341,7 @@ struct mm_struct { struct { struct vm_area_struct *mmap; /* list of VMAs */ struct rb_root mm_rb; - u32 vmacache_seqnum; /* per-thread vmacache */ + u64 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index 5fe87687664c..d7016dcb245e 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -32,7 +32,7 @@ #define VMACACHE_MASK (VMACACHE_SIZE - 1) struct vmacache { - u32 seqnum; + u64 seqnum; struct vm_area_struct *vmas[VMACACHE_SIZE]; }; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 5c7f010676a7..47a3441cf4c4 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_DEBUG_VM_VMACACHE VMACACHE_FIND_CALLS, VMACACHE_FIND_HITS, - VMACACHE_FULL_FLUSHES, #endif #ifdef CONFIG_SWAP SWAP_RA, diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index 3e9a963edd6a..6fce268a4588 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk) memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); } -extern void vmacache_flush_all(struct mm_struct *mm); extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr); @@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, static inline void vmacache_invalidate(struct mm_struct *mm) { mm->vmacache_seqnum++; - - /* 
deal with overflows */ - if (unlikely(mm->vmacache_seqnum == 0)) - vmacache_flush_all(mm); } #endif /* __LINUX_VMACACHE_H */ diff --git a/mm/debug.c b/mm/debug.c index 38c926520c97..bd10aad8539a 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { - pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n" + pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif @@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm) "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", - mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, + mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif diff --git a/mm/vmacache.c b/mm/vmacache.c index ea517bef7dc5..cdc32a3b02fa 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c @@ -19,44 +19,6 @@ #endif #define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK) -/* - * Flush vma caches for threads that share a given mm. - * - * The operation is safe because the caller holds the mmap_sem - * exclusively and other threads accessing the vma cache will - * have mmap_sem held at least for read, so no extra locking - * is required to maintain the vma cache. - */ -void vmacache_flush_all(struct mm_struct *mm) -{ - struct task_struct *g, *p; - - count_vm_vmacache_event(VMACACHE_FULL_FLUSHES); - - /* - * Single threaded tasks need not iterate the entire - * list of process. We can avoid the flushing as well - * since the mm's seqnum was increased and don't have - * to worry about other threads' seqnum. Current's - * flush will occur upon the next lookup. - */ - if (atomic_read(&mm->mm_users) == 1) - return; - - rcu_read_lock(); - for_each_process_thread(g, p) { - /* - * Only flush the vmacache pointers as the - * mm seqnum is already set and curr's will - * be set upon invalidation when the next - * lookup is done. - */ - if (mm == p->mm) - vmacache_flush(p); - } - rcu_read_unlock(); -} - /* * This task may be accessing a foreign mm via (for example) * get_user_pages()->find_vma(). The vmacache is task-local and this -- GitLab From 24568b47d48ec8c906fd0f589489a08b17e1edca Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Wed, 5 Sep 2018 09:26:41 +0200 Subject: [PATCH 1447/1692] crypto: x86/aegis,morus - Do not require OSXSAVE for SSE2 It turns out OSXSAVE needs to be checked only for AVX, not for SSE. Without this patch the affected modules refuse to load on CPUs with SSE2 but without AVX support. 
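
To make the distinction concrete, here is a minimal sketch of the two kinds of init-time checks; the function names are placeholders and the includes simply mirror the existing arch/x86/crypto *-glue.c files, this is not the actual glue code. SSE2-only modules only need the XMM2 feature plus SSE xstate support, while the AVX/AVX2 glue modules still have to check OSXSAVE because they rely on XSAVE-managed YMM state:

#include <linux/module.h>
#include <asm/fpu/api.h>
#include <asm/cpu_device_id.h>

/* Sketch only: what an SSE2-only module needs to verify. */
static int __init example_sse2_algo_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
		return -ENODEV;		/* no OSXSAVE requirement here */

	return 0;
}

/* Sketch only: an AVX module keeps the OSXSAVE and YMM-state checks. */
static int __init example_avx_algo_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_AVX) ||
	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
		return -ENODEV;

	return 0;
}
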
Fixes: 877ccce7cbe8 ("crypto: x86/aegis,morus - Fix and simplify CPUID checks") Cc: # 4.18 Reported-by: Zdenek Kaspar Signed-off-by: Ondrej Mosnacek Signed-off-by: Herbert Xu --- arch/x86/crypto/aegis128-aesni-glue.c | 1 - arch/x86/crypto/aegis128l-aesni-glue.c | 1 - arch/x86/crypto/aegis256-aesni-glue.c | 1 - arch/x86/crypto/morus1280-sse2-glue.c | 1 - arch/x86/crypto/morus640-sse2-glue.c | 1 - 5 files changed, 5 deletions(-) diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index acd11b3bf639..2a356b948720 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || !boot_cpu_has(X86_FEATURE_AES) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c index 2071c3d1ae07..dbe8bb980da1 100644 --- a/arch/x86/crypto/aegis128l-aesni-glue.c +++ b/arch/x86/crypto/aegis128l-aesni-glue.c @@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || !boot_cpu_has(X86_FEATURE_AES) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c index b5f2a8fd5a71..8bebda2de92f 100644 --- a/arch/x86/crypto/aegis256-aesni-glue.c +++ b/arch/x86/crypto/aegis256-aesni-glue.c @@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || !boot_cpu_has(X86_FEATURE_AES) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c index 95cf857d2cbb..f40244eaf14d 100644 --- a/arch/x86/crypto/morus1280-sse2-glue.c +++ b/arch/x86/crypto/morus1280-sse2-glue.c @@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350); static int __init crypto_morus1280_sse2_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c index 615fb7bc9a32..9afaf8f8565a 100644 --- a/arch/x86/crypto/morus640-sse2-glue.c +++ b/arch/x86/crypto/morus640-sse2-glue.c @@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400); static int __init crypto_morus640_sse2_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; -- GitLab From 308b118b6090e42883a88d8e39ef27fca3389b19 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Jun 2018 12:37:41 +0300 Subject: [PATCH 1448/1692] MAINTAINERS: Update tree location for the Renesas DRM drivers The fbdev git tree referenced in the MAINTAINERS file doesn't exist anymore. Update the location to point to the new git tree. 
Signed-off-by: Laurent Pinchart Reviewed-by: Simon Horman --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 029baa270a11..21737b16466e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4872,7 +4872,7 @@ DRM DRIVERS FOR RENESAS M: Laurent Pinchart L: dri-devel@lists.freedesktop.org L: linux-renesas-soc@vger.kernel.org -T: git git://linuxtv.org/pinchartl/fbdev +T: git git://linuxtv.org/pinchartl/media drm/du/next S: Supported F: drivers/gpu/drm/rcar-du/ F: drivers/gpu/drm/shmobile/ -- GitLab From 500dd232449e7c07500e713dc6970aa713f8e4f1 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Thu, 13 Sep 2018 13:48:27 +0100 Subject: [PATCH 1449/1692] asm-generic: io: Fix ioport_map() for !CONFIG_GENERIC_IOMAP && CONFIG_INDIRECT_PIO The !CONFIG_GENERIC_IOMAP version of ioport_map uses MMIO_UPPER_LIMIT to prevent users from making I/O accesses outside the expected I/O range - however it erroneously treats MMIO_UPPER_LIMIT as a mask which is contradictory to its other users. The introduction of CONFIG_INDIRECT_PIO, which subtracts an arbitrary amount from IO_SPACE_LIMIT to form MMIO_UPPER_LIMIT, results in ioport_map mangling the given port rather than capping it. We address this by aligning more closely with the CONFIG_GENERIC_IOMAP implementation of ioport_map by using the comparison operator and returning NULL where the port exceeds MMIO_UPPER_LIMIT. Though note that we preserve the existing behavior of masking with IO_SPACE_LIMIT such that we don't break existing buggy drivers that somehow rely on this masking. Fixes: 5745392e0c2b ("PCI: Apply the new generic I/O management on PCI IO hosts") Reported-by: Will Deacon Reviewed-by: Arnd Bergmann Signed-off-by: Andrew Murray Signed-off-by: Will Deacon --- include/asm-generic/io.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 66d1d45fa2e1..d356f802945a 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1026,7 +1026,8 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) #define ioport_map ioport_map static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { - return PCI_IOBASE + (port & MMIO_UPPER_LIMIT); + port &= IO_SPACE_LIMIT; + return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port; } #endif -- GitLab From 7408e252ddf29ae9d6e0ca5c4223aa460ef47043 Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Mon, 6 Aug 2018 15:39:01 +0100 Subject: [PATCH 1450/1692] MAINTAINERS: rcar-du: Add co-maintainer Add myself as a co-maintainer for the Renesas DRM drivers. Signed-off-by: Kieran Bingham Acked-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 21737b16466e..567e03759af9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4870,6 +4870,7 @@ F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt DRM DRIVERS FOR RENESAS M: Laurent Pinchart +M: Kieran Bingham L: dri-devel@lists.freedesktop.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/pinchartl/media drm/du/next -- GitLab From 4ffe5aa53791ac5ab2c29e99f23c07cb85922dd5 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 4 Jun 2018 22:04:59 +0300 Subject: [PATCH 1451/1692] dt-bindings: display: renesas: du: document R8A77980 bindings Document the R-Car V3H (R8A77980) SoC in the R-Car DU bindings; the DU hardware has the same topology as in the R-Car V3M (R8A77970). 
Signed-off-by: Sergei Shtylyov Reviewed-by: Simon Horman Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- Documentation/devicetree/bindings/display/renesas,du.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt index ec9d34be2ff7..caae2348a292 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.txt +++ b/Documentation/devicetree/bindings/display/renesas,du.txt @@ -15,6 +15,7 @@ Required Properties: - "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU - "renesas,du-r8a77965" for R8A77965 (R-Car M3-N) compatible DU - "renesas,du-r8a77970" for R8A77970 (R-Car V3M) compatible DU + - "renesas,du-r8a77980" for R8A77980 (R-Car V3H) compatible DU - "renesas,du-r8a77995" for R8A77995 (R-Car D3) compatible DU - reg: the memory-mapped I/O registers base address and length @@ -61,6 +62,7 @@ corresponding to each DU output. R8A7796 (R-Car M3-W) DPAD 0 HDMI 0 LVDS 0 - R8A77965 (R-Car M3-N) DPAD 0 HDMI 0 LVDS 0 - R8A77970 (R-Car V3M) DPAD 0 LVDS 0 - - + R8A77980 (R-Car V3H) DPAD 0 LVDS 0 - - R8A77995 (R-Car D3) DPAD 0 LVDS 0 LVDS 1 - -- GitLab From ab77eb4c4de77a3095abbc213b8f191c58ebdca1 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 5 Jun 2018 23:28:58 +0300 Subject: [PATCH 1452/1692] dt-bindings: display: renesas: lvds: document R8A77980 bindings Document the R-Car V3H (R8A77980) SoC in the R-Car LVDS bindings. Signed-off-by: Sergei Shtylyov Acked-by: Rob Herring Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- .../devicetree/bindings/display/bridge/renesas,lvds.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt index 4f0ab3ed3b6f..5a4e379bb414 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt +++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt @@ -14,6 +14,7 @@ Required properties: - "renesas,r8a7795-lvds" for R8A7795 (R-Car H3) compatible LVDS encoders - "renesas,r8a7796-lvds" for R8A7796 (R-Car M3-W) compatible LVDS encoders - "renesas,r8a77970-lvds" for R8A77970 (R-Car V3M) compatible LVDS encoders + - "renesas,r8a77980-lvds" for R8A77980 (R-Car V3H) compatible LVDS encoders - "renesas,r8a77995-lvds" for R8A77995 (R-Car D3) compatible LVDS encoders - reg: Base address and length for the memory-mapped registers -- GitLab From 59104f239b9ed6cf3986e4228173ff2f4c95039e Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 26 Jul 2018 02:37:49 +0000 Subject: [PATCH 1453/1692] drm: shmobile: convert to SPDX identifiers Signed-off-by: Kuninori Morimoto Acked-by: Laurent Pinchart Reviewed-by: Simon Horman Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/shmobile/Kconfig | 1 + drivers/gpu/drm/shmobile/shmob_drm_backlight.c | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_backlight.h | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_crtc.h | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_drv.h | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_kms.c | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_kms.h | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_plane.c | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_plane.h | 6 +----- drivers/gpu/drm/shmobile/shmob_drm_regs.h | 6 +----- include/linux/platform_data/shmob_drm.h | 6 +----- 13 files changed, 13 
insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index 0426d66660d1..61bbe8e8bcc5 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 config DRM_SHMOBILE tristate "DRM Support for SH Mobile" depends on DRM && ARM diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c index 33dd41afea0e..f6628a5ee95f 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * shmob_drm_backlight.c -- SH Mobile DRM Backlight * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h index bac719ecc301..d9abb7a60be5 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm_backlight.h -- SH Mobile DRM Backlight * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __SHMOB_DRM_BACKLIGHT_H__ diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index fc66167b0641..499b5fdb869f 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * shmob_drm_crtc.c -- SH Mobile DRM CRTCs * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h index c11f421737dc..9ca6920641d8 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm_crtc.h -- SH Mobile DRM CRTCs * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef __SHMOB_DRM_CRTC_H__ diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 592572554eb0..6ececad6f845 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * shmob_drm_drv.c -- SH Mobile DRM driver * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h index 088a6e55fa29..80dc4b1020aa 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm.h -- SH Mobile DRM driver * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __SHMOB_DRM_DRV_H__ diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c index 447638581c08..a17268444c6d 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * shmob_drm_kms.c -- SH Mobile DRM Mode Setting * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h index 753e2817dc2c..6ec2b732bb94 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_kms.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm_kms.h -- SH Mobile DRM Mode Setting * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef __SHMOB_DRM_KMS_H__ diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c index 1d0359f713ca..1d1ee5e51351 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * shmob_drm_plane.c -- SH Mobile DRM Planes * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h index a58cc1fc3240..bae67cc8c628 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_plane.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm_plane.h -- SH Mobile DRM Planes * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __SHMOB_DRM_PLANE_H__ diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h index ea17d4415b9e..9eb0b3d01df8 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_regs.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm_regs.h -- SH Mobile DRM registers * * Copyright (C) 2012 Renesas Electronics Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __SHMOB_DRM_REGS_H__ diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h index ee495d707f17..fe815d7d9f58 100644 --- a/include/linux/platform_data/shmob_drm.h +++ b/include/linux/platform_data/shmob_drm.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm.h -- SH Mobile DRM driver * * Copyright (C) 2012 Renesas Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef __SHMOB_DRM_H__ -- GitLab From d7cfd259048b6f774c9a0c95f1356c386341942b Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 6 Aug 2018 03:18:22 +0000 Subject: [PATCH 1454/1692] drm: panel-lvds: convert to SPDX identifiers Signed-off-by: Kuninori Morimoto Acked-by: Laurent Pinchart Reviewed-by: Simon Horman Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/panel/panel-lvds.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 8a1687887ae9..3f6550e6b6a4 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Generic LVDS panel driver * @@ -5,11 +6,6 @@ * Copyright (C) 2016 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include -- GitLab From 0bbce9eb7738b1a9f4c03dc9fb28cd93bd19ac8a Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 26 Jul 2018 02:37:32 +0000 Subject: [PATCH 1455/1692] drm: rcar-du: Convert to SPDX identifiers Kconfig doesn't have license line, thus, it is GPL-2.0 as default. rcar_du_regs.h, rcar_lvds_regs.h are GPL-2.0, and all other files are GPL-2.0+ as original license. Signed-off-by: Kuninori Morimoto Acked-by: Laurent Pinchart Reviewed-by: Simon Horman Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/Kconfig | 1 + drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_group.c | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_group.h | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_kms.h | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_plane.h | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_regs.h | 5 +---- drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 6 +----- drivers/gpu/drm/rcar-du/rcar_du_vsp.h | 6 +----- drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c | 6 +----- drivers/gpu/drm/rcar-du/rcar_lvds_regs.h | 5 +---- 18 files changed, 18 insertions(+), 83 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index edde8d4b87a3..225141656e19 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 config DRM_RCAR_DU tristate "DRM Support for R-Car Display Unit" depends on DRM && OF diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 8a9e5e6f16b4..578047986a29 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * rcar_du_crtc.c -- R-Car Display Unit CRTCs * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free 
Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 592c79993e08..4990bbe9ba26 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * rcar_du_crtc.h -- R-Car Display Unit CRTCs * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __RCAR_DU_CRTC_H__ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 02aee6cb0e53..a04e0a1fefc9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * rcar_du_drv.c -- R-Car Display Unit DRM driver * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index b3a25e8e07d0..43a149dafa65 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * rcar_du_drv.h -- R-Car Display Unit DRM driver * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __RCAR_DU_DRV_H__ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index f9c933d3bae6..1877764bd6d9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * rcar_du_encoder.c -- R-Car Display Unit Encoder * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index 2d2abcacd169..ce3cbc85695e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * rcar_du_encoder.h -- R-Car Display Unit Encoder * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __RCAR_DU_ENCODER_H__ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index d539cb290a35..ef2c177afb6d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * rcar_du_group.c -- R-Car Display Unit Channels Pair * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ /* diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h index 42105aedecc8..87950c1f6a52 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * rcar_du_group.c -- R-Car Display Unit Planes and CRTCs Group * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __RCAR_DU_GROUP_H__ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index f0bc7cc0e913..ed7fa3204892 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * rcar_du_kms.c -- R-Car Display Unit Mode Setting * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #include diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h index 07951d5fe38b..e171527abdaa 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * rcar_du_kms.h -- R-Car Display Unit Mode Setting * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __RCAR_DU_KMS_H__ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 8861e715c248..9e07758a755c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * rcar_du_plane.c -- R-Car Display Unit Planes * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h index 5c19c69e4691..2f223a4c1d33 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * rcar_du_plane.h -- R-Car Display Unit Planes * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __RCAR_DU_PLANE_H__ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index 9dfd220ceda1..f1417248f7e1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * rcar_du_regs.h -- R-Car Display Unit Registers Definitions * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. 
*/ #ifndef __RCAR_DU_REGS_H__ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 45eb777a16a4..e991642afa4f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * rcar_du_vsp.h -- R-Car Display Unit VSP-Based Compositor * * Copyright (C) 2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h index 8a8a25c8c8e8..e8c14dc5cb93 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * rcar_du_vsp.h -- R-Car Display Unit VSP-Based Compositor * * Copyright (C) 2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __RCAR_DU_VSP_H__ diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c index 76210ae25094..75490a3e0a2a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c +++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * R-Car Gen3 HDMI PHY * * Copyright (C) 2016 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h index 2896835ca7e9..4870f50d9bec 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. */ #ifndef __RCAR_LVDS_REGS_H__ -- GitLab From 4e86c208ddf2da6683a0ee6f8696a9f1b4444018 Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Fri, 3 Aug 2018 12:37:30 +0100 Subject: [PATCH 1456/1692] drm: rcar-du: Support interlaced video output through vsp1 Use the newly exposed VSP1 interface to enable interlaced frame support through the VSP1 LIF pipelines. The DSMR register is updated to set the ODEV flag on interlaced pipelines, thus defining an interlaced stream as having the ODD field located in the second half (BOTTOM) of the frame buffer. 
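
Condensed, the flag propagation implemented by the two hunks below looks roughly like this; a sketch rather than the literal driver code, where "dsmr" stands for the mode register value built in rcar_du_crtc_set_display_timing() and "cfg" for the LIF configuration filled in rcar_du_vsp_enable():

#include <drm/drm_modes.h>
#include <media/vsp1.h>

#include "rcar_du_regs.h"

static void sketch_apply_interlace(const struct drm_display_mode *mode,
				   u32 *dsmr, struct vsp1_du_lif_config *cfg)
{
	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;

	/* DU side: signal the odd field in the second (bottom) half. */
	if (interlaced)
		*dsmr |= DSMR_ODEV;

	/* VSP1 side: let the LIF pipeline know the stream is interlaced. */
	cfg->interlaced = interlaced;
}
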
Signed-off-by: Kieran Bingham Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 1 + drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 578047986a29..4b7cf6cf0c57 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -285,6 +285,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) /* Signal polarities */ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0) + | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0) | DSMR_DIPM_DISP | DSMR_CSPM; rcar_du_crtc_write(rcrtc, DSMR, value); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index e991642afa4f..4480243813ec 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -48,6 +48,7 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) struct vsp1_du_lif_config cfg = { .width = mode->hdisplay, .height = mode->vdisplay, + .interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE, .callback = rcar_du_vsp_complete, .callback_data = crtc, }; -- GitLab From c14f63abebb3e393eede4fff2b0c448d42b085e9 Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Mon, 20 Aug 2018 17:00:43 +0100 Subject: [PATCH 1457/1692] drm: rcar-du: Refactor Feature and Quirk definitions These flags are represented by bit fields. To make this clear, utilise the BIT() macro. Signed-off-by: Kieran Bingham Reviewed-by: Laurent Pinchart Reviewed-by: Simon Horman Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 43a149dafa65..fff3c1cf56a0 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -23,11 +23,11 @@ struct drm_device; struct drm_fbdev_cma; struct rcar_du_device; -#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */ -#define RCAR_DU_FEATURE_EXT_CTRL_REGS (1 << 1) /* Has extended control registers */ -#define RCAR_DU_FEATURE_VSP1_SOURCE (1 << 2) /* Has inputs from VSP1 */ +#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */ +#define RCAR_DU_FEATURE_EXT_CTRL_REGS BIT(1) /* Has extended control registers */ +#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */ -#define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */ +#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ /* * struct rcar_du_output_routing - Output routing specification -- GitLab From c6e3194a3b55a9365e40c3a25f8e31afa154c26c Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Mon, 20 Aug 2018 17:00:44 +0100 Subject: [PATCH 1458/1692] drm: rcar-du: Add interlaced feature flag Upcoming implementations of the R-Car DU have removed support for interlaced display pipelines. Provide a means to determine this based on the feature flags of the hardware configuration structs. 
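
As an illustration of how the flag is meant to be used, a DU variant without interlace support would simply omit the new bit from its device info table; the entry below is hypothetical, not a real SoC, and follows the layout of the existing rcar_du_device_info tables:

/* Hypothetical example only, not a real SoC entry. */
static const struct rcar_du_device_info example_du_no_interlace_info = {
	.gen = 3,
	.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
		  | RCAR_DU_FEATURE_EXT_CTRL_REGS
		  | RCAR_DU_FEATURE_VSP1_SOURCE,
	/* No RCAR_DU_FEATURE_INTERLACED: interlaced modes are rejected. */
	.channels_mask = BIT(0),
	.routes = {
		/* output routing omitted for brevity */
	},
};

The bit is consumed in a single place, the rcar_du_crtc_mode_valid() helper added below, so unsupported interlaced modes are filtered consistently both when probing and advertising modes and when validating atomic commits.
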
Signed-off-by: Kieran Bingham Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 14 +++++++++++ drivers/gpu/drm/rcar-du/rcar_du_drv.c | 32 +++++++++++++++++--------- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 1 + 3 files changed, 36 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 4b7cf6cf0c57..175c36ca89c5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -681,11 +681,25 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc, rcar_du_vsp_atomic_flush(rcrtc); } +enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + struct rcar_du_device *rcdu = rcrtc->group->dev; + bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + + if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED)) + return MODE_NO_INTERLACE; + + return MODE_OK; +} + static const struct drm_crtc_helper_funcs crtc_helper_funcs = { .atomic_begin = rcar_du_crtc_atomic_begin, .atomic_flush = rcar_du_crtc_atomic_flush, .atomic_enable = rcar_du_crtc_atomic_enable, .atomic_disable = rcar_du_crtc_atomic_disable, + .mode_valid = rcar_du_crtc_mode_valid, }; static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index a04e0a1fefc9..02fa9d36be28 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -35,7 +35,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK - | RCAR_DU_FEATURE_EXT_CTRL_REGS, + | RCAR_DU_FEATURE_EXT_CTRL_REGS + | RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -56,7 +57,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = { static const struct rcar_du_device_info rzg1_du_r8a7745_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK - | RCAR_DU_FEATURE_EXT_CTRL_REGS, + | RCAR_DU_FEATURE_EXT_CTRL_REGS + | RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -75,7 +77,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = { static const struct rcar_du_device_info rcar_du_r8a7779_info = { .gen = 2, - .features = 0, + .features = RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -96,7 +98,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { static const struct rcar_du_device_info rcar_du_r8a7790_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK - | RCAR_DU_FEATURE_EXT_CTRL_REGS, + | RCAR_DU_FEATURE_EXT_CTRL_REGS + | RCAR_DU_FEATURE_INTERLACED, .quirks = RCAR_DU_QUIRK_ALIGN_128B, .channels_mask = BIT(2) | BIT(1) | BIT(0), .routes = { @@ -124,7 +127,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { static const struct rcar_du_device_info rcar_du_r8a7791_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK - | RCAR_DU_FEATURE_EXT_CTRL_REGS, + | RCAR_DU_FEATURE_EXT_CTRL_REGS + | RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -146,7 +150,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = { static const struct rcar_du_device_info rcar_du_r8a7792_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK - | RCAR_DU_FEATURE_EXT_CTRL_REGS, + | RCAR_DU_FEATURE_EXT_CTRL_REGS + | 
RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(1) | BIT(0), .routes = { /* R8A7792 has two RGB outputs. */ @@ -164,7 +169,8 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = { static const struct rcar_du_device_info rcar_du_r8a7794_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK - | RCAR_DU_FEATURE_EXT_CTRL_REGS, + | RCAR_DU_FEATURE_EXT_CTRL_REGS + | RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -186,7 +192,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = { .gen = 3, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_VSP1_SOURCE, + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0), .routes = { /* @@ -218,7 +225,8 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = { .gen = 3, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_VSP1_SOURCE, + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(2) | BIT(1) | BIT(0), .routes = { /* @@ -246,7 +254,8 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = { .gen = 3, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_VSP1_SOURCE, + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(3) | BIT(1) | BIT(0), .routes = { /* @@ -274,7 +283,8 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = { .gen = 3, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_VSP1_SOURCE, + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_INTERLACED, .channels_mask = BIT(0), .routes = { /* R8A77970 has one RGB output and one LVDS output. */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index fff3c1cf56a0..534a0291380d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -26,6 +26,7 @@ struct rcar_du_device; #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */ #define RCAR_DU_FEATURE_EXT_CTRL_REGS BIT(1) /* Has extended control registers */ #define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */ +#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */ #define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ -- GitLab From 87dffe86d406bee8782cac2db035acb9a28620a7 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 6 Sep 2018 13:26:08 +0200 Subject: [PATCH 1459/1692] xen/manage: don't complain about an empty value in control/sysrq node When the guest receives a sysrq request from the host it acknowledges it by writing '\0' to the control/sysrq xenstore node. This, however, makes the xenstore watch fire again, but xenbus_scanf() fails to parse the empty value with the "%c" format string: sysrq: SysRq : Emergency Sync Emergency Sync complete xen:manage: Error -34 reading sysrq code in control/sysrq Ignore -ERANGE the same way we already ignore -ENOENT; an empty value in control/sysrq is perfectly legal.
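For reference, a simplified sketch of the read/acknowledge pair in sysrq_handler() that produces the spurious error (paraphrased, not the exact code from drivers/xen/manage.c):

    err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
    /* ... act on the key ... */
    xenbus_printf(xbt, "control", "sysrq", "%c", '\0'); /* ack: node now holds "" */
    /* The ack fires the watch again; on re-entry xenbus_scanf() cannot parse
     * the empty value with "%c" and returns -ERANGE (-34), which is just as
     * harmless as -ENOENT and should be ignored. */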
Signed-off-by: Vitaly Kuznetsov Reviewed-by: Wei Liu Signed-off-by: Boris Ostrovsky --- drivers/xen/manage.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index c93d8ef8df34..5bb01a62f214 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path, /* * The Xenstore watch fires directly after registering it and * after a suspend/resume cycle. So ENOENT is no error but - * might happen in those cases. + * might happen in those cases. ERANGE is observed when we get + * an empty value (''), this happens when we acknowledge the + * request by writing '\0' below. */ - if (err != -ENOENT) + if (err != -ENOENT && err != -ERANGE) pr_err("Error %d reading sysrq code in control/sysrq\n", err); xenbus_transaction_end(xbt, 1); -- GitLab From 197ecb3802c04499d8ff4f8cb28f6efa008067db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?= Date: Fri, 7 Sep 2018 18:49:08 +0200 Subject: [PATCH 1460/1692] xen/balloon: add runtime control for scrubbing ballooned out pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Scrubbing pages on initial balloon down can take some time, especially in nested virtualization case (nested EPT is slow). When HVM/PVH guest is started with memory= significantly lower than maxmem=, all the extra pages will be scrubbed before returning to Xen. But since most of them weren't used at all at that point, Xen needs to populate them first (from populate-on-demand pool). In nested virt case (Xen inside KVM) this slows down the guest boot by 15-30s with just 1.5GB needed to be returned to Xen. Add runtime parameter to enable/disable it, to allow initially disabling scrubbing, then enable it back during boot (for example in initramfs). Such usage relies on assumption that a) most pages ballooned out during initial boot weren't used at all, and b) even if they were, very few secrets are in the guest at that time (before any serious userspace kicks in). Convert CONFIG_XEN_SCRUB_PAGES to CONFIG_XEN_SCRUB_PAGES_DEFAULT (also enabled by default), controlling default value for the new runtime switch. Signed-off-by: Marek Marczykowski-Górecki Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- .../ABI/stable/sysfs-devices-system-xen_memory | 9 +++++++++ Documentation/admin-guide/kernel-parameters.txt | 6 ++++++ drivers/xen/Kconfig | 10 +++++++--- drivers/xen/mem-reservation.c | 4 ++++ drivers/xen/xen-balloon.c | 3 +++ include/xen/mem-reservation.h | 7 ++++--- 6 files changed, 33 insertions(+), 6 deletions(-) diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory index caa311d59ac1..6d83f95a8a8e 100644 --- a/Documentation/ABI/stable/sysfs-devices-system-xen_memory +++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory @@ -75,3 +75,12 @@ Contact: Konrad Rzeszutek Wilk Description: Amount (in KiB) of low (or normal) memory in the balloon. + +What: /sys/devices/system/xen_memory/xen_memory0/scrub_pages +Date: September 2018 +KernelVersion: 4.20 +Contact: xen-devel@lists.xenproject.org +Description: + Control scrubbing pages before returning them to Xen for others domains + use. Can be set with xen_scrub_pages cmdline + parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT. 
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 64a3bf54b974..92eb1f42240d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5000,6 +5000,12 @@ Disables the PV optimizations forcing the HVM guest to run as generic HVM guest with no PV drivers. + xen_scrub_pages= [XEN] + Boolean option to control scrubbing pages before giving them back + to Xen, for use by other domains. Can be also changed at runtime + with /sys/devices/system/xen_memory/xen_memory0/scrub_pages. + Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT. + xirc2ps_cs= [NET,PCMCIA] Format: ,,,,,[,[,[,]]] diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index b459edfacff3..90d387b50ab7 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT This value is used to allocate enough space in internal tables needed for physical memory administration. -config XEN_SCRUB_PAGES - bool "Scrub pages before returning them to system" +config XEN_SCRUB_PAGES_DEFAULT + bool "Scrub pages before returning them to system by default" depends on XEN_BALLOON default y help Scrub pages before returning them to the system for reuse by other domains. This makes sure that any confidential data is not accidentally visible to other domains. Is it more - secure, but slightly less efficient. + secure, but slightly less efficient. This can be controlled with + xen_scrub_pages=0 parameter and + /sys/devices/system/xen_memory/xen_memory0/scrub_pages. + This option only sets the default value. + If in doubt, say yes. config XEN_DEV_EVTCHN diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c index 084799c6180e..3782cf070338 100644 --- a/drivers/xen/mem-reservation.c +++ b/drivers/xen/mem-reservation.c @@ -14,6 +14,10 @@ #include #include +#include + +bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT); +core_param(xen_scrub_pages, xen_scrub_pages, bool, 0); /* * Use one extent per PAGE_SIZE to avoid to break down the page into diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index 294f35ce9e46..63c1494a8d73 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c @@ -44,6 +44,7 @@ #include #include #include +#include #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) @@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay); static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); +static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages); static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr, char *buf) @@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = { &dev_attr_max_schedule_delay.attr.attr, &dev_attr_retry_count.attr.attr, &dev_attr_max_retry_count.attr.attr, + &dev_attr_scrub_pages.attr.attr, NULL }; diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h index 80b52b4945e9..a2ab516fcd2c 100644 --- a/include/xen/mem-reservation.h +++ b/include/xen/mem-reservation.h @@ -17,11 +17,12 @@ #include +extern bool xen_scrub_pages; + static inline void xenmem_reservation_scrub_page(struct page *page) { -#ifdef CONFIG_XEN_SCRUB_PAGES - clear_highpage(page); -#endif + if (xen_scrub_pages) + 
clear_highpage(page); } #ifdef CONFIG_XEN_HAVE_PVMMU -- GitLab From 3366cdb6d350d95466ee430ac50f3c8415ca8f46 Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Fri, 7 Sep 2018 16:31:35 +0200 Subject: [PATCH 1461/1692] xen: avoid crash in disable_hotplug_cpu The command 'xl vcpu-set 0 0', issued in dom0, will crash dom0: BUG: unable to handle kernel NULL pointer dereference at 00000000000002d8 PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP NOPTI CPU: 7 PID: 65 Comm: xenwatch Not tainted 4.19.0-rc2-1.ga9462db-default #1 openSUSE Tumbleweed (unreleased) Hardware name: Intel Corporation S5520UR/S5520UR, BIOS S5500.86B.01.00.0050.050620101605 05/06/2010 RIP: e030:device_offline+0x9/0xb0 Code: 77 24 00 e9 ce fe ff ff 48 8b 13 e9 68 ff ff ff 48 8b 13 e9 29 ff ff ff 48 8b 13 e9 ea fe ff ff 90 66 66 66 66 90 41 54 55 53 87 d8 02 00 00 01 0f 85 88 00 00 00 48 c7 c2 20 09 60 81 31 f6 RSP: e02b:ffffc90040f27e80 EFLAGS: 00010203 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: ffff8801f3800000 RSI: ffffc90040f27e70 RDI: 0000000000000000 RBP: 0000000000000000 R08: ffffffff820e47b3 R09: 0000000000000000 R10: 0000000000007ff0 R11: 0000000000000000 R12: ffffffff822e6d30 R13: dead000000000200 R14: dead000000000100 R15: ffffffff8158b4e0 FS: 00007ffa595158c0(0000) GS:ffff8801f39c0000(0000) knlGS:0000000000000000 CS: e033 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000002d8 CR3: 00000001d9602000 CR4: 0000000000002660 Call Trace: handle_vcpu_hotplug_event+0xb5/0xc0 xenwatch_thread+0x80/0x140 ? wait_woken+0x80/0x80 kthread+0x112/0x130 ? kthread_create_worker_on_cpu+0x40/0x40 ret_from_fork+0x3a/0x50 This happens because handle_vcpu_hotplug_event is called twice. In the first iteration cpu_present is still true, in the second iteration cpu_present is false which causes get_cpu_device to return NULL. In case of cpu#0, cpu_online is apparently always true. Fix this crash by checking if the cpu can be hotplugged, which is false for a cpu that was just removed. Also check if the cpu was actually offlined by device_remove, otherwise leave the cpu_present state as it is. Rearrange to code to do all work with device_hotplug_lock held. Signed-off-by: Olaf Hering Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/cpu_hotplug.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index d4265c8ebb22..b1357aa4bc55 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c @@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu) static void disable_hotplug_cpu(int cpu) { - if (cpu_online(cpu)) { - lock_device_hotplug(); + if (!cpu_is_hotpluggable(cpu)) + return; + lock_device_hotplug(); + if (cpu_online(cpu)) device_offline(get_cpu_device(cpu)); - unlock_device_hotplug(); - } - if (cpu_present(cpu)) + if (!cpu_online(cpu) && cpu_present(cpu)) { xen_arch_unregister_cpu(cpu); - - set_cpu_present(cpu, false); + set_cpu_present(cpu, false); + } + unlock_device_hotplug(); } static int vcpu_online(unsigned int cpu) -- GitLab From 4dca864b59dd150a221730775e2f21f49779c135 Mon Sep 17 00:00:00 2001 From: Josh Abraham Date: Wed, 12 Sep 2018 15:13:54 -1000 Subject: [PATCH 1462/1692] xen: fix GCC warning and remove duplicate EVTCHN_ROW/EVTCHN_COL usage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch removes duplicate macro useage in events_base.c. 
It also fixes gcc warning: variable ‘col’ set but not used [-Wunused-but-set-variable] Signed-off-by: Joshua Abraham Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/events/events_base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 08e4af04d6f2..e6c1934734b7 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) clear_evtchn_to_irq_row(row); } - evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq; + evtchn_to_irq[row][col] = irq; return 0; } -- GitLab From 58a57569904039d9ac38c0ff2a88396a43899689 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 5 Sep 2018 09:21:39 +1000 Subject: [PATCH 1463/1692] xen/gntdev: fix up blockable calls to mn_invl_range_start Patch series "mmu_notifiers follow ups". Tetsuo has noticed some fallouts from 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers"). One of them has been fixed and picked up by AMD/DRM maintainer [1]. XEN issue is fixed by patch 1. I have also clarified expectations about blockable semantic of invalidate_range_end. Finally the last patch removes MMU_INVALIDATE_DOES_NOT_BLOCK which is no longer used nor needed. [1] http://lkml.kernel.org/r/20180824135257.GU29735@dhcp22.suse.cz This patch (of 3): 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers") has introduced blockable parameter to all mmu_notifiers and the notifier has to back off when called in !blockable case and it could block down the road. The above commit implemented that for mn_invl_range_start but both in_range checks are done unconditionally regardless of the blockable mode and as such they would fail all the time for regular calls. Fix this by checking blockable parameter as well. Once we are there we can remove the stale TODO. The lock has to be sleepable because we wait for completion down in gnttab_unmap_refs_sync. 
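For readers skimming the diff below, the reworked unmap_if_in_range() follows the usual back-off pattern for non-blockable mmu-notifier calls; a condensed sketch (see the hunk itself for the authoritative version):

    static int unmap_if_in_range(struct gntdev_grant_map *map,
                                 unsigned long start, unsigned long end,
                                 bool blockable)
    {
            if (!in_range(map, start, end))
                    return 0;        /* nothing to do for this map */
            if (!blockable)
                    return -EAGAIN;  /* would have to sleep; let the caller retry */
            /* ... from here on it is safe to sleep and unmap the range ... */
            return 0;
    }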
Link: http://lkml.kernel.org/r/20180827112623.8992-2-mhocko@kernel.org Fixes: 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers") Signed-off-by: Michal Hocko Cc: Boris Ostrovsky Cc: Juergen Gross Cc: David Rientjes Cc: Jerome Glisse Cc: Tetsuo Handa Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/gntdev.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 57390c7666e5..b0b02a501167 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map, return true; } -static void unmap_if_in_range(struct gntdev_grant_map *map, - unsigned long start, unsigned long end) +static int unmap_if_in_range(struct gntdev_grant_map *map, + unsigned long start, unsigned long end, + bool blockable) { unsigned long mstart, mend; int err; + if (!in_range(map, start, end)) + return 0; + + if (!blockable) + return -EAGAIN; + mstart = max(start, map->vma->vm_start); mend = min(end, map->vma->vm_end); pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", @@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map, (mstart - map->vma->vm_start) >> PAGE_SHIFT, (mend - mstart) >> PAGE_SHIFT); WARN_ON(err); + + return 0; } static int mn_invl_range_start(struct mmu_notifier *mn, @@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn, struct gntdev_grant_map *map; int ret = 0; - /* TODO do we really need a mutex here? */ if (blockable) mutex_lock(&priv->lock); else if (!mutex_trylock(&priv->lock)) return -EAGAIN; list_for_each_entry(map, &priv->maps, next) { - if (in_range(map, start, end)) { - ret = -EAGAIN; + ret = unmap_if_in_range(map, start, end, blockable); + if (ret) goto out_unlock; - } - unmap_if_in_range(map, start, end); } list_for_each_entry(map, &priv->freeable_maps, next) { - if (in_range(map, start, end)) { - ret = -EAGAIN; + ret = unmap_if_in_range(map, start, end, blockable); + if (ret) goto out_unlock; - } - unmap_if_in_range(map, start, end); } out_unlock: -- GitLab From be9699e3923000ea32c2f4522e1e4de333d21d47 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:10:05 +0800 Subject: [PATCH 1464/1692] drm/amdgpu: add picasso to asic_type enum Add picasso to amd_asic_type enum and amdgpu_asic_name[]. Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + include/drm/amd_asic_type.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d4855d1ef51f..e8083ec3fbc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -89,6 +89,7 @@ static const char *amdgpu_asic_name[] = { "VEGA12", "VEGA20", "RAVEN", + "PICASSO", "LAST", }; diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index dd63d08cc54e..5644fc679d6f 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -49,6 +49,7 @@ enum amd_asic_type { CHIP_VEGA12, CHIP_VEGA20, CHIP_RAVEN, + CHIP_PICASSO, CHIP_LAST, }; -- GitLab From ad5a67a7ea87e625721a5d0c4e9f12100372f1f6 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:22:36 +0800 Subject: [PATCH 1465/1692] drm/amdgpu: add soc15 support for picasso Add the IP blocks, clock and powergating flags, and common clockgating support. 
Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 ++++++- drivers/gpu/drm/amd/amdgpu/soc15.c | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e8083ec3fbc2..82c2e8260571 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -62,6 +62,7 @@ MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 @@ -1335,6 +1336,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_RAVEN: chip_name = "raven"; break; + case CHIP_PICASSO: + chip_name = "picasso"; + break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); @@ -1460,7 +1464,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - if (adev->asic_type == CHIP_RAVEN) + case CHIP_PICASSO: + if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO)) adev->family = AMDGPU_FAMILY_RV; else adev->family = AMDGPU_FAMILY_AI; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index fc0cb7d38c9f..d3b73afcb871 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -486,6 +486,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_RAVEN: + case CHIP_PICASSO: vega10_reg_base_init(adev); break; case CHIP_VEGA20: @@ -724,6 +725,25 @@ static int soc15_common_early_init(void *handle) adev->external_rev_id = 0x1; break; + case CHIP_PICASSO: + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS; + + adev->pg_flags = 0; + + adev->external_rev_id = adev->rev_id + 0x41; + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -924,6 +944,7 @@ static int soc15_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE ? true : false); break; case CHIP_RAVEN: + case CHIP_PICASSO: adev->nbio_funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); adev->nbio_funcs->update_medium_grain_light_sleep(adev, -- GitLab From 5f4e2085eeab8c6386452bc18f4f680d1ea3b9d2 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:20:16 +0800 Subject: [PATCH 1466/1692] drm/amdgpu: add picasso support for vm Add vm support for picasso. 
Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a7f9aaa47c49..7a9ffe9eb8bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2981,7 +2981,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - if (adev->asic_type == CHIP_RAVEN) + if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) vm->pte_support_ats = true; } else { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & @@ -3073,7 +3073,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid) { - bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); + bool pte_support_ats = (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO); int r; r = amdgpu_bo_reserve(vm->root.base.bo, true); -- GitLab From b22ab73314c0dfa91ac1948812c7e8050240c563 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:32:06 +0800 Subject: [PATCH 1467/1692] drm/amd/display/dm: add picasso support Add support for picasso to the display manager. Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 82c2e8260571..25e7e1cccaa1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2177,6 +2177,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_VEGA20: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: + case CHIP_PICASSO: #endif return amdgpu_dc != 0; #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 23ddf54b7dee..1ff2e8fd5a22 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1213,7 +1213,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_RAVEN) + adev->asic_type == CHIP_RAVEN || + adev->asic_type == CHIP_PICASSO) client_id = SOC15_IH_CLIENTID_DCE; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -1632,6 +1633,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: + case CHIP_PICASSO: if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -1858,6 +1860,7 @@ static int dm_early_init(void *handle) break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: + case CHIP_PICASSO: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; @@ -2106,7 +2109,8 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == 
CHIP_VEGA20 || - adev->asic_type == CHIP_RAVEN) { + adev->asic_type == CHIP_RAVEN || + adev->asic_type == CHIP_PICASSO) { /* Fill GFX9 params */ plane_state->tiling_info.gfx9.num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes; -- GitLab From 186b073decbf7573971b1786ffeb2fe776d0d9e7 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 19:25:29 +0800 Subject: [PATCH 1468/1692] drm/amdgpu: Add support of powerplay for picasso add powerplay support for picasso, treat it the same as raven now. Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 + drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 7500a3e61dba..a45578e6504a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -171,6 +171,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case AMDGPU_FAMILY_RV: switch (hwmgr->chip_id) { case CHIP_RAVEN: + case CHIP_PICASSO: hwmgr->od_enabled = false; hwmgr->smumgr_funcs = &smu10_smu_funcs; smu10_init_function_pointers(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 77c14671866c..f6fe9ce793ad 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -832,7 +832,7 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( uint16_t size; if (!table_addr) { - if (hwmgr->chip_id == CHIP_RAVEN) { + if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) { table_addr = &soft_dummy_pp_table[0]; hwmgr->soft_pp_table = &soft_dummy_pp_table[0]; hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table); @@ -1055,7 +1055,7 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, hwmgr->platform_descriptor.maxOverdriveVDDC = 0; hwmgr->platform_descriptor.overdriveVDDCStep = 0; - if (hwmgr->chip_id == CHIP_RAVEN) + if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) return 0; /* We assume here that fw_info is unchanged if this call fails.*/ @@ -1595,7 +1595,7 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) int result; const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table; - if (hwmgr->chip_id == CHIP_RAVEN) + if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) return 0; hwmgr->need_pp_table_upload = true; @@ -1644,7 +1644,7 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - if (hwmgr->chip_id == CHIP_RAVEN) + if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) return 0; kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); -- GitLab From 227f3dc5f615eb31e007f5db4da7297c66a36e24 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:12:38 +0800 Subject: [PATCH 1469/1692] drm/amdgpu: initilize picasso psp firmwares support Same as raven for now. 
Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 + drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index bd397d2916fb..611c06d3600a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -56,6 +56,7 @@ static int psp_sw_init(void *handle) psp_v3_1_set_psp_funcs(psp); break; case CHIP_RAVEN: + case CHIP_PICASSO: psp_v10_0_set_psp_funcs(psp); break; case CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 240dc8c85867..eae3bf8b8463 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -35,6 +35,7 @@ #include "sdma0/sdma0_4_1_offset.h" MODULE_FIRMWARE("amdgpu/raven_asd.bin"); +MODULE_FIRMWARE("amdgpu/picasso_asd.bin"); static int psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) @@ -119,6 +120,9 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) case CHIP_RAVEN: chip_name = "raven"; break; + case CHIP_PICASSO: + chip_name = "picasso"; + break; default: BUG(); } -- GitLab From 669018bdcd858def1c43d130b4cc29409ed8c036 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:15:45 +0800 Subject: [PATCH 1470/1692] drm/amdgpu: add picasso ucode loading method Same as raven. Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 1fa8bc337859..acb4c66fe89b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -303,6 +303,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return AMDGPU_FW_LOAD_SMU; case CHIP_VEGA10: case CHIP_RAVEN: + case CHIP_PICASSO: case CHIP_VEGA12: case CHIP_VEGA20: if (!load_type) -- GitLab From 86771d9a58fa8f6b2b871f72c6beaf269f309718 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:17:13 +0800 Subject: [PATCH 1471/1692] drm/amdgpu: add picasso support for vcn Add vcn support for picasso. Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 76e59a6e8311..93ffba2cbec4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -42,8 +42,10 @@ /* Firmware Names */ #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" +#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); +MODULE_FIRMWARE(FIRMWARE_PICASSO); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -61,6 +63,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) case CHIP_RAVEN: fw_name = FIRMWARE_RAVEN; break; + case CHIP_PICASSO: + fw_name = FIRMWARE_PICASSO; + break; default: return -EINVAL; } -- GitLab From 79f3641cc03881ba98aa549a149b34c838b7d7fb Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:25:24 +0800 Subject: [PATCH 1472/1692] drm/amdgpu: add clockgating support for picasso Treat it the same as raven for now. 
Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 0e09549d1db8..1797304cb40b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -621,7 +621,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG); - if (adev->asic_type != CHIP_RAVEN) { + if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) { def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2); } else @@ -637,7 +637,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); - if (adev->asic_type != CHIP_RAVEN) + if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -654,7 +654,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); - if (adev->asic_type != CHIP_RAVEN) + if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -667,13 +667,13 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data); if (def1 != data1) { - if (adev->asic_type != CHIP_RAVEN) + if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); else WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1); } - if (adev->asic_type != CHIP_RAVEN && def2 != data2) + if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO && def2 != data2) WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2); } @@ -737,6 +737,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_PICASSO: mmhub_v1_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); athub_update_medium_grain_clock_gating(adev, -- GitLab From e21f561ad52a6c909c64d2856a3ad03a4042a5df Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:26:41 +0800 Subject: [PATCH 1473/1692] drm/amdgpu: add picasso support for gmc Same as raven. 
Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b1c848937e42..55b11afec16e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -846,6 +846,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) adev->gmc.gart_size = 512ULL << 20; break; case CHIP_RAVEN: /* DCE SG support */ + case CHIP_PICASSO: /* DCE SG support */ adev->gmc.gart_size = 1024ULL << 20; break; } @@ -934,6 +935,7 @@ static int gmc_v9_0_sw_init(void *handle) adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); switch (adev->asic_type) { case CHIP_RAVEN: + case CHIP_PICASSO: if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); } else { @@ -1060,6 +1062,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) case CHIP_VEGA12: break; case CHIP_RAVEN: + case CHIP_PICASSO: soc15_program_register_sequence(adev, golden_settings_athub_1_0_0, ARRAY_SIZE(golden_settings_athub_1_0_0)); @@ -1094,6 +1097,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: + case CHIP_PICASSO: mmhub_v1_0_initialize_power_gating(adev); mmhub_v1_0_update_power_gating(adev, true); break; -- GitLab From 501a580ae6a4087ed7c8e4fdcf3de7a5ca56bdd1 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:29:12 +0800 Subject: [PATCH 1474/1692] drm/amdgpu: add picasso support for gfx_v9_0 Add gfx support to picasso Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3594704a6f9b..ad20747bace8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -80,6 +80,13 @@ MODULE_FIRMWARE("amdgpu/raven_mec.bin"); MODULE_FIRMWARE("amdgpu/raven_mec2.bin"); MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); +MODULE_FIRMWARE("amdgpu/picasso_ce.bin"); +MODULE_FIRMWARE("amdgpu/picasso_pfp.bin"); +MODULE_FIRMWARE("amdgpu/picasso_me.bin"); +MODULE_FIRMWARE("amdgpu/picasso_mec.bin"); +MODULE_FIRMWARE("amdgpu/picasso_mec2.bin"); +MODULE_FIRMWARE("amdgpu/picasso_rlc.bin"); + static const struct soc15_reg_golden golden_settings_gc_9_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), @@ -240,6 +247,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 +#define PICASSO_GB_ADDR_CONFIG_GOLDEN 0x24000042 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev); @@ -279,6 +287,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) ARRAY_SIZE(golden_settings_gc_9_0_vg20)); break; case CHIP_RAVEN: + case CHIP_PICASSO: soc15_program_register_sequence(adev, golden_settings_gc_9_1, ARRAY_SIZE(golden_settings_gc_9_1)); @@ -566,6 +575,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) case CHIP_RAVEN: chip_name = "raven"; break; + case CHIP_PICASSO: + chip_name = "picasso"; + break; default: BUG(); } @@ 
-1019,7 +1031,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } - if (adev->asic_type == CHIP_RAVEN) { + if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) { /* TODO: double check the cp_table_size for RV */ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, @@ -1268,6 +1280,14 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; + case CHIP_PICASSO: + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = PICASSO_GB_ADDR_CONFIG_GOLDEN; + break; default: BUG(); break; @@ -1546,6 +1566,7 @@ static int gfx_v9_0_sw_init(void *handle) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_PICASSO: adev->gfx.mec.num_mec = 2; break; default: @@ -1707,7 +1728,7 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, (void **)&adev->gfx.rlc.cs_ptr); - if (adev->asic_type == CHIP_RAVEN) { + if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO)) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, &adev->gfx.rlc.cp_table_gpu_addr, (void **)&adev->gfx.rlc.cp_table_ptr); @@ -2373,7 +2394,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } - if (adev->asic_type == CHIP_RAVEN) { + if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) { if (amdgpu_lbpw != 0) gfx_v9_0_enable_lbpw(adev, true); else @@ -3777,6 +3798,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: + case CHIP_PICASSO: if (!enable) { amdgpu_gfx_off_ctrl(adev, false); cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); @@ -3831,6 +3853,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_PICASSO: gfx_v9_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); break; @@ -4840,6 +4863,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_PICASSO: adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; break; default: -- GitLab From 84ad2e1bd6e6fededa7ed389d0a171d0822abaac Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:30:42 +0800 Subject: [PATCH 1475/1692] drm/amdgpu: add picasso support for sdma_v4 Add sdma support to picasso Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index ee0213edca8e..ca8a26178e2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin"); MODULE_FIRMWARE("amdgpu/vega20_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin"); MODULE_FIRMWARE("amdgpu/raven_sdma.bin"); +MODULE_FIRMWARE("amdgpu/picasso_sdma.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L @@ -221,6 +222,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) ARRAY_SIZE(golden_settings_sdma1_4_2)); break; case CHIP_RAVEN: + case CHIP_PICASSO: soc15_program_register_sequence(adev, golden_settings_sdma_4_1, ARRAY_SIZE(golden_settings_sdma_4_1)); @@ -269,6 +271,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) case CHIP_RAVEN: chip_name = "raven"; break; + case CHIP_PICASSO: + chip_name = "picasso"; + break; default: BUG(); } @@ -849,6 +854,7 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: + case CHIP_PICASSO: sdma_v4_1_init_power_gating(adev); sdma_v4_1_update_power_gating(adev, true); break; @@ -1256,7 +1262,7 @@ static int sdma_v4_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_RAVEN) + if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) adev->sdma.num_instances = 1; else adev->sdma.num_instances = 2; @@ -1599,6 +1605,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_PICASSO: sdma_v4_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); sdma_v4_0_update_medium_grain_light_sleep(adev, @@ -1617,6 +1624,7 @@ static int sdma_v4_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: + case CHIP_PICASSO: sdma_v4_1_update_power_gating(adev, state == AMD_PG_STATE_GATE ? 
true : false); break; -- GitLab From b95874cfa83e9096b7654cd79e35157c918005f5 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:34:10 +0800 Subject: [PATCH 1476/1692] drm/amdgpu: add picasso for amdgpu kms Add picasso for amdgpu kms Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 86e8772b6852..76174c4d288a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -974,7 +974,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, pm_runtime_get_sync(dev->dev); - if (adev->asic_type != CHIP_RAVEN) { + if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) { amdgpu_uvd_free_handles(adev, file_priv); amdgpu_vce_free_handles(adev, file_priv); } -- GitLab From 8c7bf5834a334dad9a1c3a2ddc93e26bdeeadd57 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Tue, 7 Aug 2018 17:05:22 +0800 Subject: [PATCH 1477/1692] drm/amdgpu: Add pg support for gfxoff for PCO Add pg support for gfxoff. Signed-off-by: Kenneth Feng Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index d3b73afcb871..2539fa7b9594 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -740,7 +740,10 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = 0; + if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_RLC_SMU_HS; adev->external_rev_id = adev->rev_id + 0x41; break; -- GitLab From a4494fda32adb4e8d7441f680219b6e99ffbf6e7 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Fri, 10 Aug 2018 16:22:26 +0800 Subject: [PATCH 1478/1692] drm/amdgpu: Enable SDMA power gating for PCO Enable SDMA power gating Signed-off-by: Kenneth Feng Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 2539fa7b9594..b205a4a5c3d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -740,6 +740,8 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; + adev->pg_flags = AMD_PG_SUPPORT_SDMA; + if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_CP | -- GitLab From 91468057d39ec08ae8389a91436be2c019a1ac36 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Mon, 20 Aug 2018 15:39:32 +0800 Subject: [PATCH 1479/1692] drm/amdgpu: enable mmhub power gating Remove some functions due to the design change. All the mmhub power gating sequence is moved to smu fw.Driver sends the message to enable mmhub powergating.We can also skip the fw version check since the old fw version is in a very early stage and we don't use that fw for release. 
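Reconstructed from the diff below, the driver-side helper is reduced to a single SMU request; a sketch of the resulting mmhub_v1_0_update_power_gating():

    void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable)
    {
            if (amdgpu_sriov_vf(adev))
                    return;

            if (enable && (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) {
                    /* the SMU firmware now runs the whole MMHUB PG sequence */
                    if (adev->powerplay.pp_funcs->set_powergating_by_smu)
                            amdgpu_dpm_set_powergating_by_smu(adev,
                                            AMD_IP_BLOCK_TYPE_GMC, true);
            }
    }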
Signed-off-by: Kenneth Feng Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 - drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 220 ------------------------ drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 1 - drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +- 4 files changed, 1 insertion(+), 223 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 55b11afec16e..0ad1586c293f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1098,7 +1098,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: case CHIP_PICASSO: - mmhub_v1_0_initialize_power_gating(adev); mmhub_v1_0_update_power_gating(adev, true); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 1797304cb40b..b121eb6a0ad4 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -261,236 +261,16 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) } } -struct pctl_data { - uint32_t index; - uint32_t data; -}; - -static const struct pctl_data pctl0_data[] = { - {0x0, 0x7a640}, - {0x9, 0x2a64a}, - {0xd, 0x2a680}, - {0x11, 0x6a684}, - {0x19, 0xea68e}, - {0x29, 0xa69e}, - {0x2b, 0x0010a6c0}, - {0x3d, 0x83a707}, - {0xc2, 0x8a7a4}, - {0xcc, 0x1a7b8}, - {0xcf, 0xfa7cc}, - {0xe0, 0x17a7dd}, - {0xf9, 0xa7dc}, - {0xfb, 0x12a7f5}, - {0x10f, 0xa808}, - {0x111, 0x12a810}, - {0x125, 0x7a82c} -}; -#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data)) - -#define PCTL0_RENG_EXEC_END_PTR 0x12d -#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640 -#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833 - -static const struct pctl_data pctl1_data[] = { - {0x0, 0x39a000}, - {0x3b, 0x44a040}, - {0x81, 0x2a08d}, - {0x85, 0x6ba094}, - {0xf2, 0x18a100}, - {0x10c, 0x4a132}, - {0x112, 0xca141}, - {0x120, 0x2fa158}, - {0x151, 0x17a1d0}, - {0x16a, 0x1a1e9}, - {0x16d, 0x13a1ec}, - {0x182, 0x7a201}, - {0x18b, 0x3a20a}, - {0x190, 0x7a580}, - {0x199, 0xa590}, - {0x19b, 0x4a594}, - {0x1a1, 0x1a59c}, - {0x1a4, 0x7a82c}, - {0x1ad, 0xfa7cc}, - {0x1be, 0x17a7dd}, - {0x1d7, 0x12a810}, - {0x1eb, 0x4000a7e1}, - {0x1ec, 0x5000a7f5}, - {0x1ed, 0x4000a7e2}, - {0x1ee, 0x5000a7dc}, - {0x1ef, 0x4000a7e3}, - {0x1f0, 0x5000a7f6}, - {0x1f1, 0x5000a7e4} -}; -#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data)) - -#define PCTL1_RENG_EXEC_END_PTR 0x1f1 -#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000 -#define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d -#define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580 -#define PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT 0xa59d -#define PCTL1_STCTRL_REG_SAVE_RANGE2_BASE 0xa82c -#define PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT 0xa833 - -static void mmhub_v1_0_power_gating_write_save_ranges(struct amdgpu_device *adev) -{ - uint32_t tmp = 0; - - /* PCTL0_STCTRL_REGISTER_SAVE_RANGE0 */ - tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0, - STCTRL_REGISTER_SAVE_BASE, - PCTL0_STCTRL_REG_SAVE_RANGE0_BASE); - tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0, - STCTRL_REGISTER_SAVE_LIMIT, - PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT); - WREG32_SOC15(MMHUB, 0, mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0, tmp); - - /* PCTL1_STCTRL_REGISTER_SAVE_RANGE0 */ - tmp = 0; - tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0, - STCTRL_REGISTER_SAVE_BASE, - PCTL1_STCTRL_REG_SAVE_RANGE0_BASE); - tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0, - STCTRL_REGISTER_SAVE_LIMIT, - 
PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT); - WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0, tmp); - - /* PCTL1_STCTRL_REGISTER_SAVE_RANGE1 */ - tmp = 0; - tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1, - STCTRL_REGISTER_SAVE_BASE, - PCTL1_STCTRL_REG_SAVE_RANGE1_BASE); - tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1, - STCTRL_REGISTER_SAVE_LIMIT, - PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT); - WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1, tmp); - - /* PCTL1_STCTRL_REGISTER_SAVE_RANGE2 */ - tmp = 0; - tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2, - STCTRL_REGISTER_SAVE_BASE, - PCTL1_STCTRL_REG_SAVE_RANGE2_BASE); - tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2, - STCTRL_REGISTER_SAVE_LIMIT, - PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT); - WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2, tmp); -} - -void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev) -{ - uint32_t pctl0_misc = 0; - uint32_t pctl0_reng_execute = 0; - uint32_t pctl1_misc = 0; - uint32_t pctl1_reng_execute = 0; - int i = 0; - - if (amdgpu_sriov_vf(adev)) - return; - - /****************** pctl0 **********************/ - pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC); - pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE); - - /* Light sleep must be disabled before writing to pctl0 registers */ - pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK; - WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc); - - /* Write data used to access ram of register engine */ - for (i = 0; i < PCTL0_DATA_LEN; i++) { - WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_INDEX, - pctl0_data[i].index); - WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_DATA, - pctl0_data[i].data); - } - - /* Re-enable light sleep */ - pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK; - WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc); - - /****************** pctl1 **********************/ - pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC); - pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE); - - /* Light sleep must be disabled before writing to pctl1 registers */ - pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK; - WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc); - - /* Write data used to access ram of register engine */ - for (i = 0; i < PCTL1_DATA_LEN; i++) { - WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_INDEX, - pctl1_data[i].index); - WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_DATA, - pctl1_data[i].data); - } - - /* Re-enable light sleep */ - pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK; - WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc); - - mmhub_v1_0_power_gating_write_save_ranges(adev); - - /* Set the reng execute end ptr for pctl0 */ - pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, - PCTL0_RENG_EXECUTE, - RENG_EXECUTE_END_PTR, - PCTL0_RENG_EXEC_END_PTR); - WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute); - - /* Set the reng execute end ptr for pctl1 */ - pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, - PCTL1_RENG_EXECUTE, - RENG_EXECUTE_END_PTR, - PCTL1_RENG_EXEC_END_PTR); - WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); -} - void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t pctl0_reng_execute = 0; - uint32_t pctl1_reng_execute = 0; - if (amdgpu_sriov_vf(adev)) return; - pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE); - pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE); - if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { - 
pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, - PCTL0_RENG_EXECUTE, - RENG_EXECUTE_ON_PWR_UP, 1); - pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, - PCTL0_RENG_EXECUTE, - RENG_EXECUTE_ON_REG_UPDATE, 1); - WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute); - - pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, - PCTL1_RENG_EXECUTE, - RENG_EXECUTE_ON_PWR_UP, 1); - pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, - PCTL1_RENG_EXECUTE, - RENG_EXECUTE_ON_REG_UPDATE, 1); - WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); - if (adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); - } else { - pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, - PCTL0_RENG_EXECUTE, - RENG_EXECUTE_ON_PWR_UP, 0); - pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, - PCTL0_RENG_EXECUTE, - RENG_EXECUTE_ON_REG_UPDATE, 0); - WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute); - - pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, - PCTL1_RENG_EXECUTE, - RENG_EXECUTE_ON_PWR_UP, 0); - pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, - PCTL1_RENG_EXECUTE, - RENG_EXECUTE_ON_REG_UPDATE, 0); - WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); } } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index 5d38229baf69..bef3d0c0c117 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -32,7 +32,6 @@ void mmhub_v1_0_init(struct amdgpu_device *adev); int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state); void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); -void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev); void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index b205a4a5c3d6..53159f1d7a39 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -740,7 +740,7 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = AMD_PG_SUPPORT_SDMA; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_MMHUB; if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | -- GitLab From 1a84d967c180d421a9182a227fc1fee3701cc7d6 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Fri, 24 Aug 2018 16:44:11 +0800 Subject: [PATCH 1480/1692] drm/amdgpu: enable vcn powergating for PCO enable vcn pg Signed-off-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 53159f1d7a39..7837f0cc75b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -740,7 +740,9 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_MMHUB; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | + AMD_PG_SUPPORT_MMHUB | + AMD_PG_SUPPORT_VCN; if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | -- GitLab From 40c2358b1102b65d2c173a43083894af630f0e60 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 9 Jul 2018 20:00:05 +0800 Subject: [PATCH 1481/1692] drm/amdgpu: add 
ip blocks for picasso (v2) Add PCO IPs. V2: enable VCN as well Signed-off-by: Huang Rui Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 7837f0cc75b2..e338ad6d0d20 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -551,6 +551,24 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); #else # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." +#endif + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); + amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); + break; + case CHIP_PICASSO: + amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); +#if defined(CONFIG_DRM_AMD_DC) + else if (amdgpu_device_has_dc_support(adev)) + amdgpu_device_ip_block_add(adev, &dm_ip_block); +#else +# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." #endif amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); -- GitLab From 367b013ce0064c782cec784233805eeb67417747 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 9 Jul 2018 19:51:19 +0800 Subject: [PATCH 1482/1692] drm/amdgpu: add new raven series device This patch is to add new pci device for raven series. 
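As an aside for readers new to this table: each pciidlist entry packs the ASIC enum together with feature bits (here AMD_IS_APU) into the PCI driver_data field, which the probe path then carries into the device flags. A minimal sketch of how such an entry is typically consumed; the helper name is made up for illustration:

    /* Sketch only; example_entry_is_apu() is a hypothetical helper name. */
    static bool example_entry_is_apu(const struct pci_device_id *ent)
    {
            return !!(ent->driver_data & AMD_IS_APU);
    }
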
Signed-off-by: Huang Rui Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b5c2ccb585b4..b575728ead07 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -874,6 +874,8 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, + /* Picasso */ + {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PICASSO|AMD_IS_APU}, {0, 0, 0} }; -- GitLab From a06c3ee083b5c622bb9f4a687d7ab5265ee73dbf Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Thu, 6 Sep 2018 14:56:19 +0800 Subject: [PATCH 1483/1692] drm/amdgpu: enable gfxoff in non-sriov and stutter mode by default enable gfxoff in non-sriov and stutter mode by default Signed-off-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 25e7e1cccaa1..39fe66810d53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1492,6 +1492,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } adev->powerplay.pp_feature = amdgpu_pp_feature_mask; + if (amdgpu_sriov_vf(adev)) + adev->powerplay.pp_feature &= ~PP_GFXOFF_MASK; for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b575728ead07..33e1856fb8cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -114,8 +114,8 @@ uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; -/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/ -uint amdgpu_pp_feature_mask = 0xfffd3fff; +/* OverDrive(bit 14) disabled by default*/ +uint amdgpu_pp_feature_mask = 0xffffbfff; int amdgpu_ngg = 0; int amdgpu_prim_buf_per_se = 0; int amdgpu_pos_buf_per_se = 0; -- GitLab From 44876ae294dd7d7bcdfb527d12b86beb54cd013b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 12 Sep 2018 13:51:25 -0500 Subject: [PATCH 1484/1692] drm/amdgpu: use IP presence to free uvd and vce handles Rather than checking the asic type, check whether the UVD or VCE IP blocks exist. This way we don't have to update the check with new asics that use VCN. 
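The approach generalises beyond UVD/VCE. A small sketch of the presence test the hunk below relies on; the wrapper name is hypothetical, while amdgpu_device_ip_get_ip_block() is the real accessor used in the diff itself:

    /* Hypothetical convenience wrapper around the accessor used below. */
    static bool example_has_ip_block(struct amdgpu_device *adev,
                                     enum amd_ip_block_type type)
    {
            return amdgpu_device_ip_get_ip_block(adev, type) != NULL;
    }
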
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 76174c4d288a..65b713225ebf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -974,10 +974,10 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, pm_runtime_get_sync(dev->dev); - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) { + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL) amdgpu_uvd_free_handles(adev, file_priv); + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL) amdgpu_vce_free_handles(adev, file_priv); - } amdgpu_vm_bo_rmv(adev, fpriv->prt_va); -- GitLab From 4cb0becb269e7b45ff3fd15bfd2242c8b3e013d1 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 15 Jun 2018 16:05:48 -0500 Subject: [PATCH 1485/1692] drm/amdgpu: move get_rev_id at first before load gpu_info firmware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rev id is used for identifying Raven2 series of chips. So we would better to initialize it at first. Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index e338ad6d0d20..794cfe4a52d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -479,6 +479,11 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block = .funcs = &soc15_common_ip_funcs, }; +static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) +{ + return adev->nbio_funcs->get_rev_id(adev); +} + int soc15_set_ip_blocks(struct amdgpu_device *adev) { /* Set IP register base before any HW register access */ @@ -507,6 +512,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) adev->df_funcs = &df_v3_6_funcs; else adev->df_funcs = &df_v1_7_funcs; + + adev->rev_id = soc15_get_rev_id(adev); adev->nbio_funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) @@ -581,11 +588,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) return 0; } -static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) -{ - return adev->nbio_funcs->get_rev_id(adev); -} - static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { adev->nbio_funcs->hdp_flush(adev, ring); @@ -642,7 +644,6 @@ static int soc15_common_early_init(void *handle) adev->asic_funcs = &soc15_asic_funcs; - adev->rev_id = soc15_get_rev_id(adev); adev->external_rev_id = 0xFF; switch (adev->asic_type) { case CHIP_VEGA10: -- GitLab From 1879e6a7f86e8bae0760c28a73dacc4c338866d5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Jun 2018 17:28:44 -0500 Subject: [PATCH 1486/1692] drm/amdgpu: set external rev id for raven2 It's different from raven1. 
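For concreteness, the offset chosen here works out as follows (the note about DC consuming external_rev_id as hw_internal_rev is an assumption on the editor's part, not something stated in this patch):

    /* rev_id  < 0x8  ->  external_rev_id = 0x1            (raven1, unchanged)
     * rev_id == 0x8  ->  external_rev_id = 0x8 + 0x81 = 0x89
     * 0x89 >= RAVEN2_A0 (0x81), so the ASICREV_IS_RAVEN2() test added later
     * in this series matches, assuming DC sees external_rev_id as
     * hw_internal_rev. */
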
Signed-off-by: Huang Rui Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 794cfe4a52d1..0a935604ec9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -742,7 +742,10 @@ static int soc15_common_early_init(void *handle) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_RLC_SMU_HS; - adev->external_rev_id = 0x1; + if (adev->rev_id >= 0x8) + adev->external_rev_id = adev->rev_id + 0x81; + else + adev->external_rev_id = 0x1; break; case CHIP_PICASSO: adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | -- GitLab From 54c4d17e98db7a7fae85a324b7ace134b3e3f8b5 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 4 Jan 2018 17:26:00 +0800 Subject: [PATCH 1487/1692] drm/amdgpu: add raven2 to gpu_info firmware Add gpu_info firmware for raven2. Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 39fe66810d53..762dc5f886cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -63,6 +63,7 @@ MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 @@ -1334,7 +1335,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "vega12"; break; case CHIP_RAVEN: - chip_name = "raven"; + if (adev->rev_id >= 8) + chip_name = "raven2"; + else + chip_name = "raven"; break; case CHIP_PICASSO: chip_name = "picasso"; -- GitLab From 8b47cc9bb122e4bb970685b870484d9f31844ef0 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 15 Jun 2018 16:01:41 -0500 Subject: [PATCH 1488/1692] drm/amdgpu: add raven2 vcn firmware support Specify raven2 vcn firmware on amdgpu_vce_sw_init. 
Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 93ffba2cbec4..a74498ce87ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -43,9 +43,11 @@ /* Firmware Names */ #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" #define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" +#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); +MODULE_FIRMWARE(FIRMWARE_RAVEN2); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -61,7 +63,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - fw_name = FIRMWARE_RAVEN; + if (adev->rev_id >= 8) + fw_name = FIRMWARE_RAVEN2; + else + fw_name = FIRMWARE_RAVEN; break; case CHIP_PICASSO: fw_name = FIRMWARE_PICASSO; -- GitLab From e11fa1b68a494687cce353d5bddc12e859a9c74e Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 5 Jun 2018 14:05:45 +0800 Subject: [PATCH 1489/1692] drm/amdgpu: add psp support for raven2 Modified for using raven2_asd.bin to replace raven_asd.bin for raven2 Signed-off-by: Likun Gao Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index eae3bf8b8463..2cfd1bb559dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -36,6 +36,7 @@ MODULE_FIRMWARE("amdgpu/raven_asd.bin"); MODULE_FIRMWARE("amdgpu/picasso_asd.bin"); +MODULE_FIRMWARE("amdgpu/raven2_asd.bin"); static int psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) @@ -118,7 +119,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) switch (adev->asic_type) { case CHIP_RAVEN: - chip_name = "raven"; + if (adev->rev_id >= 0x8) + chip_name = "raven2"; + else + chip_name = "raven"; break; case CHIP_PICASSO: chip_name = "picasso"; -- GitLab From e7497a302bf0c2abcd971ae6a08d2b260269ec82 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 4 Jan 2018 18:05:35 +0800 Subject: [PATCH 1490/1692] drm/amdgpu/sdma4: specify raven2 firmware. use raven2 sdma firmware. 
Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index ca8a26178e2f..7ef140aaca3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/vega20_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin"); MODULE_FIRMWARE("amdgpu/raven_sdma.bin"); MODULE_FIRMWARE("amdgpu/picasso_sdma.bin"); +MODULE_FIRMWARE("amdgpu/raven2_sdma.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L @@ -269,7 +270,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) chip_name = "vega20"; break; case CHIP_RAVEN: - chip_name = "raven"; + if (adev->rev_id >= 8) + chip_name = "raven2"; + else + chip_name = "raven"; break; case CHIP_PICASSO: chip_name = "picasso"; -- GitLab From e75279e871cee939d95204d896b4adbd150dd6ed Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 4 Jan 2018 18:13:41 +0800 Subject: [PATCH 1491/1692] drm/amdgpu/sdma4: Add raven2 golden setting Golden register settings from the hw team. Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 7ef140aaca3c..75be0b9ed2c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -185,6 +185,12 @@ static const struct soc15_reg_golden golden_settings_sdma_rv1[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002) }; +static const struct soc15_reg_golden golden_settings_sdma_rv2[] = +{ + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00003001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001) +}; + static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 offset) { @@ -225,11 +231,16 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) case CHIP_RAVEN: case CHIP_PICASSO: soc15_program_register_sequence(adev, - golden_settings_sdma_4_1, - ARRAY_SIZE(golden_settings_sdma_4_1)); - soc15_program_register_sequence(adev, - golden_settings_sdma_rv1, - ARRAY_SIZE(golden_settings_sdma_rv1)); + golden_settings_sdma_4_1, + ARRAY_SIZE(golden_settings_sdma_4_1)); + if (adev->rev_id >= 8) + soc15_program_register_sequence(adev, + golden_settings_sdma_rv2, + ARRAY_SIZE(golden_settings_sdma_rv2)); + else + soc15_program_register_sequence(adev, + golden_settings_sdma_rv1, + ARRAY_SIZE(golden_settings_sdma_rv1)); break; default: break; -- GitLab From cf4b60c6846aaf1b52d91e035b9e257e427ec10c Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 4 Jan 2018 18:33:49 +0800 Subject: [PATCH 1492/1692] drm/amdgpu/gfx9: add support for raven2 gfx firmware use raven2 gfx firmware. 
Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ad20747bace8..7b9e1c274c59 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -87,6 +87,13 @@ MODULE_FIRMWARE("amdgpu/picasso_mec.bin"); MODULE_FIRMWARE("amdgpu/picasso_mec2.bin"); MODULE_FIRMWARE("amdgpu/picasso_rlc.bin"); +MODULE_FIRMWARE("amdgpu/raven2_ce.bin"); +MODULE_FIRMWARE("amdgpu/raven2_pfp.bin"); +MODULE_FIRMWARE("amdgpu/raven2_me.bin"); +MODULE_FIRMWARE("amdgpu/raven2_mec.bin"); +MODULE_FIRMWARE("amdgpu/raven2_mec2.bin"); +MODULE_FIRMWARE("amdgpu/raven2_rlc.bin"); + static const struct soc15_reg_golden golden_settings_gc_9_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), @@ -573,7 +580,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) chip_name = "vega20"; break; case CHIP_RAVEN: - chip_name = "raven"; + if (adev->rev_id >= 8) + chip_name = "raven2"; + else + chip_name = "raven"; break; case CHIP_PICASSO: chip_name = "picasso"; -- GitLab From 28ab1229c36a6538d2a964ce37967a9aefde004a Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 4 Jan 2018 18:36:40 +0800 Subject: [PATCH 1493/1692] drm/amdgpu/gfx9: add raven2 golden setting Golden register settings from the hw team. Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 40 ++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 7b9e1c274c59..4991ae00a4ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -187,6 +187,29 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800) }; +static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080), +}; + static const struct soc15_reg_golden golden_settings_gc_9_x_common[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000), @@ -255,6 +278,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 #define PICASSO_GB_ADDR_CONFIG_GOLDEN 0x24000042 +#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev); @@ -294,6 +318,17 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) ARRAY_SIZE(golden_settings_gc_9_0_vg20)); break; case CHIP_RAVEN: + soc15_program_register_sequence(adev, golden_settings_gc_9_1, + ARRAY_SIZE(golden_settings_gc_9_1)); + if (adev->rev_id >= 8) + soc15_program_register_sequence(adev, + golden_settings_gc_9_1_rv2, + ARRAY_SIZE(golden_settings_gc_9_1_rv2)); + else + soc15_program_register_sequence(adev, + golden_settings_gc_9_1_rv1, + ARRAY_SIZE(golden_settings_gc_9_1_rv1)); + break; case CHIP_PICASSO: soc15_program_register_sequence(adev, golden_settings_gc_9_1, @@ -1288,7 +1323,10 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_prim_fifo_size_backend = 0x100; adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; - gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; + if (adev->rev_id >= 8) + gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN; + else + gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_PICASSO: adev->gfx.config.max_hw_contexts = 8; -- GitLab From 760067769ebb3fad2ebf1880ad87afb309ef7b14 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 16 Jan 2018 10:42:58 +0800 Subject: [PATCH 1494/1692] drm/amdgpu: fix the VM fault while write at the top of the invisible vram Raven2 has a HW issue that it is unable to use the vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the workaround that increase system aperture high address to get rid of the VM fault and hardware hang. Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 15 +++++++++++++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 15 +++++++++++++-- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ffd0ec9586d1..65f58ebcf835 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -73,8 +73,19 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) /* Program the system aperture low logical page number. */ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); + + if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) + /* + * Raven2 has a HW issue that it is unable to use the vram which + * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the + * workaround that increase system aperture high address (add 1) + * to get rid of the VM fault and hardware hang. 
+ */ + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + (max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18) + 0x1); + else + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index b121eb6a0ad4..2a126c6950c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -91,8 +91,19 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) /* Program the system aperture low logical page number. */ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); - WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); + + if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) + /* + * Raven2 has a HW issue that it is unable to use the vram which + * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the + * workaround that increase system aperture high address (add 1) + * to get rid of the VM fault and hardware hang. + */ + WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + (max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18) + 0x1); + else + WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + -- GitLab From 0e3d73f1a440eaca270a028bff51649ae99df113 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Mon, 22 Jan 2018 16:12:27 -0500 Subject: [PATCH 1495/1692] drm/amd/display: Add Raven2 definitions in dc Add Raven2 definitions in the dc code Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Reviewed-by: Huang Rui Acked-by: Alex Deucher --- .../display/dc/bios/command_table_helper2.c | 5 +++ .../gpu/drm/amd/display/dc/core/dc_resource.c | 7 ++++ .../drm/amd/display/dc/dce/dce_clock_source.c | 7 ++++ .../drm/amd/display/dc/dcn10/dcn10_resource.c | 36 ++++++++++++++++++- .../gpu/drm/amd/display/dc/gpio/hw_factory.c | 5 +++ .../drm/amd/display/dc/gpio/hw_translate.c | 5 +++ .../gpu/drm/amd/display/dc/i2caux/i2caux.c | 4 +++ .../gpu/drm/amd/display/include/dal_asic_id.h | 7 ++++ .../gpu/drm/amd/display/include/dal_types.h | 3 ++ 9 files changed, 78 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index bbbcef566c55..65b006ad372e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -61,6 +61,11 @@ bool dal_bios_parser_init_cmd_tbl_helper2( return true; #endif +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + case DCN_VERSION_1_01: + *h = dal_cmd_tbl_helper_dce112_get_table2(); + return true; +#endif case DCE_VERSION_12_0: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index d981755d1e4d..721dd13d2ed2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -88,6 +88,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case FAMILY_RV: dc_version = DCN_VERSION_1_0; +#if 
defined(CONFIG_DRM_AMD_DC_DCN1_01) + if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) + dc_version = DCN_VERSION_1_01; +#endif break; #endif default: @@ -138,6 +142,9 @@ struct resource_pool *dc_create_resource_pool( #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case DCN_VERSION_1_0: +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + case DCN_VERSION_1_01: +#endif res_pool = dcn10_create_resource_pool( num_virtual_links, dc); break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 5a9f3601ffb6..ae3c44aff1c8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -601,6 +601,9 @@ static uint32_t dce110_get_pix_clk_dividers( case DCN_VERSION_1_0: #endif +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + case DCN_VERSION_1_01: +#endif dce112_get_pix_clk_dividers_helper(clk_src, pll_settings, pix_clk_params); break; @@ -907,6 +910,10 @@ static bool dce110_program_pix_clk( case DCN_VERSION_1_0: #endif +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + case DCN_VERSION_1_01: +#endif + if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC = pll_settings->use_external_clk; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1b519f8f044f..65a596ffa02a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -152,7 +152,10 @@ enum dcn10_clk_src_array_id { DCN10_CLK_SRC_PLL1, DCN10_CLK_SRC_PLL2, DCN10_CLK_SRC_PLL3, - DCN10_CLK_SRC_TOTAL + DCN10_CLK_SRC_TOTAL, +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3 +#endif }; /* begin ********************* @@ -1163,6 +1166,10 @@ static bool construct( /* max pipe num for ASIC before check pipe fuses */ pool->base.pipe_count = pool->base.res_cap->num_timing_generator; +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + if (dc->ctx->dce_version == DCN_VERSION_1_01) + pool->base.pipe_count = 3; +#endif dc->caps.max_video_width = 3840; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; @@ -1194,13 +1201,28 @@ static bool construct( dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); + +#ifdef CONFIG_DRM_AMD_DC_DCN1_01 + if (dc->ctx->dce_version == DCN_VERSION_1_0) { + pool->base.clock_sources[DCN10_CLK_SRC_PLL3] = + dcn10_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + } +#else pool->base.clock_sources[DCN10_CLK_SRC_PLL3] = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); +#endif pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL; +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + if (dc->ctx->dce_version == DCN_VERSION_1_01) + pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL; +#endif + pool->base.dp_clock_source = dcn10_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_ID_DP_DTO, @@ -1246,6 +1268,18 @@ static bool construct( memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults)); memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults)); +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + if (dc->ctx->dce_version == DCN_VERSION_1_01) { + struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc; + struct dcn_ip_params *dcn_ip = dc->dcn_ip; + struct display_mode_lib *dml = &dc->dml; + + dml->ip.max_num_dpp = 3; + /* TODO how to handle 23.84? 
*/ + dcn_soc->dram_clock_change_latency = 23; + dcn_ip->max_num_dpp = 3; + } +#endif if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) { dc->dcn_soc->urgent_latency = 3; dc->debug.disable_dmcu = true; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index 0caee3523017..a683f4102e65 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -86,6 +86,11 @@ bool dal_hw_factory_init( dal_hw_factory_dcn10_init(factory); return true; #endif +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + case DCN_VERSION_1_01: + dal_hw_factory_dcn10_init(factory); + return true; +#endif default: ASSERT_CRITICAL(false); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 55c707488541..096f45628630 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -83,6 +83,11 @@ bool dal_hw_translate_init( dal_hw_translate_dcn10_init(translate); return true; #endif +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + case DCN_VERSION_1_01: + dal_hw_translate_dcn10_init(translate); + return true; +#endif default: BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c index 9b0bcc6b769b..e56093f26eed 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c @@ -96,6 +96,10 @@ struct i2caux *dal_i2caux_create( return dal_i2caux_dcn10_create(ctx); #endif +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + case DCN_VERSION_1_01: + return dal_i2caux_dcn10_create(ctx); +#endif default: BREAK_TO_DEBUGGER(); return NULL; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 25029ed42d89..4f501ddcfb8d 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -131,8 +131,15 @@ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ #define RAVEN_A0 0x01 #define RAVEN_B0 0x21 +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) +/* DCN1_01 */ +#define RAVEN2_A0 0x81 +#endif #define RAVEN_UNKNOWN 0xFF +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) +#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0)) +#endif /* DCN1_01 */ #define ASIC_REV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN) #define RAVEN1_F0 0xF0 #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index 840142b65f8b..89627133e188 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -44,6 +44,9 @@ enum dce_version { DCE_VERSION_12_0, DCE_VERSION_MAX, DCN_VERSION_1_0, +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + DCN_VERSION_1_01, +#endif /* DCN1_01 */ DCN_VERSION_MAX }; -- GitLab From 1a4d427dd28348b78b8bacff04e2577ed15e1ac9 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Mon, 22 Jan 2018 17:40:50 -0500 Subject: [PATCH 1496/1692] drm/amd/display: Add DC config flag for Raven2 (v2) Add DRM_AMD_DC_DCN1_01 config flag for Raven2 v2: Make DC select DRM_AMD_DC_DCN1_01 (Alex) Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Reviewed-by: Huang Rui Acked-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 6 ++++++ 1 
file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index ed654a76c76a..13a6ce9c8e94 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -5,6 +5,7 @@ config DRM_AMD_DC bool "AMD DC - Enable new display engine" default y select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) + select DRM_AMD_DC_DCN1_01 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) help Choose this option if you want to use the new display engine support for AMDGPU. This adds required support for Vega and @@ -15,6 +16,11 @@ config DRM_AMD_DC_DCN1_0 help RV family support for display engine +config DRM_AMD_DC_DCN1_01 + def_bool n + help + RV2 family for display engine + config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" depends on DRM_AMD_DC -- GitLab From 3e9d06b56f405aa53675efdc81a5417da6fc130a Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 15 Jun 2018 17:22:38 -0500 Subject: [PATCH 1497/1692] drm/amd/powerplay: update smu10_verify_smc_interface() to be raven2 compatible Check the raven2 version number as well. Signed-off-by: Evan Quan Reviewed-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index bb07d43f3874..6f961dec2088 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -177,7 +177,8 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr) PPSMC_MSG_GetDriverIfVersion); smc_driver_if_version = smu10_read_arg_from_smc(hwmgr); - if (smc_driver_if_version != SMU10_DRIVER_IF_VERSION) { + if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) && + (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) { pr_err("Attempt to read SMC IF Version Number Failed!\n"); return -EINVAL; } -- GitLab From fa27203f8af9c6ed5ab88860f9a83cbc13ab786f Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 19 Jun 2018 10:32:50 -0500 Subject: [PATCH 1498/1692] drm/amd/powerplay: round up the Mhz convertion (v2) Since the clock value there may be like 29999 10Khz. 
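A worked example of why plain division is not enough here:

    /* clock = 29999, i.e. 29999 * 10 kHz = 299.99 MHz
     *   29999 / 100        = 299 MHz  (truncates, understates the request)
     *   (29999 + 99) / 100 = 300 MHz  (rounds up, as convert_10k_to_mhz()
     *                                  in the hunk below does)              */
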
v2: rebase (Alex) Signed-off-by: Evan Quan Reviewed-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 26d130a91725..5b55c709fb1c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -211,12 +211,18 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) return 0; } +static inline uint32_t convert_10k_to_mhz(uint32_t clock) +{ + return (clock + 99) / 100; +} + static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) { - smu10_data->deep_sleep_dcefclk = clock/100; + if (smu10_data->need_min_deep_sleep_dcefclk && + smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) { + smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, smu10_data->deep_sleep_dcefclk); -- GitLab From 89da2a505f1bdbc34819a170435eb64e596bf5c6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 19 Jun 2018 10:41:00 -0500 Subject: [PATCH 1499/1692] drm/amd/powerplay: disable raven2 force dpm level support (v2) It's not supported yet. v2: rebase (Alex) Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 5b55c709fb1c..9808bd48b386 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -551,12 +551,18 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { struct smu10_hwmgr *data = hwmgr->backend; + struct amdgpu_device *adev = hwmgr->adev; if (hwmgr->smu_version < 0x1E3700) { pr_info("smu firmware version too old, can not set dpm level\n"); return 0; } + /* Disable UMDPSTATE support on rv2 temporarily */ + if ((adev->asic_type == CHIP_RAVEN) && + (adev->rev_id >= 8)) + return 0; + switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: -- GitLab From 520cbe0f4a7f170ec0d1167a827bcbec74aef4f4 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 19 Jun 2018 10:46:42 -0500 Subject: [PATCH 1500/1692] drm/amdgpu: set CG flags for raven2 (v2) Raven2 does not enable all of the CG flags that raven1 does. 
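Reading the two flag lists in the hunk below against each other, the rv2 path drops five flags relative to raven1:

    AMD_CG_SUPPORT_GFX_RLC_LS, AMD_CG_SUPPORT_BIF_MGCG, AMD_CG_SUPPORT_HDP_MGCG,
    AMD_CG_SUPPORT_DRM_MGCG and AMD_CG_SUPPORT_DRM_LS
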
v2: rebase (Alex) Signed-off-by: Huang Rui Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 57 +++++++++++++++++++----------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0a935604ec9e..f5a44d1fe5da 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -714,26 +714,43 @@ static int soc15_common_early_init(void *handle) adev->external_rev_id = adev->rev_id + 0x28; break; case CHIP_RAVEN: - adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | - AMD_CG_SUPPORT_GFX_MGLS | - AMD_CG_SUPPORT_GFX_RLC_LS | - AMD_CG_SUPPORT_GFX_CP_LS | - AMD_CG_SUPPORT_GFX_3D_CGCG | - AMD_CG_SUPPORT_GFX_3D_CGLS | - AMD_CG_SUPPORT_GFX_CGCG | - AMD_CG_SUPPORT_GFX_CGLS | - AMD_CG_SUPPORT_BIF_MGCG | - AMD_CG_SUPPORT_BIF_LS | - AMD_CG_SUPPORT_HDP_MGCG | - AMD_CG_SUPPORT_HDP_LS | - AMD_CG_SUPPORT_DRM_MGCG | - AMD_CG_SUPPORT_DRM_LS | - AMD_CG_SUPPORT_ROM_MGCG | - AMD_CG_SUPPORT_MC_MGCG | - AMD_CG_SUPPORT_MC_LS | - AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS | - AMD_CG_SUPPORT_VCN_MGCG; + if (adev->rev_id >= 0x8) + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCN_MGCG; + else + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_DRM_MGCG | + AMD_CG_SUPPORT_DRM_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCN_MGCG; adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; -- GitLab From 6a15f3ff19a8dd394e3a21480512c2cb98e26b3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Wed, 12 Sep 2018 18:07:10 +0200 Subject: [PATCH 1501/1692] drm/amdgpu: Initialize fences array entries in amdgpu_sa_bo_next_hole MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The entries were only initialized once in amdgpu_sa_bo_new. If a fence wasn't signalled yet in the first amdgpu_sa_bo_next_hole call, but then got signalled before a later amdgpu_sa_bo_next_hole call, it could destroy the fence but leave its pointer in the array, resulting in use-after-free in amdgpu_sa_bo_new. 
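A stripped-down illustration of the bug class being fixed (generic code, not the amdgpu functions themselves; the names lists, pick_candidate() and wait_and_release() are invented):

    struct obj *slot[4] = { NULL };              /* initialised only once ...  */

    for (pass = 0; pass < max_tries; pass++) {
            for (i = 0; i < 4; i++) {
                    /* slot[i] = NULL;    <-- the fix: clear on every pass     */
                    if (!list_empty(&lists[i]))
                            slot[i] = pick_candidate(&lists[i]);
            }
            /* pass N may free the object behind slot[i]; without the reset,
             * pass N+1 still sees the dangling pointer -> use-after-free.    */
            wait_and_release(slot, 4);
    }
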
Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index fb1667b35daa..12f2bf97611f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -226,6 +226,8 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) { struct amdgpu_sa_bo *sa_bo; + fences[i] = NULL; + if (list_empty(&sa_manager->flist[i])) continue; @@ -296,10 +298,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, spin_lock(&sa_manager->wq.lock); do { - for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) { - fences[i] = NULL; + for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) tries[i] = 0; - } do { amdgpu_sa_bo_try_free(sa_manager); -- GitLab From 56ea09760076800f08c57ef2026b67d51f338810 Mon Sep 17 00:00:00 2001 From: Vijetha Malkai Date: Thu, 13 Sep 2018 14:47:39 -0400 Subject: [PATCH 1502/1692] drm/amdgpu: Style fixes to PRIME code documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use consistent capitalization in the description of function arguments * Define and consistently use the BO acronym for buffer objects * Some minor wording improvements Signed-off-by: Vijetha Malkai [ Michel Dänzer: Made commit log more specific ] Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 52 +++++++++++------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 2686297e34e0..e45e929aaab5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -44,10 +44,10 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops; /** * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table * implementation - * @obj: GEM buffer object + * @obj: GEM buffer object (BO) * * Returns: - * A scatter/gather table for the pinned pages of the buffer object's memory. + * A scatter/gather table for the pinned pages of the BO's memory. */ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) { @@ -59,9 +59,9 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) /** * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation - * @obj: GEM buffer object + * @obj: GEM BO * - * Sets up an in-kernel virtual mapping of the buffer object's memory. + * Sets up an in-kernel virtual mapping of the BO's memory. * * Returns: * The virtual address of the mapping or an error pointer. @@ -81,10 +81,10 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj) /** * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation - * @obj: GEM buffer object - * @vaddr: virtual address (unused) + * @obj: GEM BO + * @vaddr: Virtual address (unused) * - * Tears down the in-kernel virtual mapping of the buffer object's memory. + * Tears down the in-kernel virtual mapping of the BO's memory. 
*/ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) { @@ -95,14 +95,14 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) /** * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation - * @obj: GEM buffer object - * @vma: virtual memory area + * @obj: GEM BO + * @vma: Virtual memory area * - * Sets up a userspace mapping of the buffer object's memory in the given + * Sets up a userspace mapping of the BO's memory in the given * virtual memory area. * * Returns: - * 0 on success or negative error code. + * 0 on success or a negative error code on failure. */ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { @@ -145,10 +145,10 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma * @attach: DMA-buf attachment * @sg: Scatter/gather table * - * Import shared DMA buffer memory exported by another device. + * Imports shared DMA buffer memory exported by another device. * * Returns: - * A new GEM buffer object of the given DRM device, representing the memory + * A new GEM BO of the given DRM device, representing the memory * described by the given DMA-buf attachment and scatter/gather table. */ struct drm_gem_object * @@ -191,7 +191,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, /** * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation - * @dma_buf: shared DMA buffer + * @dma_buf: Shared DMA buffer * @attach: DMA-buf attachment * * Makes sure that the shared DMA buffer can be accessed by the target device. @@ -199,7 +199,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, * all DMA devices. * * Returns: - * 0 on success or negative error code. + * 0 on success or a negative error code on failure. */ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) @@ -251,11 +251,11 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, /** * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation - * @dma_buf: shared DMA buffer + * @dma_buf: Shared DMA buffer * @attach: DMA-buf attachment * * This is called when a shared DMA buffer no longer needs to be accessible by - * the other device. For now, simply unpins the buffer from GTT. + * another device. For now, simply unpins the buffer from GTT. */ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) @@ -280,10 +280,10 @@ static void amdgpu_gem_map_detach(struct dma_buf *dma_buf, /** * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation - * @obj: GEM buffer object + * @obj: GEM BO * * Returns: - * The buffer object's reservation object. + * The BO's reservation object. */ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) { @@ -294,15 +294,15 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) /** * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation - * @dma_buf: shared DMA buffer - * @direction: direction of DMA transfer + * @dma_buf: Shared DMA buffer + * @direction: Direction of DMA transfer * * This is called before CPU access to the shared DMA buffer's memory. If it's * a read access, the buffer is moved to the GTT domain if possible, for optimal * CPU read performance. * * Returns: - * 0 on success or negative error code. + * 0 on success or a negative error code on failure. 
*/ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) @@ -349,14 +349,14 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = { /** * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation * @dev: DRM device - * @gobj: GEM buffer object - * @flags: flags like DRM_CLOEXEC and DRM_RDWR + * @gobj: GEM BO + * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR. * * The main work is done by the &drm_gem_prime_export helper, which in turn * uses &amdgpu_gem_prime_res_obj. * * Returns: - * Shared DMA buffer representing the GEM buffer object from the given device. + * Shared DMA buffer representing the GEM BO from the given device. */ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gobj, @@ -387,7 +387,7 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, * uses &amdgpu_gem_prime_import_sg_table. * * Returns: - * GEM buffer object representing the shared DMA buffer for the given device. + * GEM BO representing the shared DMA buffer for the given device. */ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) -- GitLab From ee6e89c0f1d18fef9206e68747577d99bbf923ea Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 13 Sep 2018 15:36:27 -0400 Subject: [PATCH 1503/1692] drm/amd/display: Add DMCU firmware version Read the version number from the common firmware header and store it in the dm struct Signed-off-by: David Francis Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1ff2e8fd5a22..985c6291dbfd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -589,6 +589,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev) adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); + adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); + DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 9a57c654943a..b6fe9adf4b93 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -131,6 +131,7 @@ struct amdgpu_display_manager { struct dm_comressor_info compressor; const struct firmware *fw_dmcu; + uint32_t dmcu_fw_version; }; struct amdgpu_dm_connector { -- GitLab From 4d11b4b256a882800e033e003351244ae7d5d174 Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 13 Sep 2018 15:37:50 -0400 Subject: [PATCH 1504/1692] drm/amdgpu: Add DMCU to firmware query interface DMCU firmware version can be read using the AMDGPU_INFO ioctl or the amdgpu_firmware_info debugfs entry Signed-off-by: David Francis Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 12 ++++++++++++ include/uapi/drm/amdgpu_drm.h | 2 ++ 2 files changed, 14 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 65b713225ebf..dc4b2f34e3ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -257,6 +257,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, 
fw_info->ver = adev->psp.asd_fw_version; fw_info->feature = adev->psp.asd_feature_version; break; + case AMDGPU_INFO_FW_DMCU: + fw_info->ver = adev->dm.dmcu_fw_version; + fw_info->feature = 0; + break; default: return -EINVAL; } @@ -1295,6 +1299,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); + /* DMCU */ + query_fw.fw_type = AMDGPU_INFO_FW_DMCU; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 1ceec56de015..370e9a5536ef 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -665,6 +665,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10 /* Subquery id: Query GFX RLC SRLS firmware version */ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11 + /* Subquery id: Query DMCU firmware version */ + #define AMDGPU_INFO_FW_DMCU 0x12 /* number of bytes moved for TTM migration */ #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f /* the used VRAM size */ -- GitLab From 23ecdc6187ef74e00b78e889446a309628719b6e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Sep 2018 15:05:22 -0500 Subject: [PATCH 1505/1692] drm/amdgpu/soc15: clean up picasso support It's the same as raven so remove the duplicate case. Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f5a44d1fe5da..f930e09071d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -546,23 +546,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; case CHIP_RAVEN: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#else -# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." -#endif - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); - break; case CHIP_PICASSO: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); -- GitLab From 741deade2a704a434bd5939118c43d38e9ddac25 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Sep 2018 15:41:57 -0500 Subject: [PATCH 1506/1692] drm/amdgpu: simplify Raven, Raven2, and Picasso handling Treat them all as Raven rather than adding a new picasso asic type. This simplifies a lot of code and also handles the case of rv2 chips with the 0x15d8 pci id. It also fixes dmcu fw handling for picasso. 
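The common shape all of the converted firmware-name switches end up with, lifted from the hunks below, keys purely off rev_id and the PCI device ID instead of a separate CHIP_PICASSO asic type:

    case CHIP_RAVEN:
            if (adev->rev_id >= 8)
                    chip_name = "raven2";
            else if (adev->pdev->device == 0x15d8)
                    chip_name = "picasso";
            else
                    chip_name = "raven";
            break;
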
Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 32 ++------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 -- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 11 ++-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 5 +- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 11 +--- drivers/gpu/drm/amd/amdgpu/soc15.c | 66 +++++++++---------- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 +-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 - .../drm/amd/powerplay/hwmgr/processpptables.c | 8 +-- include/drm/amd_asic_type.h | 1 - 16 files changed, 60 insertions(+), 113 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 762dc5f886cd..354f0557d697 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -91,7 +91,6 @@ static const char *amdgpu_asic_name[] = { "VEGA12", "VEGA20", "RAVEN", - "PICASSO", "LAST", }; @@ -1337,12 +1336,11 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_RAVEN: if (adev->rev_id >= 8) chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_PICASSO: - chip_name = "picasso"; - break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); @@ -1468,8 +1466,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: - if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO)) + if (adev->asic_type == CHIP_RAVEN) adev->family = AMDGPU_FAMILY_RV; else adev->family = AMDGPU_FAMILY_AI; @@ -2183,7 +2180,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_VEGA20: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: - case CHIP_PICASSO: #endif return amdgpu_dc != 0; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 33e1856fb8cc..ff10df4f50d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -874,8 +874,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, - /* Picasso */ - {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PICASSO|AMD_IS_APU}, + {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, {0, 0, 0} }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 611c06d3600a..bd397d2916fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -56,7 +56,6 @@ static int psp_sw_init(void *handle) psp_v3_1_set_psp_funcs(psp); break; case CHIP_RAVEN: - case CHIP_PICASSO: psp_v10_0_set_psp_funcs(psp); break; case CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index acb4c66fe89b..1fa8bc337859 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -303,7 +303,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return 
AMDGPU_FW_LOAD_SMU; case CHIP_VEGA10: case CHIP_RAVEN: - case CHIP_PICASSO: case CHIP_VEGA12: case CHIP_VEGA20: if (!load_type) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index a74498ce87ff..a73674f9a0f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -63,14 +63,13 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - if (adev->rev_id >= 8) + if (adev->rev_id >= 8) fw_name = FIRMWARE_RAVEN2; + else if (adev->pdev->device == 0x15d8) + fw_name = FIRMWARE_PICASSO; else fw_name = FIRMWARE_RAVEN; break; - case CHIP_PICASSO: - fw_name = FIRMWARE_PICASSO; - break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7a9ffe9eb8bb..a7f9aaa47c49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2981,7 +2981,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) + if (adev->asic_type == CHIP_RAVEN) vm->pte_support_ats = true; } else { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & @@ -3073,7 +3073,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid) { - bool pte_support_ats = (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO); + bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; r = amdgpu_bo_reserve(vm->root.base.bo, true); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4991ae00a4ca..75a91663019f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -277,7 +277,6 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 -#define PICASSO_GB_ADDR_CONFIG_GOLDEN 0x24000042 #define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev); @@ -329,14 +328,6 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_1_rv1, ARRAY_SIZE(golden_settings_gc_9_1_rv1)); break; - case CHIP_PICASSO: - soc15_program_register_sequence(adev, - golden_settings_gc_9_1, - ARRAY_SIZE(golden_settings_gc_9_1)); - soc15_program_register_sequence(adev, - golden_settings_gc_9_1_rv1, - ARRAY_SIZE(golden_settings_gc_9_1_rv1)); - break; default: break; } @@ -617,12 +608,11 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) case CHIP_RAVEN: if (adev->rev_id >= 8) chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_PICASSO: - chip_name = "picasso"; - break; default: BUG(); } @@ -1076,7 +1066,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) { + if (adev->asic_type == CHIP_RAVEN) { /* TODO: double check the cp_table_size for RV */ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ r = amdgpu_bo_create_reserved(adev, 
adev->gfx.rlc.cp_table_size, @@ -1328,14 +1318,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) else gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; - case CHIP_PICASSO: - adev->gfx.config.max_hw_contexts = 8; - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; - gb_addr_config = PICASSO_GB_ADDR_CONFIG_GOLDEN; - break; default: BUG(); break; @@ -1614,7 +1596,6 @@ static int gfx_v9_0_sw_init(void *handle) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: adev->gfx.mec.num_mec = 2; break; default: @@ -1776,7 +1757,7 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, (void **)&adev->gfx.rlc.cs_ptr); - if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO)) { + if (adev->asic_type == CHIP_RAVEN) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, &adev->gfx.rlc.cp_table_gpu_addr, (void **)&adev->gfx.rlc.cp_table_ptr); @@ -2442,7 +2423,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) { + if (adev->asic_type == CHIP_RAVEN) { if (amdgpu_lbpw != 0) gfx_v9_0_enable_lbpw(adev, true); else @@ -3846,7 +3827,6 @@ static int gfx_v9_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: if (!enable) { amdgpu_gfx_off_ctrl(adev, false); cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); @@ -3901,7 +3881,6 @@ static int gfx_v9_0_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: gfx_v9_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); break; @@ -4911,7 +4890,6 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0ad1586c293f..aad3c7c5fb3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -846,7 +846,6 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) adev->gmc.gart_size = 512ULL << 20; break; case CHIP_RAVEN: /* DCE SG support */ - case CHIP_PICASSO: /* DCE SG support */ adev->gmc.gart_size = 1024ULL << 20; break; } @@ -935,7 +934,6 @@ static int gmc_v9_0_sw_init(void *handle) adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); } else { @@ -1062,7 +1060,6 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) case CHIP_VEGA12: break; case CHIP_RAVEN: - case CHIP_PICASSO: soc15_program_register_sequence(adev, golden_settings_athub_1_0_0, ARRAY_SIZE(golden_settings_athub_1_0_0)); @@ -1097,7 +1094,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: mmhub_v1_0_update_power_gating(adev, true); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 2a126c6950c7..80698b5ffa4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -412,7 +412,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG); - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) { + if (adev->asic_type != CHIP_RAVEN) { def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2); } else @@ -428,7 +428,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) + if (adev->asic_type != CHIP_RAVEN) data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -445,7 +445,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) + if (adev->asic_type != CHIP_RAVEN) data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -458,13 +458,13 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data); if (def1 != data1) { - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) + if (adev->asic_type != CHIP_RAVEN) WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); else WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1); } - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO && def2 != data2) + if (adev->asic_type != CHIP_RAVEN && def2 != data2) WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2); } @@ 
-528,7 +528,6 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: mmhub_v1_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); athub_update_medium_grain_clock_gating(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 2cfd1bb559dd..295c2205485a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -121,12 +121,11 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) case CHIP_RAVEN: if (adev->rev_id >= 0x8) chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_PICASSO: - chip_name = "picasso"; - break; default: BUG(); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 75be0b9ed2c0..2ea1f0d8f5be 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -229,7 +229,6 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) ARRAY_SIZE(golden_settings_sdma1_4_2)); break; case CHIP_RAVEN: - case CHIP_PICASSO: soc15_program_register_sequence(adev, golden_settings_sdma_4_1, ARRAY_SIZE(golden_settings_sdma_4_1)); @@ -283,12 +282,11 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) case CHIP_RAVEN: if (adev->rev_id >= 8) chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_PICASSO: - chip_name = "picasso"; - break; default: BUG(); } @@ -869,7 +867,6 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: sdma_v4_1_init_power_gating(adev); sdma_v4_1_update_power_gating(adev, true); break; @@ -1277,7 +1274,7 @@ static int sdma_v4_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) + if (adev->asic_type == CHIP_RAVEN) adev->sdma.num_instances = 1; else adev->sdma.num_instances = 2; @@ -1620,7 +1617,6 @@ static int sdma_v4_0_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: sdma_v4_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); sdma_v4_0_update_medium_grain_light_sleep(adev, @@ -1639,7 +1635,6 @@ static int sdma_v4_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: sdma_v4_1_update_power_gating(adev, state == AMD_PG_STATE_GATE ? 
true : false); break; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f930e09071d4..c4daf1f93486 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -491,7 +491,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_RAVEN: - case CHIP_PICASSO: vega10_reg_base_init(adev); break; case CHIP_VEGA20: @@ -546,7 +545,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; case CHIP_RAVEN: - case CHIP_PICASSO: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); @@ -698,6 +696,13 @@ static int soc15_common_early_init(void *handle) break; case CHIP_RAVEN: if (adev->rev_id >= 0x8) + adev->external_rev_id = adev->rev_id + 0x81; + else if (adev->pdev->device == 0x15d8) + adev->external_rev_id = adev->rev_id + 0x41; + else + adev->external_rev_id = 0x1; + + if (adev->rev_id >= 0x8) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CP_LS | @@ -713,7 +718,27 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - else + + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + } else if (adev->pdev->device == 0x15d8) { + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS; + + adev->pg_flags = AMD_PG_SUPPORT_SDMA | + AMD_PG_SUPPORT_MMHUB | + AMD_PG_SUPPORT_VCN; + } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_RLC_LS | @@ -735,43 +760,13 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; - - if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_RLC_SMU_HS; - - if (adev->rev_id >= 0x8) - adev->external_rev_id = adev->rev_id + 0x81; - else - adev->external_rev_id = 0x1; - break; - case CHIP_PICASSO: - adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | - AMD_CG_SUPPORT_GFX_CP_LS | - AMD_CG_SUPPORT_GFX_3D_CGCG | - AMD_CG_SUPPORT_GFX_3D_CGLS | - AMD_CG_SUPPORT_GFX_CGCG | - AMD_CG_SUPPORT_GFX_CGLS | - AMD_CG_SUPPORT_BIF_LS | - AMD_CG_SUPPORT_HDP_LS | - AMD_CG_SUPPORT_ROM_MGCG | - AMD_CG_SUPPORT_MC_MGCG | - AMD_CG_SUPPORT_MC_LS | - AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; - - adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | - AMD_PG_SUPPORT_VCN; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + } if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_RLC_SMU_HS; - - adev->external_rev_id = adev->rev_id + 0x41; break; default: /* FIXME: not supported yet */ @@ -973,7 +968,6 @@ static int soc15_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE ? true : false); break; case CHIP_RAVEN: - case CHIP_PICASSO: adev->nbio_funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); adev->nbio_funcs->update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 985c6291dbfd..47c3453c688a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1215,8 +1215,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_RAVEN || - adev->asic_type == CHIP_PICASSO) + adev->asic_type == CHIP_RAVEN) client_id = SOC15_IH_CLIENTID_DCE; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -1635,7 +1634,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: - case CHIP_PICASSO: if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -1862,7 +1860,6 @@ static int dm_early_init(void *handle) break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: - case CHIP_PICASSO: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; @@ -2111,8 +2108,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_RAVEN || - adev->asic_type == CHIP_PICASSO) { + adev->asic_type == CHIP_RAVEN) { /* Fill GFX9 params */ plane_state->tiling_info.gfx9.num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index a45578e6504a..7500a3e61dba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -171,7 +171,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case AMDGPU_FAMILY_RV: switch (hwmgr->chip_id) { case CHIP_RAVEN: - case CHIP_PICASSO: hwmgr->od_enabled = false; hwmgr->smumgr_funcs = &smu10_smu_funcs; smu10_init_function_pointers(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index f6fe9ce793ad..77c14671866c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -832,7 +832,7 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( uint16_t size; if (!table_addr) { - if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) { + if (hwmgr->chip_id == CHIP_RAVEN) { table_addr = &soft_dummy_pp_table[0]; hwmgr->soft_pp_table = &soft_dummy_pp_table[0]; hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table); @@ -1055,7 +1055,7 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, hwmgr->platform_descriptor.maxOverdriveVDDC = 0; hwmgr->platform_descriptor.overdriveVDDCStep = 0; - if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) + if (hwmgr->chip_id == CHIP_RAVEN) return 0; /* We assume here that fw_info is unchanged if this call fails.*/ @@ -1595,7 +1595,7 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) int result; const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table; - if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) + if (hwmgr->chip_id == CHIP_RAVEN) return 0; hwmgr->need_pp_table_upload = true; @@ -1644,7 +1644,7 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) static int 
pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) + if (hwmgr->chip_id == CHIP_RAVEN) return 0; kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 5644fc679d6f..dd63d08cc54e 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -49,7 +49,6 @@ enum amd_asic_type { CHIP_VEGA12, CHIP_VEGA20, CHIP_RAVEN, - CHIP_PICASSO, CHIP_LAST, }; -- GitLab From 59d0f396b5abfb5ec4dbf3488cb4f24dc7c1aaf4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Sep 2018 11:01:28 -0500 Subject: [PATCH 1507/1692] drm/amdgpu/display: return proper error codes in dm Replace -1 with proper error codes. Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 47c3453c688a..2662ab01c1cf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -493,7 +493,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) error: amdgpu_dm_fini(adev); - return -1; + return -EINVAL; } static void amdgpu_dm_fini(struct amdgpu_device *adev) @@ -548,7 +548,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) break; default: DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); - return -1; + return -EINVAL; } if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -1539,7 +1539,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { DRM_ERROR("DM: Failed to initialize mode config\n"); - return -1; + return -EINVAL; } /* Identify the number of planes to be initialized */ @@ -1654,7 +1654,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) kfree(aconnector); for (i = 0; i < dm->dc->caps.max_planes; i++) kfree(mode_info->planes[i]); - return -1; + return -EINVAL; } static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) -- GitLab From feabaad8aae0f6b1dae681c998572d2663f4a598 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 10:17:24 +0200 Subject: [PATCH 1508/1692] drm/amdgpu: fix mask in GART location calculation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to mask the lower bits not the upper one. 
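In other words, the GART base has to be rounded down to a 4 GB boundary rather than reduced to its offset inside one. A standalone sketch of the two masks (plain C, not driver code; the address below is an arbitrary example):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	const uint64_t four_gb = 0x100000000ULL;
	const uint64_t addr = 0xfffeffffffffULL;	/* arbitrary example address */

	/* Old expression: keeps only the low 32 bits, i.e. the offset inside a 4 GB window. */
	printf("addr &  (four_gb - 1) = 0x%016" PRIx64 "\n", addr & (four_gb - 1));

	/* Fixed expression: clears the low 32 bits, i.e. rounds down to a 4 GB boundary. */
	printf("addr & ~(four_gb - 1) = 0x%016" PRIx64 "\n", addr & ~(four_gb - 1));
	return 0;
}
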
Fixes: ec210e3226dc0 drm/amdgpu: put GART away from VRAM v2 Signed-off-by: Christian König Reviewed-by: Alex Deucher Acked-by: James Zhu Tested-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index ae4467113240..9a5b252784a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -166,7 +166,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) else mc->gart_start = mc->mc_mask - mc->gart_size + 1; - mc->gart_start &= four_gb - 1; + mc->gart_start &= ~(four_gb - 1); mc->gart_end = mc->gart_start + mc->gart_size - 1; dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n", mc->gart_size >> 20, mc->gart_start, mc->gart_end); -- GitLab From 0957dc7097a3f462f6cedb45cf9b9785cc29e5bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 12:54:33 +0200 Subject: [PATCH 1509/1692] drm/amdgpu: revert "stop using gart_start as offset for the GTT domain" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Turned out the commit is incomplete and since we remove using the AGP mapping from the GTT manager it is also not necessary any more. This reverts commit 22d8bfafcc12dfa17b91d2e8ae4e1898e782003a. Signed-off-by: Christian König Acked-by: Alex Deucher Acked-by: James Zhu Tested-by: James Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 +++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index c2539f6821c0..da7b1b92d9cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -143,8 +143,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, spin_unlock(&mgr->lock); if (!r) - mem->start = node->node.start + - (adev->gmc.gart_start >> PAGE_SHIFT); + mem->start = node->node.start; return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8a158ee922f7..f12ae6b525b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -188,7 +188,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, case TTM_PL_TT: /* GTT memory */ man->func = &amdgpu_gtt_mgr_func; - man->gpu_offset = 0; + man->gpu_offset = adev->gmc.gart_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; @@ -1060,7 +1060,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); /* bind pages into GART page tables */ - gtt->offset = ((u64)bo_mem->start << PAGE_SHIFT) - adev->gmc.gart_start; + gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); @@ -1112,8 +1112,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); /* Bind pages */ - gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - - adev->gmc.gart_start; + gtt->offset = (u64)tmp.start << PAGE_SHIFT; r = amdgpu_ttm_gart_bind(adev, bo, flags); if (unlikely(r)) { ttm_bo_mem_put(bo, 
&tmp); -- GitLab From 61a6bd83abf2f14b2a917b6a0279c88d299267af Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 14 Sep 2018 12:59:14 +0200 Subject: [PATCH 1510/1692] Revert "x86/mm/legacy: Populate the user page-table with user pgd's" This reverts commit 1f40a46cf47c12d93a5ad9dccd82bd36ff8f956a. It turned out that this patch is not sufficient to enable PTI on 32 bit systems with legacy 2-level page-tables. In this paging mode the huge-page PTEs are in the top-level page-table directory, where also the mirroring to the user-space page-table happens. So every huge PTE exits twice, in the kernel and in the user page-table. That means that accessed/dirty bits need to be fetched from two PTEs in this mode to be safe, but this is not trivial to implement because it needs changes to generic code just for the sake of enabling PTI with 32-bit legacy paging. As all systems that need PTI should support PAE anyway, remove support for PTI when 32-bit legacy paging is used. Fixes: 7757d607c6b3 ('x86/pti: Allow CONFIG_PAGE_TABLE_ISOLATION for x86_32') Reported-by: Meelis Roos Signed-off-by: Joerg Roedel Signed-off-by: Thomas Gleixner Cc: hpa@zytor.com Cc: linux-mm@kvack.org Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Dave Hansen Cc: Borislav Petkov Cc: Andrea Arcangeli Link: https://lkml.kernel.org/r/1536922754-31379-1-git-send-email-joro@8bytes.org --- arch/x86/include/asm/pgtable-2level.h | 9 --------- security/Kconfig | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 24c6cf5f16b7..60d0f9015317 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -19,9 +19,6 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte) static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { -#ifdef CONFIG_PAGE_TABLE_ISOLATION - pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd); -#endif *pmdp = pmd; } @@ -61,9 +58,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp) #ifdef CONFIG_SMP static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) { -#ifdef CONFIG_PAGE_TABLE_ISOLATION - pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0)); -#endif return __pmd(xchg((pmdval_t *)xp, 0)); } #else @@ -73,9 +67,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) #ifdef CONFIG_SMP static inline pud_t native_pudp_get_and_clear(pud_t *xp) { -#ifdef CONFIG_PAGE_TABLE_ISOLATION - pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0)); -#endif return __pud(xchg((pudval_t *)xp, 0)); } #else diff --git a/security/Kconfig b/security/Kconfig index 27d8b2688f75..d9aa521b5206 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -57,7 +57,7 @@ config SECURITY_NETWORK config PAGE_TABLE_ISOLATION bool "Remove the kernel mapping in user mode" default y - depends on X86 && !UML + depends on (X86_64 || X86_PAE) && !UML help This feature reduces the number of hardware side channels by ensuring that the majority of kernel addresses are not mapped -- GitLab From 34043d250f51368f214aed7f54c2dc29c819a8c7 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Fri, 14 Sep 2018 12:03:18 +0200 Subject: [PATCH 1511/1692] net/sched: act_sample: fix NULL dereference in the data path Matteo reported the following splat, testing the datapath of TC 'sample': BUG: KASAN: null-ptr-deref in tcf_sample_act+0xc4/0x310 Read of size 8 at addr 0000000000000000 by task nc/433 CPU: 0 PID: 433 Comm: nc Not tainted 4.19.0-rc3-kvm #17 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 
?-20180531_142017-buildhw-08.phx2.fedoraproject.org-1.fc28 04/01/2014 Call Trace: kasan_report.cold.6+0x6c/0x2fa tcf_sample_act+0xc4/0x310 ? dev_hard_start_xmit+0x117/0x180 tcf_action_exec+0xa3/0x160 tcf_classify+0xdd/0x1d0 htb_enqueue+0x18e/0x6b0 ? deref_stack_reg+0x7a/0xb0 ? htb_delete+0x4b0/0x4b0 ? unwind_next_frame+0x819/0x8f0 ? entry_SYSCALL_64_after_hwframe+0x44/0xa9 __dev_queue_xmit+0x722/0xca0 ? unwind_get_return_address_ptr+0x50/0x50 ? netdev_pick_tx+0xe0/0xe0 ? save_stack+0x8c/0xb0 ? kasan_kmalloc+0xbe/0xd0 ? __kmalloc_track_caller+0xe4/0x1c0 ? __kmalloc_reserve.isra.45+0x24/0x70 ? __alloc_skb+0xdd/0x2e0 ? sk_stream_alloc_skb+0x91/0x3b0 ? tcp_sendmsg_locked+0x71b/0x15a0 ? tcp_sendmsg+0x22/0x40 ? __sys_sendto+0x1b0/0x250 ? __x64_sys_sendto+0x6f/0x80 ? do_syscall_64+0x5d/0x150 ? entry_SYSCALL_64_after_hwframe+0x44/0xa9 ? __sys_sendto+0x1b0/0x250 ? __x64_sys_sendto+0x6f/0x80 ? do_syscall_64+0x5d/0x150 ? entry_SYSCALL_64_after_hwframe+0x44/0xa9 ip_finish_output2+0x495/0x590 ? ip_copy_metadata+0x2e0/0x2e0 ? skb_gso_validate_network_len+0x6f/0x110 ? ip_finish_output+0x174/0x280 __tcp_transmit_skb+0xb17/0x12b0 ? __tcp_select_window+0x380/0x380 tcp_write_xmit+0x913/0x1de0 ? __sk_mem_schedule+0x50/0x80 tcp_sendmsg_locked+0x49d/0x15a0 ? tcp_rcv_established+0x8da/0xa30 ? tcp_set_state+0x220/0x220 ? clear_user+0x1f/0x50 ? iov_iter_zero+0x1ae/0x590 ? __fget_light+0xa0/0xe0 tcp_sendmsg+0x22/0x40 __sys_sendto+0x1b0/0x250 ? __ia32_sys_getpeername+0x40/0x40 ? _copy_to_user+0x58/0x70 ? poll_select_copy_remaining+0x176/0x200 ? __pollwait+0x1c0/0x1c0 ? ktime_get_ts64+0x11f/0x140 ? kern_select+0x108/0x150 ? core_sys_select+0x360/0x360 ? vfs_read+0x127/0x150 ? kernel_write+0x90/0x90 __x64_sys_sendto+0x6f/0x80 do_syscall_64+0x5d/0x150 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fefef2b129d Code: ff ff ff ff eb b6 0f 1f 80 00 00 00 00 48 8d 05 51 37 0c 00 41 89 ca 8b 00 85 c0 75 20 45 31 c9 45 31 c0 b8 2c 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 6b f3 c3 66 0f 1f 84 00 00 00 00 00 41 56 41 RSP: 002b:00007fff2f5350c8 EFLAGS: 00000246 ORIG_RAX: 000000000000002c RAX: ffffffffffffffda RBX: 000056118d60c120 RCX: 00007fefef2b129d RDX: 0000000000002000 RSI: 000056118d629320 RDI: 0000000000000003 RBP: 000056118d530370 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000002000 R13: 000056118d5c2a10 R14: 000056118d5c2a10 R15: 000056118d5303b8 tcf_sample_act() tried to update its per-cpu stats, but tcf_sample_init() forgot to allocate them, because tcf_idr_create() was called with a wrong value of 'cpustats'. Setting it to true proved to fix the reported crash. Reported-by: Matteo Croce Fixes: 65a206c01e8e ("net/sched: Change act_api and act_xxx modules to use IDR") Fixes: 5c5670fae430 ("net/sched: Introduce sample tc action") Tested-by: Matteo Croce Signed-off-by: Davide Caratti Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/act_sample.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 44e9c00657bc..6b67aa13d2dd 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, - &act_sample_ops, bind, false); + &act_sample_ops, bind, true); if (ret) { tcf_idr_cleanup(tn, parm->index); return ret; -- GitLab From 2a534a7473bf4e7f1c12805113f80c795fc8e89a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 23 Aug 2018 11:02:49 -0400 Subject: [PATCH 1512/1692] NFSv4: Fix a tracepoint Oops in initiate_file_draining() Now that the value of 'ino' can be NULL or an ERR_PTR(), we need to change the test in the tracepoint. Fixes: ce5624f7e6675 ("NFSv4: Return NFS4ERR_DELAY when a layout fails...") Signed-off-by: Trond Myklebust Cc: stable@vger.kernel.org # v4.17+ Signed-off-by: Anna Schumaker --- fs/nfs/nfs4trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index a275fba93170..708342f4692f 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -1194,7 +1194,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event, TP_fast_assign( __entry->error = error; __entry->fhandle = nfs_fhandle_hash(fhandle); - if (inode != NULL) { + if (!IS_ERR_OR_NULL(inode)) { __entry->fileid = NFS_FILEID(inode); __entry->dev = inode->i_sb->s_dev; } else { -- GitLab From d03360aaf5ccac49581960bd736258c62972b88b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 5 Sep 2018 14:07:12 -0400 Subject: [PATCH 1513/1692] pNFS: Ensure we return the error if someone kills a waiting layoutget If someone interrupts a wait on one or more outstanding layoutgets in pnfs_update_layout() then return the ERESTARTSYS/EINTR error. 
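The pattern the diff below switches to is the usual kernel one of folding a negative errno from a killable wait into the pointer being returned, so the interruption propagates to the caller. A minimal user-space sketch of that idea, re-implementing the ERR_PTR()/IS_ERR() convention from include/linux/err.h and using a hypothetical wait stub in place of wait_on_bit()/wait_var_event_killable():

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* mirrors include/linux/err.h */

static void *ERR_PTR(long error) { return (void *)error; }
static long IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for a killable wait: 0 on success, -EINTR if the
 * waiting task was interrupted/killed. */
static int wait_killable_stub(int interrupted)
{
	return interrupted ? -EINTR : 0;
}

static void *lookup_layout_segment(int interrupted)
{
	/* Fold the wait's errno into the pointer result, as the patch does. */
	void *lseg = ERR_PTR(wait_killable_stub(interrupted));

	if (IS_ERR(lseg))
		return lseg;			/* caller sees the error */
	return "got a layout segment";		/* placeholder for a real lseg */
}

int main(void)
{
	printf("normal wait : IS_ERR=%ld\n", IS_ERR(lookup_layout_segment(0)));
	printf("killed wait : IS_ERR=%ld\n", IS_ERR(lookup_layout_segment(1)));
	return 0;
}
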
Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker --- fs/nfs/pnfs.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index e8f232de484f..7d9a51e6b847 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1740,16 +1740,16 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx, return ret; } -static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) +static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) { /* * send layoutcommit as it can hold up layoutreturn due to lseg * reference */ pnfs_layoutcommit_inode(lo->plh_inode, false); - return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN, + return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN, nfs_wait_bit_killable, - TASK_UNINTERRUPTIBLE); + TASK_KILLABLE); } static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) @@ -1830,7 +1830,9 @@ pnfs_update_layout(struct inode *ino, } lookup_again: - nfs4_client_recover_expired_lease(clp); + lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp)); + if (IS_ERR(lseg)) + goto out; first = false; spin_lock(&ino->i_lock); lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); @@ -1863,9 +1865,9 @@ pnfs_update_layout(struct inode *ino, if (list_empty(&lo->plh_segs) && atomic_read(&lo->plh_outstanding) != 0) { spin_unlock(&ino->i_lock); - if (wait_var_event_killable(&lo->plh_outstanding, - atomic_read(&lo->plh_outstanding) == 0 - || !list_empty(&lo->plh_segs))) + lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding, + atomic_read(&lo->plh_outstanding))); + if (IS_ERR(lseg) || !list_empty(&lo->plh_segs)) goto out_put_layout_hdr; pnfs_put_layout_hdr(lo); goto lookup_again; @@ -1898,8 +1900,11 @@ pnfs_update_layout(struct inode *ino, if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags)) { spin_unlock(&ino->i_lock); - wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET, - TASK_UNINTERRUPTIBLE); + lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, + NFS_LAYOUT_FIRST_LAYOUTGET, + TASK_KILLABLE)); + if (IS_ERR(lseg)) + goto out_put_layout_hdr; pnfs_put_layout_hdr(lo); dprintk("%s retrying\n", __func__); goto lookup_again; @@ -1925,7 +1930,8 @@ pnfs_update_layout(struct inode *ino, if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { spin_unlock(&ino->i_lock); dprintk("%s wait for layoutreturn\n", __func__); - if (pnfs_prepare_to_retry_layoutget(lo)) { + lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo)); + if (!IS_ERR(lseg)) { if (first) pnfs_clear_first_layoutget(lo); pnfs_put_layout_hdr(lo); -- GitLab From 2edaead69e7573f35e8d5dc20938e41eacc21b35 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 5 Sep 2018 14:07:13 -0400 Subject: [PATCH 1514/1692] NFSv4: Fix a tracepoint Oops in initiate_file_draining() Now that the value of 'ino' can be NULL or an ERR_PTR(), we need to change the test in the tracepoint. 
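The point of the change is that an ERR_PTR() value is non-NULL, so the old 'inode != NULL' guard lets a poisoned pointer through and the tracepoint then dereferences it. A compact user-space model of IS_ERR_OR_NULL() (the constant mirrors include/linux/err.h; the example pointers are purely illustrative):

#include <stdio.h>

#define MAX_ERRNO 4095UL

static int is_err_or_null(const void *ptr)
{
	/* NULL, or a small negative errno encoded as a pointer. */
	return !ptr || (unsigned long)ptr >= -MAX_ERRNO;
}

int main(void)
{
	int dummy = 0;
	void *null_inode = NULL;
	void *err_inode = (void *)-22L;		/* what ERR_PTR(-EINVAL) looks like */
	void *real_inode = &dummy;		/* any ordinary valid pointer */

	printf("NULL    inode: old test %d, new test %d\n", null_inode != NULL, is_err_or_null(null_inode));
	printf("ERR_PTR inode: old test %d, new test %d\n", err_inode != NULL, is_err_or_null(err_inode));
	printf("valid   inode: old test %d, new test %d\n", real_inode != NULL, is_err_or_null(real_inode));
	return 0;
}
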
Fixes: ce5624f7e6675 ("NFSv4: Return NFS4ERR_DELAY when a layout fails...") Signed-off-by: Trond Myklebust Cc: stable@vger.kernel.org # v4.17+ Signed-off-by: Anna Schumaker --- fs/nfs/nfs4trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 708342f4692f..b1483b303e0b 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -1137,7 +1137,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event, TP_fast_assign( __entry->error = error; __entry->fhandle = nfs_fhandle_hash(fhandle); - if (inode != NULL) { + if (!IS_ERR_OR_NULL(inode)) { __entry->fileid = NFS_FILEID(inode); __entry->dev = inode->i_sb->s_dev; } else { -- GitLab From 994b15b983a72e1148a173b61e5b279219bb45ae Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 5 Sep 2018 14:07:14 -0400 Subject: [PATCH 1515/1692] NFSv4.1 fix infinite loop on I/O. The previous fix broke recovery of delegated stateids because it assumes that if we did not mark the delegation as suspect, then the delegation has effectively been revoked, and so it removes that delegation irrespectively of whether or not it is valid and still in use. While this is "mostly harmless" for ordinary I/O, we've seen pNFS fail with LAYOUTGET spinning in an infinite loop while complaining that we're using an invalid stateid (in this case the all-zero stateid). What we rather want to do here is ensure that the delegation is always correctly marked as needing testing when that is the case. So we want to close the loophole offered by nfs4_schedule_stateid_recovery(), which marks the state as needing to be reclaimed, but not the delegation that may be backing it. Fixes: 0e3d3e5df07dc ("NFSv4.1 fix infinite loop on IO BAD_STATEID error") Signed-off-by: Trond Myklebust Cc: stable@vger.kernel.org # v4.11+ Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 10 +++++++--- fs/nfs/nfs4state.c | 2 ++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index df60dce935f3..094c3c09ff00 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2676,14 +2676,18 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) } nfs4_stateid_copy(&stateid, &delegation->stateid); - if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) || - !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, - &delegation->flags)) { + if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { rcu_read_unlock(); nfs_finish_clear_delegation_stateid(state, &stateid); return; } + if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, + &delegation->flags)) { + rcu_read_unlock(); + return; + } + cred = get_rpccred(delegation->cred); rcu_read_unlock(); status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 3df0eb52da1c..40a08cd483f0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1390,6 +1390,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_ if (!nfs4_state_mark_reclaim_nograce(clp, state)) return -EBADF; + nfs_inode_find_delegation_state_and_recover(state->inode, + &state->stateid); dprintk("%s: scheduling stateid recovery for server %s\n", __func__, clp->cl_hostname); nfs4_schedule_state_manager(clp); -- GitLab From 9f0c5124f4a82503ee5d55c60b0b9c6afc3af68b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 5 Sep 2018 14:07:15 -0400 Subject: [PATCH 1516/1692] NFS: Don't open code clearing of delegation state Add a helper for the case when the nfs4 open state has been 
set to use a delegation stateid, and we want to revert to using the open stateid. Signed-off-by: Trond Myklebust Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 094c3c09ff00..481787cac4c2 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1637,6 +1637,14 @@ static void nfs_state_set_delegation(struct nfs4_state *state, write_sequnlock(&state->seqlock); } +static void nfs_state_clear_delegation(struct nfs4_state *state) +{ + write_seqlock(&state->seqlock); + nfs4_stateid_copy(&state->stateid, &state->open_stateid); + clear_bit(NFS_DELEGATED_STATE, &state->flags); + write_sequnlock(&state->seqlock); +} + static int update_open_stateid(struct nfs4_state *state, const nfs4_stateid *open_stateid, const nfs4_stateid *delegation, @@ -2145,10 +2153,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, if (IS_ERR(opendata)) return PTR_ERR(opendata); nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); - write_seqlock(&state->seqlock); - nfs4_stateid_copy(&state->stateid, &state->open_stateid); - write_sequnlock(&state->seqlock); - clear_bit(NFS_DELEGATED_STATE, &state->flags); + nfs_state_clear_delegation(state); switch (type & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ|FMODE_WRITE: case FMODE_WRITE: @@ -2601,10 +2606,7 @@ static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, const nfs4_stateid *stateid) { nfs_remove_bad_delegation(state->inode, stateid); - write_seqlock(&state->seqlock); - nfs4_stateid_copy(&state->stateid, &state->open_stateid); - write_sequnlock(&state->seqlock); - clear_bit(NFS_DELEGATED_STATE, &state->flags); + nfs_state_clear_delegation(state); } static void nfs40_clear_delegation_stateid(struct nfs4_state *state) @@ -2672,13 +2674,14 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) delegation = rcu_dereference(NFS_I(state->inode)->delegation); if (delegation == NULL) { rcu_read_unlock(); + nfs_state_clear_delegation(state); return; } nfs4_stateid_copy(&stateid, &delegation->stateid); if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { rcu_read_unlock(); - nfs_finish_clear_delegation_stateid(state, &stateid); + nfs_state_clear_delegation(state); return; } -- GitLab From d77f778e59ca858e1fb1e9d4946080d689c04711 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Mon, 27 Aug 2018 11:31:08 -0400 Subject: [PATCH 1517/1692] drm/amd/display: Fix 3D stereo issues. We were not providing the correct pixel clocks to DML for marks calculation. 
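The adjustment applied in several places below is that hardware frame packing stacks both eye images into one frame, so the pixel rate fed to the bandwidth/watermark formulas must be double the nominal timing. A minimal sketch of that calculation (the enum is an illustrative stand-in; only the HW_FRAME_PACKING name comes from the patch itself):

#include <stdio.h>

enum timing_3d_format {
	TIMING_3D_FORMAT_NONE,
	TIMING_3D_FORMAT_HW_FRAME_PACKING,
};

static unsigned int effective_pix_clk_khz(unsigned int pix_clk_khz,
					  enum timing_3d_format fmt)
{
	/* Frame packing transmits both eyes in one (taller) frame, so the
	 * watermark math must see twice the nominal pixel rate. */
	if (fmt == TIMING_3D_FORMAT_HW_FRAME_PACKING)
		return pix_clk_khz * 2;
	return pix_clk_khz;
}

int main(void)
{
	const unsigned int clk = 148500;	/* 1080p60-class timing, in kHz */

	printf("2D           : %u kHz\n", effective_pix_clk_khz(clk, TIMING_3D_FORMAT_NONE));
	printf("frame packed : %u kHz\n", effective_pix_clk_khz(clk, TIMING_3D_FORMAT_HW_FRAME_PACKING));
	return 0;
}
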
Signed-off-by: Charlene Liu Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 6 +++++- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 5 +++-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 5 ++++- drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 3 +++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 ++ 5 files changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 160d11a15eac..9ebe30ba4dab 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -2881,6 +2881,7 @@ static void populate_initial_data( /* Pipes without underlay after */ for (i = 0; i < pipe_count; i++) { + unsigned int pixel_clock_khz; if (!pipe[i].stream || pipe[i].bottom_pipe) continue; @@ -2889,7 +2890,10 @@ static void populate_initial_data( data->lpt_en[num_displays + 4] = false; data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); - data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000); + pixel_clock_khz = pipe[i].stream->timing.pix_clk_khz; + if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clock_khz *= 2; + data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_khz, 1000); if (pipe[i].plane_state) { data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 32b34134c501..80ec09eef44f 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -852,8 +852,9 @@ bool dcn_validate_bandwidth( v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total - v->vactive[input_idx] - pipe->stream->timing.v_front_porch; - v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz / 1000.0f; - + v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz/1000.0; + if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + v->pixel_clock[input_idx] *= 2; if (!pipe->plane_state) { v->dcc_enable[input_idx] = dcn_bw_yes; v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 721dd13d2ed2..2d6a4300bfa4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -363,6 +363,9 @@ bool resource_are_streams_timing_synchronizable( || !dc_is_dp_signal(stream2->signal))) return false; + if (stream1->view_format != stream2->view_format) + return false; + return true; } static bool is_dp_and_hdmi_sharable( @@ -373,7 +376,7 @@ static bool is_dp_and_hdmi_sharable( return false; if (stream1->clamping.c_depth != COLOR_DEPTH_888 || - stream2->clamping.c_depth != COLOR_DEPTH_888) + stream2->clamping.c_depth != COLOR_DEPTH_888) return false; return true; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 9f44f1cad221..b44cc7042249 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -798,6 +798,9 @@ static void get_pixel_clock_parameters( if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { pixel_clk_params->requested_pix_clk = pixel_clk_params->requested_pix_clk / 2; } + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clk_params->requested_pix_clk *= 2; + } void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 65a596ffa02a..f628b62d75fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -980,6 +980,8 @@ static void get_pixel_clock_parameters( if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) pixel_clk_params->requested_pix_clk /= 2; + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clk_params->requested_pix_clk *= 2; } -- GitLab From b07971d43c4d321c65240749765bba2b2eaeeb30 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 29 Aug 2018 16:23:59 -0400 Subject: [PATCH 1518/1692] drm/amd/display: stop using switch for different CS revisions Clock sources currently have support for asic specific function pointers. But actual separation into functions was never performed, leaving us with giant functions that rely on switch. This change creates separate functions, removing switch use. Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dce/dce_clock_source.c | 369 ++++++++++-------- .../drm/amd/display/dc/dce/dce_clock_source.h | 9 + .../amd/display/dc/dce112/dce112_resource.c | 2 +- .../amd/display/dc/dce120/dce120_resource.c | 2 +- .../drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +- 5 files changed, 217 insertions(+), 167 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index ae3c44aff1c8..723ce80ed89c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -584,34 +584,42 @@ static uint32_t dce110_get_pix_clk_dividers( return 0; } - switch (cs->ctx->dce_version) { - case DCE_VERSION_8_0: - case DCE_VERSION_8_1: - case DCE_VERSION_8_3: - case DCE_VERSION_10_0: - case DCE_VERSION_11_0: - pll_calc_error = - dce110_get_pix_clk_dividers_helper(clk_src, + pll_calc_error = dce110_get_pix_clk_dividers_helper(clk_src, pll_settings, pix_clk_params); - break; - case DCE_VERSION_11_2: - case DCE_VERSION_11_22: - case DCE_VERSION_12_0: -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) - case DCN_VERSION_1_0: -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN1_01) - case DCN_VERSION_1_01: -#endif - dce112_get_pix_clk_dividers_helper(clk_src, - pll_settings, pix_clk_params); - break; - default: - break; + return pll_calc_error; +} + +static uint32_t dce112_get_pix_clk_dividers( + struct clock_source *cs, + struct pixel_clk_params *pix_clk_params, + struct pll_settings *pll_settings) +{ + struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs); + DC_LOGGER_INIT(); + + if (pix_clk_params == NULL || pll_settings == NULL + || pix_clk_params->requested_pix_clk == 0) { + DC_LOG_ERROR( + "%s: Invalid parameters!!\n", __func__); + return -1; } - return pll_calc_error; + memset(pll_settings, 0, sizeof(*pll_settings)); + + if (cs->id == CLOCK_SOURCE_ID_DP_DTO || + cs->id == 
CLOCK_SOURCE_ID_EXTERNAL) { + pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz; + pll_settings->calculated_pix_clk = clk_src->ext_clk_khz; + pll_settings->actual_pix_clk = + pix_clk_params->requested_pix_clk; + return -1; + } + + dce112_get_pix_clk_dividers_helper(clk_src, + pll_settings, pix_clk_params); + + return 0; } static bool disable_spread_spectrum(struct dce110_clk_src *clk_src) @@ -833,6 +841,65 @@ static bool dce110_program_pix_clk( struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); struct bp_pixel_clock_parameters bp_pc_params = {0}; + /* First disable SS + * ATOMBIOS will enable by default SS on PLL for DP, + * do not disable it here + */ + if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL && + !dc_is_dp_signal(pix_clk_params->signal_type) && + clock_source->ctx->dce_version <= DCE_VERSION_11_0) + disable_spread_spectrum(clk_src); + + /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/ + bp_pc_params.controller_id = pix_clk_params->controller_id; + bp_pc_params.pll_id = clock_source->id; + bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk; + bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; + bp_pc_params.signal_type = pix_clk_params->signal_type; + + bp_pc_params.reference_divider = pll_settings->reference_divider; + bp_pc_params.feedback_divider = pll_settings->feedback_divider; + bp_pc_params.fractional_feedback_divider = + pll_settings->fract_feedback_divider; + bp_pc_params.pixel_clock_post_divider = + pll_settings->pix_clk_post_divider; + bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC = + pll_settings->use_external_clk; + + if (clk_src->bios->funcs->set_pixel_clock( + clk_src->bios, &bp_pc_params) != BP_RESULT_OK) + return false; + /* Enable SS + * ATOMBIOS will enable by default SS for DP on PLL ( DP ID clock), + * based on HW display PLL team, SS control settings should be programmed + * during PLL Reset, but they do not have effect + * until SS_EN is asserted.*/ + if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL + && !dc_is_dp_signal(pix_clk_params->signal_type)) { + + if (pix_clk_params->flags.ENABLE_SS) + if (!enable_spread_spectrum(clk_src, + pix_clk_params->signal_type, + pll_settings)) + return false; + + /* Resync deep color DTO */ + dce110_program_pixel_clk_resync(clk_src, + pix_clk_params->signal_type, + pix_clk_params->color_depth); + } + + return true; +} + +static bool dce112_program_pix_clk( + struct clock_source *clock_source, + struct pixel_clk_params *pix_clk_params, + struct pll_settings *pll_settings) +{ + struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); + struct bp_pixel_clock_parameters bp_pc_params = {0}; + #if defined(CONFIG_DRM_AMD_DC_DCN1_0) if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; @@ -864,82 +931,29 @@ static bool dce110_program_pix_clk( bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; bp_pc_params.signal_type = pix_clk_params->signal_type; - switch (clock_source->ctx->dce_version) { - case DCE_VERSION_8_0: - case DCE_VERSION_8_1: - case DCE_VERSION_8_3: - case DCE_VERSION_10_0: - case DCE_VERSION_11_0: - bp_pc_params.reference_divider = pll_settings->reference_divider; - bp_pc_params.feedback_divider = pll_settings->feedback_divider; - bp_pc_params.fractional_feedback_divider = - pll_settings->fract_feedback_divider; - bp_pc_params.pixel_clock_post_divider = - pll_settings->pix_clk_post_divider; - bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC = + if 
(clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { + bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC = pll_settings->use_external_clk; - - if (clk_src->bios->funcs->set_pixel_clock( - clk_src->bios, &bp_pc_params) != BP_RESULT_OK) - return false; - /* Enable SS - * ATOMBIOS will enable by default SS for DP on PLL ( DP ID clock), - * based on HW display PLL team, SS control settings should be programmed - * during PLL Reset, but they do not have effect - * until SS_EN is asserted.*/ - if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL - && !dc_is_dp_signal(pix_clk_params->signal_type)) { - - if (pix_clk_params->flags.ENABLE_SS) - if (!enable_spread_spectrum(clk_src, - pix_clk_params->signal_type, - pll_settings)) - return false; - - /* Resync deep color DTO */ - dce110_program_pixel_clk_resync(clk_src, - pix_clk_params->signal_type, - pix_clk_params->color_depth); + bp_pc_params.flags.SET_XTALIN_REF_SRC = + !pll_settings->use_external_clk; + if (pix_clk_params->flags.SUPPORT_YCBCR420) { + bp_pc_params.flags.SUPPORT_YUV_420 = 1; } - - break; - case DCE_VERSION_11_2: - case DCE_VERSION_11_22: - case DCE_VERSION_12_0: -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) - case DCN_VERSION_1_0: -#endif - -#if defined(CONFIG_DRM_AMD_DC_DCN1_01) - case DCN_VERSION_1_01: -#endif - - if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { - bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC = - pll_settings->use_external_clk; - bp_pc_params.flags.SET_XTALIN_REF_SRC = - !pll_settings->use_external_clk; - if (pix_clk_params->flags.SUPPORT_YCBCR420) { - bp_pc_params.flags.SUPPORT_YUV_420 = 1; - } - } - if (clk_src->bios->funcs->set_pixel_clock( - clk_src->bios, &bp_pc_params) != BP_RESULT_OK) - return false; - /* Resync deep color DTO */ - if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) - dce112_program_pixel_clk_resync(clk_src, - pix_clk_params->signal_type, - pix_clk_params->color_depth, - pix_clk_params->flags.SUPPORT_YCBCR420); - break; - default: - break; } + if (clk_src->bios->funcs->set_pixel_clock( + clk_src->bios, &bp_pc_params) != BP_RESULT_OK) + return false; + /* Resync deep color DTO */ + if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) + dce112_program_pixel_clk_resync(clk_src, + pix_clk_params->signal_type, + pix_clk_params->color_depth, + pix_clk_params->flags.SUPPORT_YCBCR420); return true; } + static bool dce110_clock_source_power_down( struct clock_source *clk_src) { @@ -966,12 +980,19 @@ static bool dce110_clock_source_power_down( /*****************************************/ /* Constructor */ /*****************************************/ + +static const struct clock_source_funcs dce112_clk_src_funcs = { + .cs_power_down = dce110_clock_source_power_down, + .program_pix_clk = dce112_program_pix_clk, + .get_pix_clk_dividers = dce112_get_pix_clk_dividers +}; static const struct clock_source_funcs dce110_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dce110_program_pix_clk, .get_pix_clk_dividers = dce110_get_pix_clk_dividers }; + static void get_ss_info_from_atombios( struct dce110_clk_src *clk_src, enum as_signal_type as_signal, @@ -1227,81 +1248,70 @@ bool dce110_clk_src_construct( clk_src->ext_clk_khz = fw_info.external_clock_source_frequency_for_dp; - switch (clk_src->base.ctx->dce_version) { - case DCE_VERSION_8_0: - case DCE_VERSION_8_1: - case DCE_VERSION_8_3: - case DCE_VERSION_10_0: - case DCE_VERSION_11_0: - - /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */ - calc_pll_cs_init_data.bp = bios; - calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1; - 
calc_pll_cs_init_data.max_pix_clk_pll_post_divider = - clk_src->cs_mask->PLL_POST_DIV_PIXCLK; - calc_pll_cs_init_data.min_pll_ref_divider = 1; - calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV; - /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ - calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0; - /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ - calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0; - /*numberOfFractFBDividerDecimalPoints*/ - calc_pll_cs_init_data.num_fract_fb_divider_decimal_point = - FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; - /*number of decimal point to round off for fractional feedback divider value*/ - calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision = - FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; - calc_pll_cs_init_data.ctx = ctx; - - /*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */ - calc_pll_cs_init_data_hdmi.bp = bios; - calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1; - calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider = - clk_src->cs_mask->PLL_POST_DIV_PIXCLK; - calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1; - calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV; - /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ - calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500; - /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ - calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000; - /*numberOfFractFBDividerDecimalPoints*/ - calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point = - FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; - /*number of decimal point to round off for fractional feedback divider value*/ - calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision = - FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; - calc_pll_cs_init_data_hdmi.ctx = ctx; - - clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency; - - if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL) - return true; - - /* PLL only from here on */ - ss_info_from_atombios_create(clk_src); - - if (!calc_pll_max_vco_construct( - &clk_src->calc_pll, - &calc_pll_cs_init_data)) { - ASSERT_CRITICAL(false); - goto unexpected_failure; - } + /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */ + calc_pll_cs_init_data.bp = bios; + calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1; + calc_pll_cs_init_data.max_pix_clk_pll_post_divider = + clk_src->cs_mask->PLL_POST_DIV_PIXCLK; + calc_pll_cs_init_data.min_pll_ref_divider = 1; + calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV; + /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ + calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0; + /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ + calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0; + /*numberOfFractFBDividerDecimalPoints*/ + calc_pll_cs_init_data.num_fract_fb_divider_decimal_point = + FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; + /*number of decimal point to round off for fractional feedback divider value*/ + calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision = + FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; + calc_pll_cs_init_data.ctx = ctx; + + /*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */ + calc_pll_cs_init_data_hdmi.bp = bios; + calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1; + 
calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider = + clk_src->cs_mask->PLL_POST_DIV_PIXCLK; + calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1; + calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV; + /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ + calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500; + /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/ + calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000; + /*numberOfFractFBDividerDecimalPoints*/ + calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point = + FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; + /*number of decimal point to round off for fractional feedback divider value*/ + calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision = + FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM; + calc_pll_cs_init_data_hdmi.ctx = ctx; + + clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency; + + if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL) + return true; + /* PLL only from here on */ + ss_info_from_atombios_create(clk_src); - calc_pll_cs_init_data_hdmi. - min_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz/2; - calc_pll_cs_init_data_hdmi. - max_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz; + if (!calc_pll_max_vco_construct( + &clk_src->calc_pll, + &calc_pll_cs_init_data)) { + ASSERT_CRITICAL(false); + goto unexpected_failure; + } - if (!calc_pll_max_vco_construct( - &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) { - ASSERT_CRITICAL(false); - goto unexpected_failure; - } - break; - default: - break; + calc_pll_cs_init_data_hdmi. + min_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz/2; + calc_pll_cs_init_data_hdmi. + max_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz; + + + if (!calc_pll_max_vco_construct( + &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) { + ASSERT_CRITICAL(false); + goto unexpected_failure; } return true; @@ -1310,3 +1320,34 @@ bool dce110_clk_src_construct( return false; } +bool dce112_clk_src_construct( + struct dce110_clk_src *clk_src, + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + const struct dce110_clk_src_shift *cs_shift, + const struct dce110_clk_src_mask *cs_mask) +{ + struct dc_firmware_info fw_info = { { 0 } }; + + clk_src->base.ctx = ctx; + clk_src->bios = bios; + clk_src->base.id = id; + clk_src->base.funcs = &dce112_clk_src_funcs; + + clk_src->regs = regs; + clk_src->cs_shift = cs_shift; + clk_src->cs_mask = cs_mask; + + if (clk_src->bios->funcs->get_firmware_info( + clk_src->bios, &fw_info) != BP_RESULT_OK) { + ASSERT_CRITICAL(false); + return false; + } + + clk_src->ext_clk_khz = fw_info.external_clock_source_frequency_for_dp; + + return true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index cdeb96a268fb..1ed7695a76d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -144,4 +144,13 @@ bool dce110_clk_src_construct( const struct dce110_clk_src_shift *cs_shift, const struct dce110_clk_src_mask *cs_mask); +bool dce112_clk_src_construct( + struct dce110_clk_src *clk_src, + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + const struct dce110_clk_src_shift *cs_shift, + const struct dce110_clk_src_mask *cs_mask); + #endif 
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 2aa922cdcc58..0f8332ea1160 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -670,7 +670,7 @@ struct clock_source *dce112_clock_source_create( if (!clk_src) return NULL; - if (dce110_clk_src_construct(clk_src, ctx, bios, id, + if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 465f68655db2..59055801af44 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -456,7 +456,7 @@ struct clock_source *dce120_clock_source_create( if (!clk_src) return NULL; - if (dce110_clk_src_construct(clk_src, ctx, bios, id, + if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index f628b62d75fc..cb1b134b8fcb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -749,7 +749,7 @@ struct clock_source *dcn10_clock_source_create( if (!clk_src) return NULL; - if (dce110_clk_src_construct(clk_src, ctx, bios, id, + if (dce112_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { clk_src->base.dp_clk_src = dp_clk_src; return &clk_src->base; -- GitLab From c276f81b72401d64eac221d53ba24986186c502a Mon Sep 17 00:00:00 2001 From: Chiawen Huang Date: Wed, 29 Aug 2018 18:39:38 +0800 Subject: [PATCH 1519/1692] drm/amd/display: add aux i2c event log. [Why] support i2c transition event log [How] refined aux REQ and REP events in aux flow. commented REQ and REP events in i2c flow. note: i2c event log is currently commented out. 
more work is required to find an portocol parser to and generate event for the parser Signed-off-by: Chiawen Huang Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dm_event_log.h | 5 +++-- .../drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c | 9 +++++---- drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c | 4 ++++ 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h index 00a275dfa472..34a701ca879e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h +++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h @@ -31,7 +31,8 @@ #define __DM_EVENT_LOG_H__ -#define EVENT_LOG_AUX_REQ(dcc, type, action, address, len, data) -#define EVENT_LOG_AUX_Reply(dcc, type, swStatus, replyStatus, len, data) +#define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data) +#define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data) #endif + diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c index 4a88fc76614e..8eee8ace1259 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c @@ -274,8 +274,8 @@ static void submit_channel_request( REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, 10, aux110->timeout_period/10); REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); - EVENT_LOG_AUX_REQ(engine->base.ddc->pin_data->en, Native, request->action, - request->address, request->length, request->data); + EVENT_LOG_AUX_REQ(engine->base.ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_NATIVE, + request->action, request->address, request->length, request->data); } static int read_channel_reply(struct aux_engine *engine, uint32_t size, @@ -340,8 +340,9 @@ static void process_channel_reply( bytes_replied = read_channel_reply(engine, reply->length, reply->data, &reply_result, &sw_status); - EVENT_LOG_AUX_Reply(engine->base.ddc->pin_data->en, Native, - sw_status, reply_result, bytes_replied, reply->data); + EVENT_LOG_AUX_REP(engine->base.ddc->pin_data->en, + EVENT_LOG_AUX_ORIGIN_NATIVE, reply_result, + bytes_replied, reply->data); /* in case HPD is LOW, exit AUX transaction */ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) { diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c index c995ef4ea5a4..141898533e8e 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c @@ -121,6 +121,8 @@ bool dal_i2c_hw_engine_submit_request( hw_engine->base.funcs->submit_channel_request( &hw_engine->base, &request); + /* EVENT_LOG_AUX_REQ(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_I2C, */ + /* request.action, request.address, request.length, request.data); */ if ((request.status == I2C_CHANNEL_OPERATION_FAILED) || (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) { @@ -169,6 +171,8 @@ bool dal_i2c_hw_engine_submit_request( hw_engine->base.funcs-> process_channel_reply(&hw_engine->base, &reply); + /* EVENT_LOG_AUX_REP(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_I2C, */ + /* AUX_TRANSACTION_REPLY_I2C_ACK, reply.length, reply.data); */ } -- GitLab From cac7643a27ff15a3be2bf375fe7abd4cced228c3 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Mon, 27 Aug 2018 13:35:31 -0400 Subject: [PATCH 1520/1692] drm/amd/display: dc 3.1.66 Signed-off-by: Tony Cheng Reviewed-by: 
Steven Chiu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a769d07d947f..7691139363a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.65" +#define DC_VER "3.1.66" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- GitLab From 16f4c69549ef676bc278be8b267a811b6f8f59ad Mon Sep 17 00:00:00 2001 From: Chiawen Huang Date: Wed, 5 Sep 2018 20:34:57 +0800 Subject: [PATCH 1521/1692] drm/amd/display: add query HPD interface. [Why] current dc_link_detect function is not only detection but also update some link data. [How] added a pure get HPD state function. Signed-off-by: Chiawen Huang Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 18 ++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc_link.h | 1 + 2 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 9d8dc2c1ca65..bd58dbae7d3e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -890,6 +890,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return true; } +bool dc_link_get_hpd_state(struct dc_link *dc_link) +{ + struct gpio *hpd_pin; + uint32_t state; + + hpd_pin = get_hpd_gpio(dc_link->ctx->dc_bios, + dc_link->link_id, dc_link->ctx->gpio_service); + if (hpd_pin == NULL) + ASSERT(false); + + dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); + dal_gpio_get_value(hpd_pin, &state); + dal_gpio_close(hpd_pin); + dal_gpio_destroy_irq(&hpd_pin); + + return state; +} + static enum hpd_source_id get_hpd_line( struct dc_link *link) { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index d43cefbc43d3..438fb35d87b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -167,6 +167,7 @@ enum dc_detect_reason { }; bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); +bool dc_link_get_hpd_state(struct dc_link *dc_link); /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). * Return: -- GitLab From 8603b30c9a076641e2f004f36ebbda414d7fdac4 Mon Sep 17 00:00:00 2001 From: Leo Li Date: Wed, 5 Sep 2018 11:19:42 -0400 Subject: [PATCH 1522/1692] drm/amd/display: Drop amdgpu_display_manager.dal member [Why] It's not being used anymore. 
[How] Nuke it Signed-off-by: Leo Li Reviewed-by: David Francis Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 -- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 -- 2 files changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2662ab01c1cf..8acf8acb588d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -427,8 +427,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.cgs_device = adev->dm.cgs_device; - adev->dm.dal = NULL; - init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index b6fe9adf4b93..35cdf02b1b50 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -78,9 +78,7 @@ struct dm_comressor_info { uint64_t gpu_addr; }; - struct amdgpu_display_manager { - struct dal *dal; struct dc *dc; struct cgs_device *cgs_device; -- GitLab From 5232da2f75d903c48a23eca143c61d1ac4fd2d2c Mon Sep 17 00:00:00 2001 From: Leo Li Date: Wed, 5 Sep 2018 11:28:29 -0400 Subject: [PATCH 1523/1692] drm/amd/display: Drop amdgpu_dm_prev_state struct [Why] It's not being used [How] Nuke it Signed-off-by: Leo Li Reviewed-by: David Francis Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 35cdf02b1b50..7519f9ad77dd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -54,13 +54,6 @@ struct drm_device; struct amdgpu_dm_irq_handler_data; struct dc; -struct amdgpu_dm_prev_state { - struct drm_framebuffer *fb; - int32_t x; - int32_t y; - struct drm_display_mode mode; -}; - struct common_irq_params { struct amdgpu_device *adev; enum dc_irq_source irq_src; @@ -86,8 +79,6 @@ struct amdgpu_display_manager { struct drm_device *ddev; /*DRM base driver*/ u16 display_indexes_num; - struct amdgpu_dm_prev_state prev_state; - /* * 'irq_source_handler_table' holds a list of handlers * per (DAL) IRQ source. -- GitLab From cf51e4b9c34407bf0c3d9b582b7837e047e1df47 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 13 Sep 2018 14:58:49 +0900 Subject: [PATCH 1524/1692] mtd: rawnand: denali: fix a race condition when DMA is kicked I thought the read-back of the DMA_ENABLE register was unnecessary (at least it is working on my boards), then deleted it in commit 586a2c52909d ("mtd: nand: denali: squash denali_enable_dma() helper into caller"). Sorry, I was wrong - it caused a timing issue on Cyclone5 SoCFPGAs. Revive the register read-back, commenting why this is necessary. 
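For illustration, a condensed sketch of the posted-write flush this change restores; the register offset and flag are plain parameters here rather than the driver's DMA_ENABLE macros, so this is not a drop-in replacement for the hunk below:

	#include <linux/io.h>
	#include <linux/types.h>

	static void kick_dma_with_flush(void __iomem *regs, u32 enable_off,
					u32 enable_flag)
	{
		/* Posted MMIO write through the register interface. */
		iowrite32(enable_flag, regs + enable_off);

		/*
		 * Read the same register back so the write is known to have
		 * reached the device before DMA is started through the other
		 * AXI port.
		 */
		ioread32(regs + enable_off);

		/* ...now it is safe to kick the DMA transfer... */
	}
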
Fixes: 586a2c52909d ("mtd: nand: denali: squash denali_enable_dma() helper into caller") Cc: Reported-by: Steffen Trumtrar Signed-off-by: Masahiro Yamada Reviewed-by: Miquel Raynal Signed-off-by: Boris Brezillon --- drivers/mtd/nand/raw/denali.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 67b2065e7a19..b864b93dd289 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, } iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); + /* + * The ->setup_dma() hook kicks DMA by using the data/command + * interface, which belongs to a different AXI port from the + * register interface. Read back the register to avoid a race. + */ + ioread32(denali->reg + DMA_ENABLE); denali_reset_irq(denali); denali->setup_dma(denali, dma_addr, page, write); -- GitLab From 002b87d2aace62b4f3841c3aa43309d2380092be Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 14 Sep 2018 15:10:29 -0700 Subject: [PATCH 1525/1692] x86/APM: Fix build warning when PROC_FS is not enabled Fix build warning in apm_32.c when CONFIG_PROC_FS is not enabled: ../arch/x86/kernel/apm_32.c:1643:12: warning: 'proc_apm_show' defined but not used [-Wunused-function] static int proc_apm_show(struct seq_file *m, void *v) Fixes: 3f3942aca6da ("proc: introduce proc_create_single{,_data}") Signed-off-by: Randy Dunlap Signed-off-by: Thomas Gleixner Reviewed-by: Christoph Hellwig Cc: Jiri Kosina Link: https://lkml.kernel.org/r/be39ac12-44c2-4715-247f-4dcc3c525b8b@infradead.org --- arch/x86/kernel/apm_32.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index ec00d1ff5098..f7151cd03cb0 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -1640,6 +1640,7 @@ static int do_open(struct inode *inode, struct file *filp) return 0; } +#ifdef CONFIG_PROC_FS static int proc_apm_show(struct seq_file *m, void *v) { unsigned short bx; @@ -1719,6 +1720,7 @@ static int proc_apm_show(struct seq_file *m, void *v) units); return 0; } +#endif static int apm(void *unused) { -- GitLab From 7281e6c6a5bdbde9cae6eb3c6d2bf2706b94807d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 27 Jul 2018 15:29:08 +0300 Subject: [PATCH 1526/1692] drm: rcar-du: Rework clock configuration based on hardware limits The DU channels that have a display PLL (DPLL) can only use external clock sources, and don't have an internal clock divider (with the exception of H3 ES1.x where the post-divider is present and needs to be used as a workaround for a DPLL silicon issue). Rework the clock configuration to take this into account, avoiding selection of non-existing clock sources or usage of a missing post-divider. 
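As a rough illustration of the H3 ES1.x handling described above, a hypothetical helper (the function and parameter names are invented for this sketch; the real logic lives in rcar_du_crtc_set_display_timing() in the diff below):

	#include <linux/types.h>

	/*
	 * Sketch only: pick the DPLL target rate and ESCR divider field for
	 * a DPLL-equipped channel. On H3 ES1.x the DPLL runs at twice the
	 * dot clock and the /2 post-divider brings it back down; on all
	 * other SoCs the DPLL output is used directly.
	 */
	static void dpll_target_for_mode(unsigned long mode_clock, bool h3_es1x,
					 unsigned long *target, u32 *escr_div)
	{
		*target = mode_clock;
		*escr_div = 0;		/* divider field 0 => divide by 1 */

		if (h3_es1x) {
			*target = 2 * mode_clock;
			*escr_div = 1;	/* divider field 1 => divide by 2 */
		}
	}
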
Signed-off-by: Laurent Pinchart Reviewed-by: Jacopo Mondi --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 134 ++++++++++++++----------- 1 file changed, 73 insertions(+), 61 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 175c36ca89c5..687e8129adbd 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -204,78 +204,90 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; struct rcar_du_device *rcdu = rcrtc->group->dev; unsigned long mode_clock = mode->clock * 1000; - unsigned long clk; - u32 value; + u32 dsmr; u32 escr; - u32 div; - /* - * Compute the clock divisor and select the internal or external dot - * clock based on the requested frequency. - */ - clk = clk_get_rate(rcrtc->clock); - div = DIV_ROUND_CLOSEST(clk, mode_clock); - div = clamp(div, 1U, 64U) - 1; - escr = div | ESCR_DCLKSEL_CLKS; - - if (rcrtc->extclock) { + if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { + unsigned long target = mode_clock; struct dpll_info dpll = { 0 }; unsigned long extclk; - unsigned long extrate; - unsigned long rate; - u32 extdiv; + u32 dpllcr; + u32 div = 0; - extclk = clk_get_rate(rcrtc->extclock); - if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { - unsigned long target = mode_clock; + /* + * DU channels that have a display PLL can't use the internal + * system clock, and have no internal clock divider. + */ - /* - * The H3 ES1.x exhibits dot clock duty cycle stability - * issues. We can work around them by configuring the - * DPLL to twice the desired frequency, coupled with a - * /2 post-divider. This isn't needed on other SoCs and - * breaks HDMI output on M3-W for a currently unknown - * reason, so restrict the workaround to H3 ES1.x. - */ - if (soc_device_match(rcar_du_r8a7795_es1)) - target *= 2; + if (WARN_ON(!rcrtc->extclock)) + return; - rcar_du_dpll_divider(rcrtc, &dpll, extclk, target); - extclk = dpll.output; + /* + * The H3 ES1.x exhibits dot clock duty cycle stability issues. + * We can work around them by configuring the DPLL to twice the + * desired frequency, coupled with a /2 post-divider. Restrict + * the workaround to H3 ES1.x as ES2.0 and all other SoCs have + * no post-divider when a display PLL is present (as shown by + * the workaround breaking HDMI output on M3-W during testing). 
+ */ + if (soc_device_match(rcar_du_r8a7795_es1)) { + target *= 2; + div = 1; } - extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock); - extdiv = clamp(extdiv, 1U, 64U) - 1; + extclk = clk_get_rate(rcrtc->extclock); + rcar_du_dpll_divider(rcrtc, &dpll, extclk, target); - rate = clk / (div + 1); - extrate = extclk / (extdiv + 1); + dpllcr = DPLLCR_CODE | DPLLCR_CLKE + | DPLLCR_FDPLL(dpll.fdpll) + | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m) + | DPLLCR_STBY; - if (abs((long)extrate - (long)mode_clock) < - abs((long)rate - (long)mode_clock)) { + if (rcrtc->index == 1) + dpllcr |= DPLLCR_PLCS1 + | DPLLCR_INCS_DOTCLKIN1; + else + dpllcr |= DPLLCR_PLCS0 + | DPLLCR_INCS_DOTCLKIN0; - if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { - u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE - | DPLLCR_FDPLL(dpll.fdpll) - | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m) - | DPLLCR_STBY; + rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr); - if (rcrtc->index == 1) - dpllcr |= DPLLCR_PLCS1 - | DPLLCR_INCS_DOTCLKIN1; - else - dpllcr |= DPLLCR_PLCS0 - | DPLLCR_INCS_DOTCLKIN0; + escr = ESCR_DCLKSEL_DCLKIN | div; + } else { + unsigned long clk; + u32 div; - rcar_du_group_write(rcrtc->group, DPLLCR, - dpllcr); - } + /* + * Compute the clock divisor and select the internal or external + * dot clock based on the requested frequency. + */ + clk = clk_get_rate(rcrtc->clock); + div = DIV_ROUND_CLOSEST(clk, mode_clock); + div = clamp(div, 1U, 64U) - 1; - escr = ESCR_DCLKSEL_DCLKIN | extdiv; - } + escr = ESCR_DCLKSEL_CLKS | div; - dev_dbg(rcrtc->group->dev->dev, - "mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n", - mode_clock, extrate, rate, escr); + if (rcrtc->extclock) { + unsigned long extclk; + unsigned long extrate; + unsigned long rate; + u32 extdiv; + + extclk = clk_get_rate(rcrtc->extclock); + extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock); + extdiv = clamp(extdiv, 1U, 64U) - 1; + + extrate = extclk / (extdiv + 1); + rate = clk / (div + 1); + + if (abs((long)extrate - (long)mode_clock) < + abs((long)rate - (long)mode_clock)) + escr = ESCR_DCLKSEL_DCLKIN | extdiv; + + dev_dbg(rcrtc->group->dev->dev, + "mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n", + mode_clock, extrate, rate, escr); + } } rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR, @@ -283,11 +295,11 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); /* Signal polarities */ - value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) - | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0) - | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0) - | DSMR_DIPM_DISP | DSMR_CSPM; - rcar_du_crtc_write(rcrtc, DSMR, value); + dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) + | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0) + | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0) + | DSMR_DIPM_DISP | DSMR_CSPM; + rcar_du_crtc_write(rcrtc, DSMR, dsmr); /* Display timings */ rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19); -- GitLab From 8c74c4561f05f57fca2957b1d98676a0454df1ca Mon Sep 17 00:00:00 2001 From: Jacopo Mondi Date: Mon, 20 Aug 2018 17:26:17 +0200 Subject: [PATCH 1527/1692] drm: rcar-du: Improve non-DPLL clock selection DU channels not equipped with a DPLL use an SoC internal (provided by the CPG) or external clock source combined with a DU internal divider to generate the desired output dot clock frequency. 
The current clock selection procedure does not fully exploit the ability of external clock sources to generate the exact dot clock frequency by themselves, but relies instead on tuning the internal DU clock divider only, resulting in a less precise clock generation process. When possible, and desirable, ask the external clock source for the exact output dot clock frequency, and select the clock source that produces the frequency closest to the desired output dot clock. This patch specifically targets platforms (like Salvator-X[S] and ULCBs) where the DU's input dotclock.in is generated by the versaclock VC5 clock source, which is capable of generating the exact rate the DU needs as pixel clock output. This patch fixes higher resolution modes which requires an high pixel clock output currently not working on non-HDMI DU channel (such as 1920x1080@60Hz on the VGA output). Fixes: 1b30dbde8596 ("drm: rcar-du: Add support for external pixel clock") Signed-off-by: Jacopo Mondi [Factor out code to a helper function] Signed-off-by: Laurent Pinchart Acked-by: Jacopo Mondi --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 85 ++++++++++++++++---------- 1 file changed, 54 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 687e8129adbd..eadf3814228f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -194,6 +194,47 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc, best_diff); } +struct du_clk_params { + struct clk *clk; + unsigned long rate; + unsigned long diff; + u32 escr; +}; + +static void rcar_du_escr_divider(struct clk *clk, unsigned long target, + u32 escr, struct du_clk_params *params) +{ + unsigned long rate; + unsigned long diff; + u32 div; + + /* + * If the target rate has already been achieved perfectly we can't do + * better. + */ + if (params->diff == 0) + return; + + /* + * Compute the input clock rate and internal divisor values to obtain + * the clock rate closest to the target frequency. + */ + rate = clk_round_rate(clk, target); + div = clamp(DIV_ROUND_CLOSEST(rate, target), 1UL, 64UL) - 1; + diff = abs(rate / (div + 1) - target); + + /* + * Store the parameters if the resulting frequency is better than any + * previously calculated value. + */ + if (diff < params->diff) { + params->clk = clk; + params->rate = rate; + params->diff = diff; + params->escr = escr | div; + } +} + static const struct soc_device_attribute rcar_du_r8a7795_es1[] = { { .soc_id = "r8a7795", .revision = "ES1.*" }, { /* sentinel */ } @@ -254,42 +295,24 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) escr = ESCR_DCLKSEL_DCLKIN | div; } else { - unsigned long clk; - u32 div; - - /* - * Compute the clock divisor and select the internal or external - * dot clock based on the requested frequency. 
- */ - clk = clk_get_rate(rcrtc->clock); - div = DIV_ROUND_CLOSEST(clk, mode_clock); - div = clamp(div, 1U, 64U) - 1; - - escr = ESCR_DCLKSEL_CLKS | div; - - if (rcrtc->extclock) { - unsigned long extclk; - unsigned long extrate; - unsigned long rate; - u32 extdiv; + struct du_clk_params params = { .diff = (unsigned long)-1 }; - extclk = clk_get_rate(rcrtc->extclock); - extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock); - extdiv = clamp(extdiv, 1U, 64U) - 1; + rcar_du_escr_divider(rcrtc->clock, mode_clock, + ESCR_DCLKSEL_CLKS, ¶ms); + if (rcrtc->extclock) + rcar_du_escr_divider(rcrtc->extclock, mode_clock, + ESCR_DCLKSEL_DCLKIN, ¶ms); - extrate = extclk / (extdiv + 1); - rate = clk / (div + 1); + dev_dbg(rcrtc->group->dev->dev, "mode clock %lu %s rate %lu\n", + mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext", + params.rate); - if (abs((long)extrate - (long)mode_clock) < - abs((long)rate - (long)mode_clock)) - escr = ESCR_DCLKSEL_DCLKIN | extdiv; - - dev_dbg(rcrtc->group->dev->dev, - "mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n", - mode_clock, extrate, rate, escr); - } + clk_set_rate(params.clk, params.rate); + escr = params.escr; } + dev_dbg(rcrtc->group->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr); + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR, escr); rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); -- GitLab From 9fe50e64fac7a7301f82e31dbd87a4145384005e Mon Sep 17 00:00:00 2001 From: Jacopo Mondi Date: Wed, 22 Aug 2018 09:21:47 +0200 Subject: [PATCH 1528/1692] drm: rcar-du: Rename and document dpll_ch field Document and re-name the 'dpll_ch' field to a more precise 'dpll_mask' for consistency with the 'channels_mask' field defined in 'struct rcar_du_device_info'. Signed-off-by: Jacopo Mondi Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 2 +- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 6 +++--- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 3 ++- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index eadf3814228f..2bf63dcdaa7e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -248,7 +248,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) u32 dsmr; u32 escr; - if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { + if (rcdu->info->dpll_mask & (1 << rcrtc->index)) { unsigned long target = mode_clock; struct dpll_info dpll = { 0 }; unsigned long extclk; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 02fa9d36be28..0954ecd2f943 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -218,7 +218,7 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = { }, }, .num_lvds = 1, - .dpll_ch = BIT(2) | BIT(1), + .dpll_mask = BIT(2) | BIT(1), }; static const struct rcar_du_device_info rcar_du_r8a7796_info = { @@ -247,7 +247,7 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = { }, }, .num_lvds = 1, - .dpll_ch = BIT(1), + .dpll_mask = BIT(1), }; static const struct rcar_du_device_info rcar_du_r8a77965_info = { @@ -276,7 +276,7 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = { }, }, .num_lvds = 1, - .dpll_ch = BIT(1), + .dpll_mask = BIT(1), }; static const struct rcar_du_device_info rcar_du_r8a77970_info = { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 
534a0291380d..fef9ea5c22f3 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -52,6 +52,7 @@ struct rcar_du_output_routing { * @channels_mask: bit mask of available DU channels * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*) * @num_lvds: number of internal LVDS encoders + * @dpll_mask: bit mask of DU channels equipped with a DPLL */ struct rcar_du_device_info { unsigned int gen; @@ -60,7 +61,7 @@ struct rcar_du_device_info { unsigned int channels_mask; struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX]; unsigned int num_lvds; - unsigned int dpll_ch; + unsigned int dpll_mask; }; #define RCAR_DU_MAX_CRTCS 4 -- GitLab From a8492e88d9c7f336fd1356e6b25499216af62902 Mon Sep 17 00:00:00 2001 From: Jacopo Mondi Date: Wed, 22 Aug 2018 09:21:48 +0200 Subject: [PATCH 1529/1692] drm: rcar-du: Write ESCR and OTAR as CRTC registers The ESCR and OTAR registers exist in each DU channel, but at different offsets for odd and even channels. This led to usage of the group register access API to write them, with offsets macros named ESCR/OTAR and ESCR2/OTAR2 for the first and second ESCR/OTAR register in the group respectively. The names are confusing as it suggests that the ESCR/OTAR registers for DU0 and DU2 are taken into account, especially with writes performed to the group register access API. Rename the offsets to ESCR/OTAR02 and ESCR/OTAR13, and use the CRTC register access API to clarify the code. The offsets values are updated accordingly. Cosmetic patch, no functional changes intended. Signed-off-by: Jacopo Mondi Reviewed-by: Laurent Pinchart [Squashed ESCR and OTAR changes in a single commit] Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 5 ++--- drivers/gpu/drm/rcar-du/rcar_du_regs.h | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 2bf63dcdaa7e..6288b9ad9e24 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -313,9 +313,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) dev_dbg(rcrtc->group->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr); - rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR, - escr); - rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); + rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr); + rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0); /* Signal polarities */ dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 
DSMR_VSL : 0) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index f1417248f7e1..bc87f080b170 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -489,8 +489,8 @@ * External Synchronization Control Registers */ -#define ESCR 0x10000 -#define ESCR2 0x31000 +#define ESCR02 0x10000 +#define ESCR13 0x01000 #define ESCR_DCLKOINV (1 << 25) #define ESCR_DCLKSEL_DCLKIN (0 << 20) #define ESCR_DCLKSEL_CLKS (1 << 20) @@ -501,8 +501,8 @@ #define ESCR_SYNCSEL_EXHSYNC (3 << 8) #define ESCR_FRQSEL_MASK (0x3f << 0) -#define OTAR 0x10004 -#define OTAR2 0x31004 +#define OTAR02 0x10004 +#define OTAR13 0x01004 /* ----------------------------------------------------------------------------- * Dual Display Output Control Registers -- GitLab From c4341442acb14d8f1cec6999123a70f9d2bfe48f Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 5 Jun 2018 23:30:36 +0300 Subject: [PATCH 1530/1692] drm: rcar-du: lvds: add R8A77980 support Add support for the R-Car V3H (R8A77980) SoC to the LVDS encoder driver. Signed-off-by: Sergei Shtylyov Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_lvds.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 4c39de3f4f0f..ce0eb68c3416 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -522,6 +522,7 @@ static const struct of_device_id rcar_lvds_of_table[] = { { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info }, + { .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info }, { } }; -- GitLab From 2a3181d9cfd6d5aa48f8527708d0c32072072cef Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Fri, 31 Aug 2018 19:12:57 +0100 Subject: [PATCH 1531/1692] drm: rcar-du: Update Gen3 output limitations The R-Car Gen3 DU utilises the VSP1 hardware for memory access. The limits on the RPF and WPF in this pipeline are 8190x8190. Update the supported maximum sizes accordingly. Signed-off-by: Kieran Bingham Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index ed7fa3204892..7c7aff8cdf77 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -512,12 +512,22 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - dev->mode_config.max_width = 4095; - dev->mode_config.max_height = 2047; dev->mode_config.normalize_zpos = true; dev->mode_config.funcs = &rcar_du_mode_config_funcs; dev->mode_config.helper_private = &rcar_du_mode_config_helper; + if (rcdu->info->gen < 3) { + dev->mode_config.max_width = 4095; + dev->mode_config.max_height = 2047; + } else { + /* + * The Gen3 DU uses the VSP1 for memory access, and is limited + * to frame sizes of 8190x8190. 
+ */ + dev->mode_config.max_width = 8190; + dev->mode_config.max_height = 8190; + } + rcdu->num_crtcs = hweight8(rcdu->info->channels_mask); ret = rcar_du_properties_init(rcdu); -- GitLab From 0f35b25b87923394cd9048a199d05e994fbf8bae Mon Sep 17 00:00:00 2001 From: Koji Matsuoka Date: Fri, 31 Aug 2018 19:12:58 +0100 Subject: [PATCH 1532/1692] drm: rcar-du: Add support for missing pixel formats This patch supports pixel format of RGB332, ARGB4444, XRGB4444, BGR888, RGB888, BGRA8888, BGRX8888 and YVYU. VYUY pixel format is not supported by H/W specification. Signed-off-by: Koji Matsuoka Signed-off-by: Kieran Bingham Reviewed-by: Laurent Pinchart [Reordered formats with RGB first] Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 32 +++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 7c7aff8cdf77..a58a96948850 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -97,6 +97,38 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { * associated .pnmr or .edf settings. */ { + .fourcc = DRM_FORMAT_RGB332, + .bpp = 8, + .planes = 1, + }, { + .fourcc = DRM_FORMAT_ARGB4444, + .bpp = 16, + .planes = 1, + }, { + .fourcc = DRM_FORMAT_XRGB4444, + .bpp = 16, + .planes = 1, + }, { + .fourcc = DRM_FORMAT_BGR888, + .bpp = 24, + .planes = 1, + }, { + .fourcc = DRM_FORMAT_RGB888, + .bpp = 24, + .planes = 1, + }, { + .fourcc = DRM_FORMAT_BGRA8888, + .bpp = 32, + .planes = 1, + }, { + .fourcc = DRM_FORMAT_BGRX8888, + .bpp = 32, + .planes = 1, + }, { + .fourcc = DRM_FORMAT_YVYU, + .bpp = 16, + .planes = 1, + }, { .fourcc = DRM_FORMAT_NV61, .bpp = 16, .planes = 2, -- GitLab From f09e5b5d776debc2761e8d9330d60b8dcd8cb9dd Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 31 Aug 2018 19:12:59 +0100 Subject: [PATCH 1533/1692] drm: rcar-du: Update framebuffer pitch and alignment limits for Gen3 The framebuffer pitch and alignment constraints reflect the limitations of the Gen2 DU hardware. On Gen3, the DU has no memory interface and thus doesn't impose any constraint. The limitations come instead from the VSP that has a limit of 65535 bytes for the pitch and no alignment constraint. Update the checks accordingly. Signed-off-by: Laurent Pinchart Signed-off-by: Kieran Bingham --- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 35 ++++++++++++++++++--------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index a58a96948850..b5d79ecd25ea 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -204,7 +204,6 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, const struct rcar_du_format_info *format; unsigned int max_pitch; unsigned int align; - unsigned int bpp; unsigned int i; format = rcar_du_format_info(mode_cmd->pixel_format); @@ -214,20 +213,32 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-EINVAL); } - /* - * The pitch and alignment constraints are expressed in pixels on the - * hardware side and in bytes in the DRM API. - */ - bpp = format->planes == 1 ? format->bpp / 8 : 1; - max_pitch = 4096 * bpp; + if (rcdu->info->gen < 3) { + /* + * On Gen2 the DU limits the pitch to 4095 pixels and requires + * buffers to be aligned to a 16 pixels boundary (or 128 bytes + * on some platforms). + */ + unsigned int bpp = format->planes == 1 ? 
format->bpp / 8 : 1; - if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) - align = 128; - else - align = 16 * bpp; + max_pitch = 4095 * bpp; + + if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) + align = 128; + else + align = 16 * bpp; + } else { + /* + * On Gen3 the memory interface is handled by the VSP that + * limits the pitch to 65535 bytes and has no alignment + * constraint. + */ + max_pitch = 65535; + align = 1; + } if (mode_cmd->pitches[0] & (align - 1) || - mode_cmd->pitches[0] >= max_pitch) { + mode_cmd->pitches[0] > max_pitch) { dev_dbg(dev->dev, "invalid pitch value %u\n", mode_cmd->pitches[0]); return ERR_PTR(-EINVAL); -- GitLab From 6e1637c91742570ff873433ed27227933b792af4 Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Fri, 14 Sep 2018 14:21:49 +0100 Subject: [PATCH 1534/1692] drm: rcar-du: Remove packed VYUY support The Gen3 VSP used by the DU for display does not support the packed VYUY pixel format. Gen2 VSP hardware is able to process this format, but DU + VSP operation isn't enabled on Gen2, and VYUY isn't a strategic format, so it can be ignored. Remove the format from the capabilities of the DU driver. Signed-off-by: Kieran Bingham Reviewed-by: Laurent Pinchart Signed-off-by: Laurent Pinchart --- drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 4480243813ec..4576119e7777 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -126,7 +126,6 @@ static const u32 formats_kms[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_NV12, @@ -155,7 +154,6 @@ static const u32 formats_v4l2[] = { V4L2_PIX_FMT_ABGR32, V4L2_PIX_FMT_XBGR32, V4L2_PIX_FMT_UYVY, - V4L2_PIX_FMT_VYUY, V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_YVYU, V4L2_PIX_FMT_NV12M, -- GitLab From 37196ba4ae95a2077d78715eb12e879e57613d43 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 13 Sep 2018 19:43:58 -0700 Subject: [PATCH 1535/1692] hwmon: (nct6775) Fix virtual temperature sources for NCT6796D The following kernel log message is reported for the nct6775 driver on ASUS WS X299 SAGE. nct6775: Found NCT6796D or compatible chip at 0x2e:0x290 nct6775 nct6775.656: Invalid temperature source 11 at index 0, source register 0x100, temp register 0x73 nct6775 nct6775.656: Invalid temperature source 11 at index 2, source register 0x300, temp register 0x77 nct6775 nct6775.656: Invalid temperature source 11 at index 3, source register 0x800, temp register 0x79 nct6775 nct6775.656: Invalid temperature source 11 at index 4, source register 0x900, temp register 0x7b A recent version of the datasheet lists temperature source 11 as reserved. However, an older version of the datasheet lists temperature sources 10 and 11 as supported virtual temperature sources. Apparently the older version of the datasheet is correct, so list those temperature sources as supported. Virtual temperature sources are different than other temperature sources: Values are not read from a temperature sensor, but written either from BIOS or an embedded controller. As such, each virtual temperature has to be reported. Since there is now more than one temperature source, we have to keep virtual temperature sources in a chip-specific mask and can no longer rely on the assumption that there is only one virtual temperature source with a fixed index. This accounts for most of the complexity of this patch. 
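A minimal sketch of the resulting check (the mask field and BIT() usage follow the patch below; the surrounding driver context is omitted and the helper name is made up for illustration):

	#include <linux/bits.h>
	#include <linux/types.h>

	/*
	 * Previously the driver compared src against the single fixed index
	 * TEMP_SOURCE_VIRTUAL; with per-chip masks the test becomes:
	 */
	static bool nct6775_src_is_virtual(u32 virt_temp_mask, unsigned int src)
	{
		return virt_temp_mask & BIT(src);
	}
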
Reported-by: Robert Kern Cc: Robert Kern Fixes: 81820059a428 ("hwmon: (nct6775) Add support for NCT6796D") Signed-off-by: Guenter Roeck --- drivers/hwmon/nct6775.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 87c316c6c341..202a2b422461 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -207,8 +207,6 @@ superio_exit(int ioreg) #define NUM_FAN 7 -#define TEMP_SOURCE_VIRTUAL 0x1f - /* Common and NCT6775 specific data */ /* Voltage min/max registers for nr=7..14 are in bank 5 */ @@ -374,6 +372,7 @@ static const char *const nct6775_temp_label[] = { }; #define NCT6775_TEMP_MASK 0x001ffffe +#define NCT6775_VIRT_TEMP_MASK 0x00000000 static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = { [13] = 0x661, @@ -462,6 +461,7 @@ static const char *const nct6776_temp_label[] = { }; #define NCT6776_TEMP_MASK 0x007ffffe +#define NCT6776_VIRT_TEMP_MASK 0x00000000 static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = { [14] = 0x401, @@ -560,7 +560,9 @@ static const char *const nct6779_temp_label[] = { }; #define NCT6779_TEMP_MASK 0x07ffff7e +#define NCT6779_VIRT_TEMP_MASK 0x00000000 #define NCT6791_TEMP_MASK 0x87ffff7e +#define NCT6791_VIRT_TEMP_MASK 0x80000000 static const u16 NCT6779_REG_TEMP_ALTERNATE[32] = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, @@ -639,6 +641,7 @@ static const char *const nct6792_temp_label[] = { }; #define NCT6792_TEMP_MASK 0x9fffff7e +#define NCT6792_VIRT_TEMP_MASK 0x80000000 static const char *const nct6793_temp_label[] = { "", @@ -676,6 +679,7 @@ static const char *const nct6793_temp_label[] = { }; #define NCT6793_TEMP_MASK 0xbfff037e +#define NCT6793_VIRT_TEMP_MASK 0x80000000 static const char *const nct6795_temp_label[] = { "", @@ -713,6 +717,7 @@ static const char *const nct6795_temp_label[] = { }; #define NCT6795_TEMP_MASK 0xbfffff7e +#define NCT6795_VIRT_TEMP_MASK 0x80000000 static const char *const nct6796_temp_label[] = { "", @@ -725,8 +730,8 @@ static const char *const nct6796_temp_label[] = { "AUXTIN4", "SMBUSMASTER 0", "SMBUSMASTER 1", - "", - "", + "Virtual_TEMP", + "Virtual_TEMP", "", "", "", @@ -749,7 +754,8 @@ static const char *const nct6796_temp_label[] = { "Virtual_TEMP" }; -#define NCT6796_TEMP_MASK 0xbfff03fe +#define NCT6796_TEMP_MASK 0xbfff0ffe +#define NCT6796_VIRT_TEMP_MASK 0x80000c00 /* NCT6102D/NCT6106D specific data */ @@ -970,6 +976,7 @@ struct nct6775_data { u16 reg_temp_config[NUM_TEMP]; const char * const *temp_label; u32 temp_mask; + u32 virt_temp_mask; u16 REG_CONFIG; u16 REG_VBAT; @@ -3644,6 +3651,7 @@ static int nct6775_probe(struct platform_device *pdev) data->temp_label = nct6776_temp_label; data->temp_mask = NCT6776_TEMP_MASK; + data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK; data->REG_VBAT = NCT6106_REG_VBAT; data->REG_DIODE = NCT6106_REG_DIODE; @@ -3722,6 +3730,7 @@ static int nct6775_probe(struct platform_device *pdev) data->temp_label = nct6775_temp_label; data->temp_mask = NCT6775_TEMP_MASK; + data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK; data->REG_CONFIG = NCT6775_REG_CONFIG; data->REG_VBAT = NCT6775_REG_VBAT; @@ -3794,6 +3803,7 @@ static int nct6775_probe(struct platform_device *pdev) data->temp_label = nct6776_temp_label; data->temp_mask = NCT6776_TEMP_MASK; + data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK; data->REG_CONFIG = NCT6775_REG_CONFIG; data->REG_VBAT = NCT6775_REG_VBAT; @@ -3866,6 +3876,7 @@ static int nct6775_probe(struct platform_device *pdev) data->temp_label = nct6779_temp_label; data->temp_mask = 
NCT6779_TEMP_MASK; + data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK; data->REG_CONFIG = NCT6775_REG_CONFIG; data->REG_VBAT = NCT6775_REG_VBAT; @@ -3949,22 +3960,27 @@ static int nct6775_probe(struct platform_device *pdev) case nct6791: data->temp_label = nct6779_temp_label; data->temp_mask = NCT6791_TEMP_MASK; + data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK; break; case nct6792: data->temp_label = nct6792_temp_label; data->temp_mask = NCT6792_TEMP_MASK; + data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK; break; case nct6793: data->temp_label = nct6793_temp_label; data->temp_mask = NCT6793_TEMP_MASK; + data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK; break; case nct6795: data->temp_label = nct6795_temp_label; data->temp_mask = NCT6795_TEMP_MASK; + data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK; break; case nct6796: data->temp_label = nct6796_temp_label; data->temp_mask = NCT6796_TEMP_MASK; + data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK; break; } @@ -4148,7 +4164,7 @@ static int nct6775_probe(struct platform_device *pdev) * for each fan reflects a different temperature, and there * are no duplicates. */ - if (src != TEMP_SOURCE_VIRTUAL) { + if (!(data->virt_temp_mask & BIT(src))) { if (mask & BIT(src)) continue; mask |= BIT(src); -- GitLab From 338affb548c243d2af25b1ca628e67819350de6b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 15 Sep 2018 14:28:26 -0400 Subject: [PATCH 1536/1692] ext4: show test_dummy_encryption mount option in /proc/mounts When in effect, add "test_dummy_encryption" to _ext4_show_options() so that it is shown in /proc/mounts and other relevant procfs files. Signed-off-by: Eric Biggers Signed-off-by: Theodore Ts'o Cc: stable@vger.kernel.org --- fs/ext4/super.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e41da553b430..a430fb3e9720 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2145,6 +2145,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); if (test_opt(sb, DATA_ERR_ABORT)) SEQ_OPTS_PUTS("data_err=abort"); + if (DUMMY_ENCRYPTION_ENABLED(sbi)) + SEQ_OPTS_PUTS("test_dummy_encryption"); ext4_show_quota_options(seq, sb); return 0; -- GitLab From b3f0907c71e006e12fde74ea9a745b6096b6f90f Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Fri, 14 Sep 2018 08:45:58 -0500 Subject: [PATCH 1537/1692] x86/mm: Add .bss..decrypted section to hold shared variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kvmclock defines few static variables which are shared with the hypervisor during the kvmclock initialization. When SEV is active, memory is encrypted with a guest-specific key, and if the guest OS wants to share the memory region with the hypervisor then it must clear the C-bit before sharing it. Currently, we use kernel_physical_mapping_init() to split large pages before clearing the C-bit on shared pages. But it fails when called from the kvmclock initialization (mainly because the memblock allocator is not ready that early during boot). Add a __bss_decrypted section attribute which can be used when defining such shared variable. The so-defined variables will be placed in the .bss..decrypted section. This section will be mapped with C=0 early during boot. The .bss..decrypted section has a big chunk of memory that may be unused when memory encryption is not active, free it when memory encryption is not active. 
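A minimal usage sketch of the new attribute (the kvmclock patch that follows declares its wall_clock and hv_clock_boot variables this way); the variable name here is made up for illustration:

	#include <linux/types.h>
	#include <asm/mem_encrypt.h>

	/*
	 * Placed in .bss..decrypted, so it is mapped with C=0 early during
	 * boot and can be shared with the hypervisor when SEV is active.
	 */
	static u32 guest_shared_scratch __bss_decrypted;
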
Suggested-by: Thomas Gleixner Signed-off-by: Brijesh Singh Signed-off-by: Thomas Gleixner Cc: Tom Lendacky Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Paolo Bonzini Cc: Sean Christopherson Cc: Radim Krčmář Cc: kvm@vger.kernel.org Link: https://lkml.kernel.org/r/1536932759-12905-2-git-send-email-brijesh.singh@amd.com --- arch/x86/include/asm/mem_encrypt.h | 7 +++++++ arch/x86/kernel/head64.c | 16 ++++++++++++++++ arch/x86/kernel/vmlinux.lds.S | 19 +++++++++++++++++++ arch/x86/mm/init.c | 4 ++++ arch/x86/mm/mem_encrypt.c | 24 ++++++++++++++++++++++++ 5 files changed, 70 insertions(+) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index c0643831706e..616f8e637bc3 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); +void __init mem_encrypt_free_decrypted_mem(void); bool sme_active(void); bool sev_active(void); +#define __bss_decrypted __attribute__((__section__(".bss..decrypted"))) + #else /* !CONFIG_AMD_MEM_ENCRYPT */ #define sme_me_mask 0ULL @@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; static inline int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; } +#define __bss_decrypted + #endif /* CONFIG_AMD_MEM_ENCRYPT */ /* @@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; #define __sme_pa(x) (__pa(x) | sme_me_mask) #define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask) +extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; + #endif /* __ASSEMBLY__ */ #endif /* __X86_MEM_ENCRYPT_H__ */ diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 8047379e575a..c16af27eb23f 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -112,6 +112,7 @@ static bool __head check_la57_support(unsigned long physaddr) unsigned long __head __startup_64(unsigned long physaddr, struct boot_params *bp) { + unsigned long vaddr, vaddr_end; unsigned long load_delta, *p; unsigned long pgtable_flags; pgdval_t *pgd; @@ -234,6 +235,21 @@ unsigned long __head __startup_64(unsigned long physaddr, /* Encrypt the kernel and related (if SME is active) */ sme_encrypt_kernel(bp); + /* + * Clear the memory encryption mask from the .bss..decrypted section. + * The bss section will be memset to zero later in the initialization so + * there is no need to zero it after changing the memory encryption + * attribute. + */ + if (mem_encrypt_active()) { + vaddr = (unsigned long)__start_bss_decrypted; + vaddr_end = (unsigned long)__end_bss_decrypted; + for (; vaddr < vaddr_end; vaddr += PMD_SIZE) { + i = pmd_index(vaddr); + pmd[i] -= sme_get_me_mask(); + } + } + /* * Return the SME encryption mask (if SME is active) to be used as a * modifier for the initial pgdir entry programmed into CR3. diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 8bde0a419f86..5dd3317d761f 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -65,6 +65,23 @@ jiffies_64 = jiffies; #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); +/* + * This section contains data which will be mapped as decrypted. Memory + * encryption operates on a page basis. 
Make this section PMD-aligned + * to avoid splitting the pages while mapping the section early. + * + * Note: We use a separate section so that only this section gets + * decrypted to avoid exposing more than we wish. + */ +#define BSS_DECRYPTED \ + . = ALIGN(PMD_SIZE); \ + __start_bss_decrypted = .; \ + *(.bss..decrypted); \ + . = ALIGN(PAGE_SIZE); \ + __start_bss_decrypted_unused = .; \ + . = ALIGN(PMD_SIZE); \ + __end_bss_decrypted = .; \ + #else #define X86_ALIGN_RODATA_BEGIN @@ -74,6 +91,7 @@ jiffies_64 = jiffies; #define ALIGN_ENTRY_TEXT_BEGIN #define ALIGN_ENTRY_TEXT_END +#define BSS_DECRYPTED #endif @@ -355,6 +373,7 @@ SECTIONS __bss_start = .; *(.bss..page_aligned) *(.bss) + BSS_DECRYPTED . = ALIGN(PAGE_SIZE); __bss_stop = .; } diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 7a8fc26c1115..faca978ebf9d 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end) set_memory_np_noalias(begin_ul, len_pages); } +void __weak mem_encrypt_free_decrypted_mem(void) { } + void __ref free_initmem(void) { e820__reallocate_tables(); + mem_encrypt_free_decrypted_mem(); + free_kernel_image_pages(&__init_begin, &__init_end); } diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index b2de398d1fd3..006f373f54ab 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -348,6 +348,30 @@ bool sev_active(void) EXPORT_SYMBOL(sev_active); /* Architecture __weak replacement functions */ +void __init mem_encrypt_free_decrypted_mem(void) +{ + unsigned long vaddr, vaddr_end, npages; + int r; + + vaddr = (unsigned long)__start_bss_decrypted_unused; + vaddr_end = (unsigned long)__end_bss_decrypted; + npages = (vaddr_end - vaddr) >> PAGE_SHIFT; + + /* + * The unused memory range was mapped decrypted, change the encryption + * attribute from decrypted to encrypted before freeing it. + */ + if (mem_encrypt_active()) { + r = set_memory_encrypted(vaddr, npages); + if (r) { + pr_warn("failed to free unused decrypted pages\n"); + return; + } + } + + free_init_pages("unused decrypted", vaddr, vaddr_end); +} + void __init mem_encrypt_init(void) { if (!sme_me_mask) -- GitLab From 6a1cac56f41f9ea94e440dfcc1cac44b41a1b194 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Fri, 14 Sep 2018 08:45:59 -0500 Subject: [PATCH 1538/1692] x86/kvm: Use __bss_decrypted attribute in shared variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The recent removal of the memblock dependency from kvmclock caused a SEV guest regression because the wall_clock and hv_clock_boot variables are no longer mapped decrypted when SEV is active. Use the __bss_decrypted attribute to put the static wall_clock and hv_clock_boot in the .bss..decrypted section so that they are mapped decrypted during boot. In the preparatory stage of CPU hotplug, the per-cpu pvclock data pointer assigns either an element of the static array or dynamically allocated memory for the pvclock data pointer. The static array are now mapped decrypted but the dynamically allocated memory is not mapped decrypted. However, when SEV is active this memory range must be mapped decrypted. Add a function which is called after the page allocator is up, and allocate memory for the pvclock data pointers for the all possible cpus. Map this memory range as decrypted when SEV is active. 
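A condensed sketch of that allocation path (error reporting and the per-cpu sizing are trimmed, and the function name is invented; see kvmclock_init_mem() in the diff below for the real version):

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/mem_encrypt.h>
	#include <asm/set_memory.h>

	static void *alloc_hypervisor_shared(unsigned int order)
	{
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (!p)
			return NULL;

		/* With SEV active the range must be remapped decrypted. */
		if (sev_active() &&
		    set_memory_decrypted((unsigned long)page_address(p),
					 1UL << order)) {
			__free_pages(p, order);
			return NULL;
		}

		return page_address(p);
	}
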
Fixes: 368a540e0232 ("x86/kvmclock: Remove memblock dependency") Suggested-by: Thomas Gleixner Signed-off-by: Brijesh Singh Signed-off-by: Thomas Gleixner Cc: Tom Lendacky Cc: Borislav Petkov Cc: "H. Peter Anvin" Cc: Paolo Bonzini Cc: Sean Christopherson Cc: "Radim Krčmář" Cc: kvm@vger.kernel.org Link: https://lkml.kernel.org/r/1536932759-12905-3-git-send-email-brijesh.singh@amd.com --- arch/x86/kernel/kvmclock.c | 52 +++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 1e6764648af3..013fe3d21dbb 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall); (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info)) static struct pvclock_vsyscall_time_info - hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE); -static struct pvclock_wall_clock wall_clock; + hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE); +static struct pvclock_wall_clock wall_clock __bss_decrypted; static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); +static struct pvclock_vsyscall_time_info *hvclock_mem; static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) { @@ -236,6 +238,45 @@ static void kvm_shutdown(void) native_machine_shutdown(); } +static void __init kvmclock_init_mem(void) +{ + unsigned long ncpus; + unsigned int order; + struct page *p; + int r; + + if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus()) + return; + + ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE; + order = get_order(ncpus * sizeof(*hvclock_mem)); + + p = alloc_pages(GFP_KERNEL, order); + if (!p) { + pr_warn("%s: failed to alloc %d pages", __func__, (1U << order)); + return; + } + + hvclock_mem = page_address(p); + + /* + * hvclock is shared between the guest and the hypervisor, must + * be mapped decrypted. + */ + if (sev_active()) { + r = set_memory_decrypted((unsigned long) hvclock_mem, + 1UL << order); + if (r) { + __free_pages(p, order); + hvclock_mem = NULL; + pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n"); + return; + } + } + + memset(hvclock_mem, 0, PAGE_SIZE << order); +} + static int __init kvm_setup_vsyscall_timeinfo(void) { #ifdef CONFIG_X86_64 @@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void) kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; #endif + + kvmclock_init_mem(); + return 0; } early_initcall(kvm_setup_vsyscall_timeinfo); @@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu) /* Use the static page for the first CPUs, allocate otherwise */ if (cpu < HVC_BOOT_ARRAY_SIZE) p = &hv_clock_boot[cpu]; + else if (hvclock_mem) + p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE; else - p = kzalloc(sizeof(*p), GFP_KERNEL); + return -ENOMEM; per_cpu(hv_clock_per_cpu, cpu) = p; return p ? 0 : -ENOMEM; -- GitLab From fe18d649891d813964d3aaeebad873f281627fbc Mon Sep 17 00:00:00 2001 From: Li Dongyang Date: Sat, 15 Sep 2018 17:11:25 -0400 Subject: [PATCH 1539/1692] ext4: don't mark mmp buffer head dirty Marking mmp bh dirty before writing it will make writeback pick up mmp block later and submit a write, we don't want the duplicate write as kmmpd thread should have full control of reading and writing the mmp block. 
Another reason is we will also have random I/O error on the writeback request when blk integrity is enabled, because kmmpd could modify the content of the mmp block(e.g. setting new seq and time) while the mmp block is under I/O requested by writeback. Signed-off-by: Li Dongyang Signed-off-by: Theodore Ts'o Reviewed-by: Andreas Dilger Cc: stable@vger.kernel.org --- fs/ext4/mmp.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index 39b07c2d3384..2305b4374fd3 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) */ sb_start_write(sb); ext4_mmp_csum_set(sb, mmp); - mark_buffer_dirty(bh); lock_buffer(bh); bh->b_end_io = end_buffer_write_sync; get_bh(bh); -- GitLab From f6de298806d9cbc63a4907bca34a06162b9d7dce Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 13 Sep 2018 20:01:12 -0700 Subject: [PATCH 1540/1692] hwmon: (nct6775) Fix RPM output for fan7 on NCT6796D fan7 on NCT6796D does not have a fan count register; it only has an RPM register. Switch to using RPM registers to read the fan speed for all chips supporting it to solve the problem for good. Reported-by: Robert Kern Cc: Robert Kern Fixes: 81820059a428 ("hwmon: (nct6775) Add support for NCT6796D") Signed-off-by: Guenter Roeck --- drivers/hwmon/nct6775.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 202a2b422461..af4d8792bbb5 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -502,7 +502,7 @@ static const s8 NCT6779_BEEP_BITS[] = { 30, 31 }; /* intrusion0, intrusion1 */ static const u16 NCT6779_REG_FAN[] = { - 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 }; + 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x660 }; static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = { 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; @@ -924,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) return 1350000U / (reg << divreg); } +static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg) +{ + return reg; +} + static u16 fan_to_reg(u32 fan, unsigned int divreg) { if (!fan) @@ -1284,7 +1289,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg) case nct6795: case nct6796: return reg == 0x150 || reg == 0x153 || reg == 0x155 || - ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || + (reg & 0xfff0) == 0x4c0 || reg == 0x402 || reg == 0x63a || reg == 0x63c || reg == 0x63e || reg == 0x640 || reg == 0x642 || reg == 0x64a || @@ -3868,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev) data->ALARM_BITS = NCT6779_ALARM_BITS; data->BEEP_BITS = NCT6779_BEEP_BITS; - data->fan_from_reg = fan_from_reg13; + data->fan_from_reg = fan_from_reg_rpm; data->fan_from_reg_min = fan_from_reg13; data->target_temp_mask = 0xff; data->tolerance_mask = 0x07; @@ -3949,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev) data->ALARM_BITS = NCT6791_ALARM_BITS; data->BEEP_BITS = NCT6779_BEEP_BITS; - data->fan_from_reg = fan_from_reg13; + data->fan_from_reg = fan_from_reg_rpm; data->fan_from_reg_min = fan_from_reg13; data->target_temp_mask = 0xff; data->tolerance_mask = 0x07; -- GitLab From 94dbb63117e82253c9592816aa4465f0a9c94850 Mon Sep 17 00:00:00 2001 From: Toshi Kani Date: Sat, 15 Sep 2018 21:23:41 -0400 Subject: [PATCH 1541/1692] ext4, dax: add ext4_bmap to ext4_dax_aops Ext4 mount path calls .bmap to the journal inode. 
This currently works for the DAX mount case because ext4_iget() always set 'ext4_da_aops' to any regular files. In preparation to fix ext4_iget() to set 'ext4_dax_aops' for ext4 DAX files, add ext4_bmap() to 'ext4_dax_aops', since bmap works for DAX inodes. Fixes: 5f0663bb4a64 ("ext4, dax: introduce ext4_dax_aops") Signed-off-by: Toshi Kani Signed-off-by: Theodore Ts'o Suggested-by: Jan Kara Cc: stable@vger.kernel.org --- fs/ext4/inode.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 723058bfe43b..5be07f64ae0a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3951,6 +3951,7 @@ static const struct address_space_operations ext4_dax_aops = { .writepages = ext4_dax_writepages, .direct_IO = noop_direct_IO, .set_page_dirty = noop_set_page_dirty, + .bmap = ext4_bmap, .invalidatepage = noop_invalidatepage, }; -- GitLab From cce6c9f7e6029caee45c459db5b3e78fec6973cb Mon Sep 17 00:00:00 2001 From: Toshi Kani Date: Sat, 15 Sep 2018 21:37:59 -0400 Subject: [PATCH 1542/1692] ext4, dax: set ext4_dax_aops for dax files Sync syscall to DAX file needs to flush processor cache, but it currently does not flush to existing DAX files. This is because 'ext4_da_aops' is set to address_space_operations of existing DAX files, instead of 'ext4_dax_aops', since S_DAX flag is set after ext4_set_aops() in the open path. New file -------- lookup_open ext4_create __ext4_new_inode ext4_set_inode_flags // Set S_DAX flag ext4_set_aops // Set aops to ext4_dax_aops Existing file ------------- lookup_open ext4_lookup ext4_iget ext4_set_aops // Set aops to ext4_da_aops ext4_set_inode_flags // Set S_DAX flag Change ext4_iget() to initialize i_flags before ext4_set_aops(). Fixes: 5f0663bb4a64 ("ext4, dax: introduce ext4_dax_aops") Signed-off-by: Toshi Kani Signed-off-by: Theodore Ts'o Suggested-by: Jan Kara Cc: stable@vger.kernel.org --- fs/ext4/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5be07f64ae0a..f73f18a68165 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4896,6 +4896,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) * not initialized on a new filesystem. */ } ei->i_flags = le32_to_cpu(raw_inode->i_flags); + ext4_set_inode_flags(inode); inode->i_blocks = ext4_inode_blocks(raw_inode, ei); ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); if (ext4_has_feature_64bit(sb)) @@ -5042,7 +5043,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) goto bad_inode; } brelse(iloc.bh); - ext4_set_inode_flags(inode); unlock_new_inode(inode); return inode; -- GitLab From 8a104f8b5867c682d994ffa7a74093c54469c11f Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 15 Sep 2018 20:26:44 +0200 Subject: [PATCH 1543/1692] Code of Conduct: Let's revamp it. The Code of Conflict is not achieving its implicit goal of fostering civility and the spirit of 'be excellent to each other'. Explicit guidelines have demonstrated success in other projects and other areas of the kernel. Here is a Code of Conduct statement for the wider kernel. It is based on the Contributor Covenant as described at www.contributor-covenant.org From this point forward, we should abide by these rules in order to help make the kernel community a welcoming environment to participate in. 
Signed-off-by: Chris Mason Signed-off-by: Dan Williams Signed-off-by: Jonathan Corbet Signed-off-by: Olof Johansson Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman Signed-off-by: Linus Torvalds --- Documentation/process/code-of-conduct.rst | 81 ++++++++++++++++++++++ Documentation/process/code-of-conflict.rst | 28 -------- Documentation/process/index.rst | 2 +- 3 files changed, 82 insertions(+), 29 deletions(-) create mode 100644 Documentation/process/code-of-conduct.rst delete mode 100644 Documentation/process/code-of-conflict.rst diff --git a/Documentation/process/code-of-conduct.rst b/Documentation/process/code-of-conduct.rst new file mode 100644 index 000000000000..ab7c24b5478c --- /dev/null +++ b/Documentation/process/code-of-conduct.rst @@ -0,0 +1,81 @@ +Contributor Covenant Code of Conduct +++++++++++++++++++++++++++++++++++++ + +Our Pledge +========== + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and +expression, level of experience, education, socio-economic status, nationality, +personal appearance, race, religion, or sexual identity and orientation. + +Our Standards +============= + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others’ private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + + +Our Responsibilities +==================== + +Maintainers are responsible for clarifying the standards of acceptable behavior +and are expected to take appropriate and fair corrective action in response to +any instances of unacceptable behavior. + +Maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +Scope +===== + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +Enforcement +=========== + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the Technical Advisory Board (TAB) at +. 
All complaints will be reviewed and +investigated and will result in a response that is deemed necessary and +appropriate to the circumstances. The TAB is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details of +specific enforcement policies may be posted separately. + +Maintainers who do not follow or enforce the Code of Conduct in good faith may +face temporary or permanent repercussions as determined by other members of the +project’s leadership. + +Attribution +=========== + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/Documentation/process/code-of-conflict.rst b/Documentation/process/code-of-conflict.rst deleted file mode 100644 index 47b6de763203..000000000000 --- a/Documentation/process/code-of-conflict.rst +++ /dev/null @@ -1,28 +0,0 @@ -Code of Conflict ----------------- - -The Linux kernel development effort is a very personal process compared -to "traditional" ways of developing software. Your code and ideas -behind it will be carefully reviewed, often resulting in critique and -criticism. The review will almost always require improvements to the -code before it can be included in the kernel. Know that this happens -because everyone involved wants to see the best possible solution for -the overall success of Linux. This development process has been proven -to create the most robust operating system kernel ever, and we do not -want to do anything to cause the quality of submission and eventual -result to ever decrease. - -If however, anyone feels personally abused, threatened, or otherwise -uncomfortable due to this process, that is not acceptable. If so, -please contact the Linux Foundation's Technical Advisory Board at -, or the individual members, and they -will work to resolve the issue to the best of their ability. For more -information on who is on the Technical Advisory Board and what their -role is, please see: - - - http://www.linuxfoundation.org/projects/linux/tab - -As a reviewer of code, please strive to keep things civil and focused on -the technical issues involved. We are all humans, and frustrations can -be high on both sides of the process. Try to keep in mind the immortal -words of Bill and Ted, "Be excellent to each other." diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst index 37bd0628b6ee..9ae3e317bddf 100644 --- a/Documentation/process/index.rst +++ b/Documentation/process/index.rst @@ -20,7 +20,7 @@ Below are the essential guides that every developer should read. :maxdepth: 1 howto - code-of-conflict + code-of-conduct development-process submitting-patches coding-style -- GitLab From 7876320f88802b22d4e2daf7eb027dd14175a0f8 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 16 Sep 2018 11:52:37 -0700 Subject: [PATCH 1544/1692] Linux 4.19-rc4 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a5ef6818157a..83a03facb5ba 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Merciless Moray # *DOCUMENTATION* -- GitLab From edf2ef7242805e53ec2e0841db26e06d8bc7da70 Mon Sep 17 00:00:00 2001 From: Jongsung Kim Date: Thu, 13 Sep 2018 18:32:21 +0900 Subject: [PATCH 1545/1692] stmmac: fix valid numbers of unicast filter entries Synopsys DWC Ethernet MAC can be configured to have 1..32, 64, or 128 unicast filter entries. 
(Table 7-8 MAC Address Registers from databook) Fix dwmac1000_validate_ucast_entries() to accept values between 1 and 32 in addition. Signed-off-by: Jongsung Kim Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3609c7b696c7..2b800ce1d5bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins) * Description: * This function validates the number of Unicast address entries supported * by a particular Synopsys 10/100/1000 controller. The Synopsys controller - * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter + * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter * logic. This function validates a valid, supported configuration is * selected, and defaults to 1 Unicast address if an unsupported * configuration is selected. @@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) int x = ucast_entries; switch (x) { - case 1: - case 32: + case 1 ... 32: case 64: case 128: break; -- GitLab From 2b5a921740a55c00223a797d075b9c77c42cb171 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 13 Sep 2018 16:27:20 +0200 Subject: [PATCH 1546/1692] udp4: fix IP_CMSG_CHECKSUM for connected sockets commit 2abb7cdc0dc8 ("udp: Add support for doing checksum unnecessary conversion") left out the early demux path for connected sockets. As a result IP_CMSG_CHECKSUM gives wrong values for such socket when GRO is not enabled/available. This change addresses the issue by moving the csum conversion to a common helper and using such helper in both the default and the early demux rx path. Fixes: 2abb7cdc0dc8 ("udp: Add support for doing checksum unnecessary conversion") Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/ipv4/udp.c | 49 ++++++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f4e35b2ff8b8..7d69dd6fa7e8 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, inet_compute_pseudo); } +/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and + * return code conversion for ip layer consumption + */ +static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, + struct udphdr *uh) +{ + int ret; + + if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + inet_compute_pseudo); + + ret = udp_queue_rcv_skb(sk, skb); + + /* a return value > 0 means to resubmit the input, but + * it wants the return to be -protocol, or 0 + */ + if (ret > 0) + return -ret; + return 0; +} + /* * All we need to do is get the socket, and then do a checksum. 
*/ @@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (unlikely(sk->sk_rx_dst != dst)) udp_sk_rx_dst_set(sk, dst); - ret = udp_queue_rcv_skb(sk, skb); + ret = udp_unicast_rcv_skb(sk, skb, uh); sock_put(sk); - /* a return value > 0 means to resubmit the input, but - * it wants the return to be -protocol, or 0 - */ - if (ret > 0) - return -ret; - return 0; + return ret; } if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) @@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, saddr, daddr, udptable, proto); sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); - if (sk) { - int ret; - - if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) - skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, - inet_compute_pseudo); - - ret = udp_queue_rcv_skb(sk, skb); - - /* a return value > 0 means to resubmit the input, but - * it wants the return to be -protocol, or 0 - */ - if (ret > 0) - return -ret; - return 0; - } + if (sk) + return udp_unicast_rcv_skb(sk, skb, uh); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; -- GitLab From eb63f2964dbe36f26deac77d3016791675821ded Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 13 Sep 2018 16:27:21 +0200 Subject: [PATCH 1547/1692] udp6: add missing checks on edumux packet processing Currently the UDPv6 early demux rx code path lacks some mandatory checks, already implemented into the normal RX code path - namely the checksum conversion and no_check6_rx check. Similar to the previous commit, we move the common processing to an UDPv6 specific helper and call it from both edemux code path and normal code path. In respect to the UDPv4, we need to add an explicit check for non zero csum according to no_check6_rx value. Reported-by: Jianlin Shi Suggested-by: Xin Long Fixes: c9f2c1ae123a ("udp6: fix socket leak on early demux") Fixes: 2abb7cdc0dc8 ("udp: Add support for doing checksum unnecessary conversion") Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/ipv6/udp.c | 65 ++++++++++++++++++++++++++++---------------------- 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 83f4c77c79d8..28c4aa5078fc 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -752,6 +752,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) } } +/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and + * return code conversion for ip layer consumption + */ +static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, + struct udphdr *uh) +{ + int ret; + + if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + ip6_compute_pseudo); + + ret = udpv6_queue_rcv_skb(sk, skb); + + /* a return value > 0 means to resubmit the input, but + * it wants the return to be -protocol, or 0 + */ + if (ret > 0) + return -ret; + return 0; +} + int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { @@ -803,13 +825,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (unlikely(sk->sk_rx_dst != dst)) udp6_sk_rx_dst_set(sk, dst); - ret = udpv6_queue_rcv_skb(sk, skb); - sock_put(sk); + if (!uh->check && !udp_sk(sk)->no_check6_rx) { + sock_put(sk); + goto report_csum_error; + } - /* a return value > 0 means to resubmit the input */ - if (ret > 0) - return ret; - return 0; + ret = udp6_unicast_rcv_skb(sk, skb, uh); + sock_put(sk); + return ret; } /* @@ -822,30 +845,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, /* Unicast */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk) { - int ret; - - if (!uh->check && !udp_sk(sk)->no_check6_rx) { - udp6_csum_zero_error(skb); - goto csum_error; - } - - if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) - skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, - ip6_compute_pseudo); - - ret = udpv6_queue_rcv_skb(sk, skb); - - /* a return value > 0 means to resubmit the input */ - if (ret > 0) - return ret; - - return 0; + if (!uh->check && !udp_sk(sk)->no_check6_rx) + goto report_csum_error; + return udp6_unicast_rcv_skb(sk, skb, uh); } - if (!uh->check) { - udp6_csum_zero_error(skb); - goto csum_error; - } + if (!uh->check) + goto report_csum_error; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; @@ -866,6 +872,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, ulen, skb->len, daddr, ntohs(uh->dest)); goto discard; + +report_csum_error: + udp6_csum_zero_error(skb); csum_error: __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); discard: -- GitLab From 4bf9ffa0fb5744ed40d7348c24fa9ae398b1d603 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Fri, 14 Sep 2018 13:33:44 +0900 Subject: [PATCH 1548/1692] veth: Orphan skb before GRO GRO expects skbs not to be owned by sockets, but when XDP is enabled veth passed skbs owned by sockets. It caused corrupted sk_wmem_alloc. 
Paolo Abeni reported the following splat: [ 362.098904] refcount_t overflow at skb_set_owner_w+0x5e/0xa0 in iperf3[1644], uid/euid: 0/0 [ 362.108239] WARNING: CPU: 0 PID: 1644 at kernel/panic.c:648 refcount_error_report+0xa0/0xa4 [ 362.117547] Modules linked in: tcp_diag inet_diag veth intel_rapl sb_edac x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul ghash_clmulni_intel intel_cstate intel_uncore intel_rapl_perf ipmi_ssif iTCO_wdt sg ipmi_si iTCO_vendor_support ipmi_devintf mxm_wmi ipmi_msghandler pcspkr dcdbas mei_me wmi mei lpc_ich acpi_power_meter pcc_cpufreq xfs libcrc32c sd_mod mgag200 drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ixgbe igb ttm ahci mdio libahci ptp crc32c_intel drm pps_core libata i2c_algo_bit dca dm_mirror dm_region_hash dm_log dm_mod [ 362.176622] CPU: 0 PID: 1644 Comm: iperf3 Not tainted 4.19.0-rc2.vanilla+ #2025 [ 362.184777] Hardware name: Dell Inc. PowerEdge R730/072T6D, BIOS 2.1.7 06/16/2016 [ 362.193124] RIP: 0010:refcount_error_report+0xa0/0xa4 [ 362.198758] Code: 08 00 00 48 8b 95 80 00 00 00 49 8d 8c 24 80 0a 00 00 41 89 c1 44 89 2c 24 48 89 de 48 c7 c7 18 4d e7 9d 31 c0 e8 30 fa ff ff <0f> 0b eb 88 0f 1f 44 00 00 55 48 89 e5 41 56 41 55 41 54 49 89 fc [ 362.219711] RSP: 0018:ffff9ee6ff603c20 EFLAGS: 00010282 [ 362.225538] RAX: 0000000000000000 RBX: ffffffff9de83e10 RCX: 0000000000000000 [ 362.233497] RDX: 0000000000000001 RSI: ffff9ee6ff6167d8 RDI: ffff9ee6ff6167d8 [ 362.241457] RBP: ffff9ee6ff603d78 R08: 0000000000000490 R09: 0000000000000004 [ 362.249416] R10: 0000000000000000 R11: ffff9ee6ff603990 R12: ffff9ee664b94500 [ 362.257377] R13: 0000000000000000 R14: 0000000000000004 R15: ffffffff9de615f9 [ 362.265337] FS: 00007f1d22d28740(0000) GS:ffff9ee6ff600000(0000) knlGS:0000000000000000 [ 362.274363] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 362.280773] CR2: 00007f1d222f35d0 CR3: 0000001fddfec003 CR4: 00000000001606f0 [ 362.288733] Call Trace: [ 362.291459] [ 362.293702] ex_handler_refcount+0x4e/0x80 [ 362.298269] fixup_exception+0x35/0x40 [ 362.302451] do_trap+0x109/0x150 [ 362.306048] do_error_trap+0xd5/0x130 [ 362.315766] invalid_op+0x14/0x20 [ 362.319460] RIP: 0010:skb_set_owner_w+0x5e/0xa0 [ 362.324512] Code: ef ff ff 74 49 48 c7 43 60 20 7b 4a 9d 8b 85 f4 01 00 00 85 c0 75 16 8b 83 e0 00 00 00 f0 01 85 44 01 00 00 0f 88 d8 23 16 00 <5b> 5d c3 80 8b 91 00 00 00 01 8b 85 f4 01 00 00 89 83 a4 00 00 00 [ 362.345465] RSP: 0018:ffff9ee6ff603e20 EFLAGS: 00010a86 [ 362.351291] RAX: 0000000000001100 RBX: ffff9ee65deec700 RCX: ffff9ee65e829244 [ 362.359250] RDX: 0000000000000100 RSI: ffff9ee65e829100 RDI: ffff9ee65deec700 [ 362.367210] RBP: ffff9ee65e829100 R08: 000000000002a380 R09: 0000000000000000 [ 362.375169] R10: 0000000000000002 R11: fffff1a4bf77bb00 R12: ffffc0754661d000 [ 362.383130] R13: ffff9ee65deec200 R14: ffff9ee65f597000 R15: 00000000000000aa [ 362.391092] veth_xdp_rcv+0x4e4/0x890 [veth] [ 362.399357] veth_poll+0x4d/0x17a [veth] [ 362.403731] net_rx_action+0x2af/0x3f0 [ 362.407912] __do_softirq+0xdd/0x29e [ 362.411897] do_softirq_own_stack+0x2a/0x40 [ 362.416561] [ 362.418899] do_softirq+0x4b/0x70 [ 362.422594] __local_bh_enable_ip+0x50/0x60 [ 362.427258] ip_finish_output2+0x16a/0x390 [ 362.431824] ip_output+0x71/0xe0 [ 362.440670] __tcp_transmit_skb+0x583/0xab0 [ 362.445333] tcp_write_xmit+0x247/0xfb0 [ 362.449609] __tcp_push_pending_frames+0x2d/0xd0 [ 362.454760] tcp_sendmsg_locked+0x857/0xd30 [ 362.459424] tcp_sendmsg+0x27/0x40 [ 362.463216] sock_sendmsg+0x36/0x50 
[ 362.467104] sock_write_iter+0x87/0x100 [ 362.471382] __vfs_write+0x112/0x1a0 [ 362.475369] vfs_write+0xad/0x1a0 [ 362.479062] ksys_write+0x52/0xc0 [ 362.482759] do_syscall_64+0x5b/0x180 [ 362.486841] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 362.492473] RIP: 0033:0x7f1d22293238 [ 362.496458] Code: 89 02 48 c7 c0 ff ff ff ff eb b3 0f 1f 80 00 00 00 00 f3 0f 1e fa 48 8d 05 c5 54 2d 00 8b 00 85 c0 75 17 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 58 c3 0f 1f 80 00 00 00 00 41 54 49 89 d4 55 [ 362.517409] RSP: 002b:00007ffebaef8008 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 362.525855] RAX: ffffffffffffffda RBX: 0000000000002800 RCX: 00007f1d22293238 [ 362.533816] RDX: 0000000000002800 RSI: 00007f1d22d36000 RDI: 0000000000000005 [ 362.541775] RBP: 00007f1d22d36000 R08: 00000002db777a30 R09: 0000562b70712b20 [ 362.549734] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000005 [ 362.557693] R13: 0000000000002800 R14: 00007ffebaef8060 R15: 0000562b70712260 In order to avoid this, orphan the skb before entering GRO. Fixes: 948d4f214fde ("veth: Add driver XDP") Reported-by: Paolo Abeni Signed-off-by: Toshiaki Makita Tested-by: Paolo Abeni Signed-off-by: David S. Miller --- drivers/net/veth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 8d679c8b7f25..41a00cd76955 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, int mac_len, delta, off; struct xdp_buff xdp; + skb_orphan(skb); + rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (unlikely(!xdp_prog)) { @@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, skb_copy_header(nskb, skb); head_off = skb_headroom(nskb) - skb_headroom(skb); skb_headers_offset_update(nskb, head_off); - if (skb->sk) - skb_set_owner_w(nskb, skb->sk); consume_skb(skb); skb = nskb; } -- GitLab From b1fbebd4164b3d170ad916dcd692cf843c9c065d Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Mon, 17 Sep 2018 17:25:24 +0900 Subject: [PATCH 1549/1692] ALSA: bebob: fix memory leak for M-Audio FW1814 and ProjectMix I/O at error path After allocating model-dependent data for M-Audio FW1814 and ProjectMix I/O, ALSA bebob driver has memory leak at error path. This commit releases the allocated data at the error path. 
Fixes: 04a2c73c97eb('ALSA: bebob: delayed registration of sound card') Cc: # v4.7+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/bebob/bebob.c | 2 ++ sound/firewire/bebob/bebob_maudio.c | 4 ---- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index 730ea91d9be8..93676354f87f 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c @@ -263,6 +263,8 @@ do_registration(struct work_struct *work) error: mutex_unlock(&devices_mutex); snd_bebob_stream_destroy_duplex(bebob); + kfree(bebob->maudio_special_quirk); + bebob->maudio_special_quirk = NULL; snd_card_free(bebob->card); dev_info(&bebob->unit->device, "Sound card registration failed: %d\n", err); diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c index 0c5a4cbb99ba..c266997ad299 100644 --- a/sound/firewire/bebob/bebob_maudio.c +++ b/sound/firewire/bebob/bebob_maudio.c @@ -294,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814) bebob->midi_output_ports = 2; } end: - if (err < 0) { - kfree(params); - bebob->maudio_special_quirk = NULL; - } mutex_unlock(&bebob->mutex); return err; } -- GitLab From ce925f088b979537f22f9e05eb923ef9822ca139 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Mon, 17 Sep 2018 17:26:08 +0900 Subject: [PATCH 1550/1692] ALSA: oxfw: fix memory leak for model-dependent data at error path After allocating model-dependent data, ALSA OXFW driver has memory leak of the data at error path. This commit releases the data at the error path. Fixes: 6c29230e2a5f ('ALSA: oxfw: delayed registration of sound card') Cc: # v4.7+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/oxfw/oxfw.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index fd34ef2ac679..75c6ba2fe3dc 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -271,6 +271,8 @@ static void do_registration(struct work_struct *work) if (oxfw->has_output) snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); snd_card_free(oxfw->card); + kfree(oxfw->spec); + oxfw->spec = NULL; dev_info(&oxfw->unit->device, "Sound card registration failed: %d\n", err); } -- GitLab From 1064bc685d359f549f91c2d5f111965a9284f328 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Mon, 17 Sep 2018 17:26:20 +0900 Subject: [PATCH 1551/1692] ALSA: oxfw: fix memory leak of discovered stream formats at error path After finishing discover of stream formats, ALSA OXFW driver has memory leak of allocated memory object at error path. This commit releases the memory object at the error path. 
Fixes: 6c29230e2a5f ('ALSA: oxfw: delayed registration of sound card') Cc: # v4.7+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/oxfw/oxfw.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 75c6ba2fe3dc..2ea8be6c8584 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -208,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw) static void do_registration(struct work_struct *work) { struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work); + int i; int err; if (oxfw->registered) @@ -270,6 +271,12 @@ static void do_registration(struct work_struct *work) snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); if (oxfw->has_output) snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); + for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) { + kfree(oxfw->tx_stream_formats[i]); + oxfw->tx_stream_formats[i] = NULL; + kfree(oxfw->rx_stream_formats[i]); + oxfw->rx_stream_formats[i] = NULL; + } snd_card_free(oxfw->card); kfree(oxfw->spec); oxfw->spec = NULL; -- GitLab From c3b55e2ec9c76e7a0de2a0b1dc851fdc9440385b Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Mon, 17 Sep 2018 17:26:41 +0900 Subject: [PATCH 1552/1692] ALSA: fireworks: fix memory leak of response buffer at error path After allocating memory object for response buffer, ALSA fireworks driver has leak of the memory object at error path. This commit releases the object at the error path. Fixes: 7d3c1d5901aa('ALSA: fireworks: delayed registration of sound card') Cc: # v4.7+ Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai --- sound/firewire/fireworks/fireworks.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 71a0613d3da0..f2d073365cf6 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -301,6 +301,8 @@ do_registration(struct work_struct *work) snd_efw_transaction_remove_instance(efw); snd_efw_stream_destroy_duplex(efw); snd_card_free(efw->card); + kfree(efw->resp_buf); + efw->resp_buf = NULL; dev_info(&efw->unit->device, "Sound card registration failed: %d\n", err); } -- GitLab From 55066354285b36ee09dc50e2527f43a97c567177 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Mon, 17 Sep 2018 05:23:58 -0700 Subject: [PATCH 1553/1692] hwmon: (nct6775) Use different register to get fan RPM for fan7 The documented register to retrieve the fan RPM for fan7 is found to be unreliable at least with NCT6796D revision 3. Let's use register 0x4ce instead. This is undocumented for NCT6796D, but documented for NCT6797D and NCT6798D and known to be working. 
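For context, a minimal sketch (not taken from this patch) of the two read styles the driver now uses, condensed from the fan7 change earlier in this series: count registers need the clock-based conversion, whereas the 0x4c0..0x4ce RPM registers already report the speed directly.

	/* Fan count register: derive RPM from a 1.35 MHz reference count
	 * (the guard against a zero count is omitted here for brevity).
	 */
	static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
	{
		return 1350000U / (reg << divreg);
	}

	/* Fan RPM register: the register value is the speed in RPM. */
	static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg)
	{
		return reg;
	}
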
Reported-by: Robert Kern Cc: Robert Kern Fixes: 81820059a428 ("hwmon: (nct6775) Add support for NCT6796D") Signed-off-by: Guenter Roeck --- drivers/hwmon/nct6775.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index af4d8792bbb5..78603b78cf41 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -502,7 +502,7 @@ static const s8 NCT6779_BEEP_BITS[] = { 30, 31 }; /* intrusion0, intrusion1 */ static const u16 NCT6779_REG_FAN[] = { - 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x660 }; + 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce }; static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = { 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; @@ -1293,7 +1293,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg) reg == 0x402 || reg == 0x63a || reg == 0x63c || reg == 0x63e || reg == 0x640 || reg == 0x642 || reg == 0x64a || - reg == 0x64c || reg == 0x660 || + reg == 0x64c || reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || reg == 0x7b || reg == 0x7d; } -- GitLab From be1277f5eb17a2e5788139eabb0b53dd04c695f3 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 16 Jul 2018 12:58:33 +0200 Subject: [PATCH 1554/1692] nvme: count all ANA groups for ANA Log page When issuing a short read on the ANA log page the number of groups should not change, even though the final returned data might contain less groups than that number. Signed-off-by: Hannes Reinecke [switched to a for loop] Signed-off-by: Christoph Hellwig --- drivers/nvme/target/admin-cmd.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a21caea1e080..2008fa62a373 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) offset += len; ngrps++; } + for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) { + if (nvmet_ana_group_enabled[grpid]) + ngrps++; + } hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); hdr.ngrps = cpu_to_le16(ngrps); -- GitLab From 85516a9881a31e2c7a8d10f4697f3adcccc7cef1 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 7 Sep 2018 16:35:54 +0200 Subject: [PATCH 1555/1692] mtd: partitions: fix unbalanced of_node_get/put() While at first mtd_part_of_parse() would just call of_get_chil_by_name(), it has been patched to deal with sub-partitions and will now directly manipulate the node returned by mtd_get_of_node() if the MTD device is a partition. A of_node_put() was a bit below in the code, to balance the of_get_child_by_name(). However, despite its name, mtd_get_of_node() does not take a reference on the OF node. It is a simple helper hiding some pointer logic to retrieve the OF node related to an MTD device. The direct effect of such unbalanced reference counting is visible by rmmod'ing any module that would have added MTD partitions: OF: ERROR: Bad of_node_put() on As it seems normal to get a reference on the OF node during the of_property_for_each_string() that follows, add a call to of_node_get() when relevant. 
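Reduced to its essence (a sketch, not the full function), the intended balance in mtd_part_of_parse() is that every path which sets np must leave it holding one extra reference for the single of_node_put() at the end:

	np = mtd_get_of_node(master);		/* no reference taken */
	if (mtd_is_partition(master))
		of_node_get(np);		/* balance the put below */
	else
		np = of_get_child_by_name(np, "partitions"); /* takes a ref */

	/* ... iterate the "compatible" parsers ... */

	of_node_put(np);
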
Fixes: 76a832254ab0 ("mtd: partitions: use DT info for parsing partitions with "compatible" prop") Cc: stable@vger.kernel.org Signed-off-by: Miquel Raynal Signed-off-by: Boris Brezillon --- drivers/mtd/mtdpart.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 52e2cb35fc79..99c460facd5e 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master, int ret, err = 0; np = mtd_get_of_node(master); - if (!mtd_is_partition(master)) + if (mtd_is_partition(master)) + of_node_get(np); + else np = of_get_child_by_name(np, "partitions"); + of_property_for_each_string(np, "compatible", prop, compat) { parser = mtd_part_get_compatible_parser(compat); if (!parser) -- GitLab From f025571e96caa95ffc3c1792f762a584893de582 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Fri, 14 Sep 2018 11:20:07 +0000 Subject: [PATCH 1556/1692] net: ethernet: ti: add missing GENERIC_ALLOCATOR dependency This patch mades TI_DAVINCI_CPDMA select GENERIC_ALLOCATOR. without that, the following sparc64 build failure happen drivers/net/ethernet/ti/davinci_cpdma.o: In function `cpdma_check_free_tx_desc': (.text+0x278): undefined reference to `gen_pool_avail' drivers/net/ethernet/ti/davinci_cpdma.o: In function `cpdma_chan_submit': (.text+0x340): undefined reference to `gen_pool_alloc' (.text+0x5c4): undefined reference to `gen_pool_free' drivers/net/ethernet/ti/davinci_cpdma.o: In function `__cpdma_chan_free': davinci_cpdma.c:(.text+0x64c): undefined reference to `gen_pool_free' drivers/net/ethernet/ti/davinci_cpdma.o: In function `cpdma_desc_pool_destroy.isra.6': davinci_cpdma.c:(.text+0x17ac): undefined reference to `gen_pool_size' davinci_cpdma.c:(.text+0x17b8): undefined reference to `gen_pool_avail' davinci_cpdma.c:(.text+0x1824): undefined reference to `gen_pool_size' davinci_cpdma.c:(.text+0x1830): undefined reference to `gen_pool_avail' drivers/net/ethernet/ti/davinci_cpdma.o: In function `cpdma_ctlr_create': (.text+0x19f8): undefined reference to `devm_gen_pool_create' (.text+0x1a90): undefined reference to `gen_pool_add_virt' Makefile:1011: recipe for target 'vmlinux' failed Signed-off-by: Corentin Labbe Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 9263d638bd6d..f932923f7d56 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST + select GENERIC_ALLOCATOR ---help--- This driver supports TI's DaVinci CPDMA dma engine. -- GitLab From 8540827ebac6b654ab2f69c8fbce9e4fbd6304a0 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 14 Sep 2018 16:28:05 +0200 Subject: [PATCH 1557/1692] pppoe: fix reception of frames with no mac header pppoe_rcv() needs to look back at the Ethernet header in order to lookup the PPPoE session. Therefore we need to ensure that the mac header is big enough to contain an Ethernet header. Otherwise eth_hdr(skb)->h_source might access invalid data. 
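In other words, pskb_may_pull() only validates the PPPoE header starting at the network header; it says nothing about the bytes in front of it where the mac header is expected, so an explicit length check is needed before eth_hdr(skb) is dereferenced (a sketch of the guard added below):

	/* Reject frames whose mac header area cannot hold a full
	 * Ethernet header before eth_hdr(skb)->h_source is read.
	 */
	if (skb_mac_header_len(skb) < ETH_HLEN)
		goto drop;
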
================================================================== BUG: KMSAN: uninit-value in __get_item drivers/net/ppp/pppoe.c:172 [inline] BUG: KMSAN: uninit-value in get_item drivers/net/ppp/pppoe.c:236 [inline] BUG: KMSAN: uninit-value in pppoe_rcv+0xcef/0x10e0 drivers/net/ppp/pppoe.c:450 CPU: 0 PID: 4543 Comm: syz-executor355 Not tainted 4.16.0+ #87 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x185/0x1d0 lib/dump_stack.c:53 kmsan_report+0x142/0x240 mm/kmsan/kmsan.c:1067 __msan_warning_32+0x6c/0xb0 mm/kmsan/kmsan_instr.c:683 __get_item drivers/net/ppp/pppoe.c:172 [inline] get_item drivers/net/ppp/pppoe.c:236 [inline] pppoe_rcv+0xcef/0x10e0 drivers/net/ppp/pppoe.c:450 __netif_receive_skb_core+0x47df/0x4a90 net/core/dev.c:4562 __netif_receive_skb net/core/dev.c:4627 [inline] netif_receive_skb_internal+0x49d/0x630 net/core/dev.c:4701 netif_receive_skb+0x230/0x240 net/core/dev.c:4725 tun_rx_batched drivers/net/tun.c:1555 [inline] tun_get_user+0x740f/0x7c60 drivers/net/tun.c:1962 tun_chr_write_iter+0x1d4/0x330 drivers/net/tun.c:1990 call_write_iter include/linux/fs.h:1782 [inline] new_sync_write fs/read_write.c:469 [inline] __vfs_write+0x7fb/0x9f0 fs/read_write.c:482 vfs_write+0x463/0x8d0 fs/read_write.c:544 SYSC_write+0x172/0x360 fs/read_write.c:589 SyS_write+0x55/0x80 fs/read_write.c:581 do_syscall_64+0x309/0x430 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x3d/0xa2 RIP: 0033:0x4447c9 RSP: 002b:00007fff64c8fc28 EFLAGS: 00000297 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00000000004447c9 RDX: 000000000000fd87 RSI: 0000000020000600 RDI: 0000000000000004 RBP: 00000000006cf018 R08: 00007fff64c8fda8 R09: 00007fff00006bda R10: 0000000000005fe7 R11: 0000000000000297 R12: 00000000004020d0 R13: 0000000000402160 R14: 0000000000000000 R15: 0000000000000000 Uninit was created at: kmsan_save_stack_with_flags mm/kmsan/kmsan.c:278 [inline] kmsan_internal_poison_shadow+0xb8/0x1b0 mm/kmsan/kmsan.c:188 kmsan_kmalloc+0x94/0x100 mm/kmsan/kmsan.c:314 kmsan_slab_alloc+0x11/0x20 mm/kmsan/kmsan.c:321 slab_post_alloc_hook mm/slab.h:445 [inline] slab_alloc_node mm/slub.c:2737 [inline] __kmalloc_node_track_caller+0xaed/0x11c0 mm/slub.c:4369 __kmalloc_reserve net/core/skbuff.c:138 [inline] __alloc_skb+0x2cf/0x9f0 net/core/skbuff.c:206 alloc_skb include/linux/skbuff.h:984 [inline] alloc_skb_with_frags+0x1d4/0xb20 net/core/skbuff.c:5234 sock_alloc_send_pskb+0xb56/0x1190 net/core/sock.c:2085 tun_alloc_skb drivers/net/tun.c:1532 [inline] tun_get_user+0x2242/0x7c60 drivers/net/tun.c:1829 tun_chr_write_iter+0x1d4/0x330 drivers/net/tun.c:1990 call_write_iter include/linux/fs.h:1782 [inline] new_sync_write fs/read_write.c:469 [inline] __vfs_write+0x7fb/0x9f0 fs/read_write.c:482 vfs_write+0x463/0x8d0 fs/read_write.c:544 SYSC_write+0x172/0x360 fs/read_write.c:589 SyS_write+0x55/0x80 fs/read_write.c:581 do_syscall_64+0x309/0x430 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x3d/0xa2 ================================================================== Fixes: 224cf5ad14c0 ("ppp: Move the PPP drivers") Reported-by: syzbot+f5f6080811c849739212@syzkaller.appspotmail.com Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- drivers/net/ppp/pppoe.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index ce61231e96ea..62dc564b251d 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb) goto out; + if (skb_mac_header_len(skb) < ETH_HLEN) + goto drop; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto drop; -- GitLab From 41948ccb4a856dddacfbd4d789d4fa8663fe41bb Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Fri, 14 Sep 2018 16:56:35 +0200 Subject: [PATCH 1558/1692] net: mvpp2: let phylink manage the carrier state Net drivers using phylink shouldn't mess with the link carrier themselves and should let phylink manage it. The mvpp2 driver wasn't following this best practice as the mac_config() function made calls to change the link carrier state. This led to wrongly reported carrier link state which then triggered other issues. This patch fixes this behaviour. But the PPv2 driver relied on this misbehaviour in two cases: for fixed links and when not using phylink (ACPI mode). The later was fixed by adding an explicit call to link_up(), which when the ACPI mode will use phylink should be removed. The fixed link case was relying on the mac_config() function to set the link up, as we found an issue in phylink_start() which assumes the carrier is off. If not, the link_up() function is never called. To fix this, a call to netif_carrier_off() is added just before phylink_start() so that we do not introduce a regression in the driver. Fixes: 4bb043262878 ("net: mvpp2: phylink support") Reported-by: Russell King Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 21 ++++++------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 28500417843e..702fec82d806 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -58,6 +58,8 @@ static struct { */ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, const struct phylink_link_state *state); +static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy); /* Queue modes */ #define MVPP2_QDIST_SINGLE_MODE 0 @@ -3142,6 +3144,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port) mvpp22_mode_reconfigure(port); if (port->phylink) { + netif_carrier_off(port->dev); phylink_start(port->phylink); } else { /* Phylink isn't used as of now for ACPI, so the MAC has to be @@ -3150,9 +3153,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port) */ struct phylink_link_state state = { .interface = port->phy_interface, - .link = 1, }; mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); + mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface, + NULL); } netif_tx_start_all_queues(port->dev); @@ -4495,10 +4499,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, return; } - netif_tx_stop_all_queues(port->dev); - if (!port->has_phy) - netif_carrier_off(port->dev); - /* Make sure the port is disabled when reconfiguring the mode */ mvpp2_port_disable(port); @@ -4523,16 +4523,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) mvpp2_port_loopback_set(port, state); - /* 
If the port already was up, make sure it's still in the same state */ - if (state->link || !port->has_phy) { - mvpp2_port_enable(port); - - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - if (!port->has_phy) - netif_carrier_on(dev); - netif_tx_wake_all_queues(dev); - } + mvpp2_port_enable(port); } static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, -- GitLab From eb4ed8e2d7fecb5f40db38e4498b9ee23cddf196 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 14 Sep 2018 17:48:10 +0200 Subject: [PATCH 1559/1692] net: macb: disable scatter-gather for macb on sama5d3 Create a new configuration for the sama5d3-macb new compatibility string. This configuration disables scatter-gather because we experienced lock down of the macb interface of this particular SoC under very high load. Signed-off-by: Nicolas Ferre Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 16e4ef7d7185..f1a86b422617 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3837,6 +3837,13 @@ static const struct macb_config at91sam9260_config = { .init = macb_init, }; +static const struct macb_config sama5d3macb_config = { + .caps = MACB_CAPS_SG_DISABLED + | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, + .clk_init = macb_clk_init, + .init = macb_init, +}; + static const struct macb_config pc302gem_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, .dma_burst_length = 16, @@ -3904,6 +3911,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,gem", .data = &pc302gem_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, + { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, { .compatible = "cdns,emac", .data = &emac_config }, -- GitLab From 321cc359d899a8e988f3725d87c18a628e1cc624 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 14 Sep 2018 17:48:11 +0200 Subject: [PATCH 1560/1692] ARM: dts: at91: add new compatibility string for macb on sama5d3 We need this new compatibility string as we experienced different behavior for this 10/100Mbits/s macb interface on this particular SoC. Backward compatibility is preserved as we keep the alternative strings. Signed-off-by: Nicolas Ferre Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/macb.txt | 1 + arch/arm/boot/dts/sama5d3_emac.dtsi | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 457d5ae16f23..3e17ac1d5d58 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -10,6 +10,7 @@ Required properties: Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on the Cadence GEM, or the generic form: "cdns,gem". Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs. + Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs. Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs. Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. 
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi index 7cb235ef0fb6..6e9e1c2f9def 100644 --- a/arch/arm/boot/dts/sama5d3_emac.dtsi +++ b/arch/arm/boot/dts/sama5d3_emac.dtsi @@ -41,7 +41,7 @@ macb1_clk: macb1_clk { }; macb1: ethernet@f802c000 { - compatible = "cdns,at91sam9260-macb", "cdns,macb"; + compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb"; reg = <0xf802c000 0x100>; interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>; pinctrl-names = "default"; -- GitLab From a7f38002fb69b44f8fc622ecb838665d0b8666af Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 14 Sep 2018 17:39:53 +0100 Subject: [PATCH 1561/1692] net: hp100: fix always-true check for link up state The operation ~(p100_inb(VG_LAN_CFG_1) & HP100_LINK_UP) returns a value that is always non-zero and hence the wait for the link to drop always terminates prematurely. Fix this by using a logical not operator instead of a bitwise complement. This issue has been in the driver since pre-2.6.12-rc2. Detected by CoverityScan, CID#114157 ("Logical vs. bitwise operator") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/hp/hp100.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index c8c7ad2eff77..9b5a68b65432 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) /* Wait for link to drop */ time = jiffies + (HZ / 10); do { - if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) + if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) break; if (!in_interrupt()) schedule_timeout_interruptible(1); -- GitLab From bbd6528d28c1b8e80832b3b018ec402b6f5c3215 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 14 Sep 2018 12:02:31 -0700 Subject: [PATCH 1562/1692] ipv6: fix possible use-after-free in ip6_xmit() In the unlikely case ip6_xmit() has to call skb_realloc_headroom(), we need to call skb_set_owner_w() before consuming original skb, otherwise we risk a use-after-free. Bring IPv6 in line with what we do in IPv4 to fix this. Fixes: 1da177e4c3f41 ("Linux-2.6.12-rc2") Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: David S. Miller --- net/ipv6/ip6_output.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16f200f06500..f9f8f554d141 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -ENOBUFS; } + if (skb->sk) + skb_set_owner_w(skb2, skb->sk); consume_skb(skb); skb = skb2; - /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically, - * it is safe to call in our context (socket lock not held) - */ - skb_set_owner_w(skb, (struct sock *)sk); } if (opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); -- GitLab From 28ea334bd1657f3c43485b4a8592672fc6835fac Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 14 Sep 2018 15:41:29 -0400 Subject: [PATCH 1563/1692] bnxt_en: Fix VF mac address regression. The recent commit to always forward the VF MAC address to the PF for approval may not work if the PF driver or the firmware is older. 
This will cause the VF driver to fail during probe: bnxt_en 0000:00:03.0 (unnamed net_device) (uninitialized): hwrm req_type 0xf seq id 0x5 error 0xffff bnxt_en 0000:00:03.0 (unnamed net_device) (uninitialized): VF MAC address 00:00:17:02:05:d0 not approved by the PF bnxt_en 0000:00:03.0: Unable to initialize mac address. bnxt_en: probe of 0000:00:03.0 failed with error -99 We fix it by treating the error as fatal only if the VF MAC address is locally generated by the VF. Fixes: 707e7e966026 ("bnxt_en: Always forward VF MAC address to the PF.") Reported-by: Seth Forshee Reported-by: Siwei Liu Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 +++++++-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 9 +++++---- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2 +- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index cecbb1d1f587..177587f9c3f1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -8027,7 +8027,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) if (ether_addr_equal(addr->sa_data, dev->dev_addr)) return 0; - rc = bnxt_approve_mac(bp, addr->sa_data); + rc = bnxt_approve_mac(bp, addr->sa_data, true); if (rc) return rc; @@ -8827,14 +8827,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp) } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; + bool strict_approval = true; if (is_valid_ether_addr(vf->mac_addr)) { /* overwrite netdev dev_addr with admin VF MAC */ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); + /* Older PF driver or firmware may not approve this + * correctly. + */ + strict_approval = false; } else { eth_hw_addr_random(bp->dev); } - rc = bnxt_approve_mac(bp, bp->dev->dev_addr); + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); #endif } return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index fcd085a9853a..3962f6fd543c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1104,7 +1104,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) mutex_unlock(&bp->hwrm_cmd_lock); } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac) +int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { struct hwrm_func_vf_cfg_input req = {0}; int rc = 0; @@ -1122,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac) memcpy(req.dflt_mac_addr, mac, ETH_ALEN); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); mac_done: - if (rc) { + if (rc && strict) { rc = -EADDRNOTAVAIL; netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", mac); + return rc; } - return rc; + return 0; } #else @@ -1144,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) { } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac) +int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index e9b20cd19881..2eed9eda1195 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); void bnxt_update_vf_mac(struct bnxt *); -int bnxt_approve_mac(struct bnxt *, u8 *); 
+int bnxt_approve_mac(struct bnxt *, u8 *, bool); #endif -- GitLab From a15f2c08c70811f120d99288d81f70d7f3d104f1 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 14 Sep 2018 12:54:56 -0700 Subject: [PATCH 1564/1692] PCI: hv: support reporting serial number as slot information The Hyper-V host API for PCI provides a unique "serial number" which can be used as basis for sysfs PCI slot table. This can be useful for cases where userspace wants to find the PCI device based on serial number. When an SR-IOV NIC is added, the host sends an attach message with serial number. The kernel doesn't use the serial number, but it is useful when doing the same thing in a userspace driver such as the DPDK. By having /sys/bus/pci/slots/N it provides a direct way to find the matching PCI device. There maybe some cases where serial number is not unique such as when using GPU's. But the PCI slot infrastructure will handle that. This has a side effect which may also be useful. The common udev network device naming policy uses the slot information (rather than PCI address). Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/pci/controller/pci-hyperv.c | 37 +++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index c00f82cc54aa..ee80e79db21a 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version; #define STATUS_REVISION_MISMATCH 0xC0000059 +/* space for 32bit serial number as string */ +#define SLOT_NAME_SIZE 11 + /* * Message Types */ @@ -494,6 +497,7 @@ struct hv_pci_dev { struct list_head list_entry; refcount_t refs; enum hv_pcichild_state state; + struct pci_slot *pci_slot; struct pci_function_description desc; bool reported_missing; struct hv_pcibus_device *hbus; @@ -1457,6 +1461,34 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus) spin_unlock_irqrestore(&hbus->device_list_lock, flags); } +/* + * Assign entries in sysfs pci slot directory. + * + * Note that this function does not need to lock the children list + * because it is called from pci_devices_present_work which + * is serialized with hv_eject_device_work because they are on the + * same ordered workqueue. Therefore hbus->children list will not change + * even when pci_create_slot sleeps. 
+ */ +static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) +{ + struct hv_pci_dev *hpdev; + char name[SLOT_NAME_SIZE]; + int slot_nr; + + list_for_each_entry(hpdev, &hbus->children, list_entry) { + if (hpdev->pci_slot) + continue; + + slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot)); + snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser); + hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr, + name, NULL); + if (!hpdev->pci_slot) + pr_warn("pci_create slot %s failed\n", name); + } +} + /** * create_root_hv_pci_bus() - Expose a new root PCI bus * @hbus: Root PCI bus, as understood by this driver @@ -1480,6 +1512,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); pci_bus_assign_resources(hbus->pci_bus); + hv_pci_assign_slots(hbus); pci_bus_add_devices(hbus->pci_bus); pci_unlock_rescan_remove(); hbus->state = hv_pcibus_installed; @@ -1742,6 +1775,7 @@ static void pci_devices_present_work(struct work_struct *work) */ pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); + hv_pci_assign_slots(hbus); pci_unlock_rescan_remove(); break; @@ -1858,6 +1892,9 @@ static void hv_eject_device_work(struct work_struct *work) list_del(&hpdev->list_entry); spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); + if (hpdev->pci_slot) + pci_destroy_slot(hpdev->pci_slot); + memset(&ctxt, 0, sizeof(ctxt)); ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; -- GitLab From 00d7ddba1143623b31bc2c15d18216e2da031b14 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Fri, 14 Sep 2018 12:54:57 -0700 Subject: [PATCH 1565/1692] hv_netvsc: pair VF based on serial number Matching network device based on MAC address is problematic since a non VF network device can be created with a duplicate MAC address causing confusion and problems. The VMBus API does provide a serial number that is a better matching method. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 3 ++ drivers/net/hyperv/netvsc_drv.c | 58 +++++++++++++++++++-------------- 2 files changed, 36 insertions(+), 25 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 31c3d77b4733..fe01e141c8f8 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev, net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; + netdev_info(ndev, "VF slot %u %s\n", + net_device_ctx->vf_serial, + net_device_ctx->vf_alloc ?
"added" : "removed"); } static void netvsc_receive_inband(struct net_device *ndev, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 915fbd66a02b..3af6d8d15233 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1894,20 +1894,6 @@ static void netvsc_link_change(struct work_struct *w) rtnl_unlock(); } -static struct net_device *get_netvsc_bymac(const u8 *mac) -{ - struct net_device_context *ndev_ctx; - - list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { - struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx); - - if (ether_addr_equal(mac, dev->perm_addr)) - return dev; - } - - return NULL; -} - static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) { struct net_device_context *net_device_ctx; @@ -2036,26 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w) rtnl_unlock(); } +/* Find netvsc by VMBus serial number. + * The PCI hyperv controller records the serial number as the slot. + */ +static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) +{ + struct device *parent = vf_netdev->dev.parent; + struct net_device_context *ndev_ctx; + struct pci_dev *pdev; + + if (!parent || !dev_is_pci(parent)) + return NULL; /* not a PCI device */ + + pdev = to_pci_dev(parent); + if (!pdev->slot) { + netdev_notice(vf_netdev, "no PCI slot information\n"); + return NULL; + } + + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { + if (!ndev_ctx->vf_alloc) + continue; + + if (ndev_ctx->vf_serial == pdev->slot->number) + return hv_get_drvdata(ndev_ctx->device_ctx); + } + + netdev_notice(vf_netdev, + "no netdev found for slot %u\n", pdev->slot->number); + return NULL; +} + static int netvsc_register_vf(struct net_device *vf_netdev) { - struct net_device *ndev; struct net_device_context *net_device_ctx; - struct device *pdev = vf_netdev->dev.parent; struct netvsc_device *netvsc_dev; + struct net_device *ndev; int ret; if (vf_netdev->addr_len != ETH_ALEN) return NOTIFY_DONE; - if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev)) - return NOTIFY_DONE; - - /* - * We will use the MAC address to locate the synthetic interface to - * associate with the VF interface. If we don't find a matching - * synthetic interface, move on. - */ - ndev = get_netvsc_bymac(vf_netdev->perm_addr); + ndev = get_netvsc_byslot(vf_netdev); if (!ndev) return NOTIFY_DONE; -- GitLab From 50c6b58a814d86a93c0f6964570f839632854044 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 14 Sep 2018 23:00:55 +0200 Subject: [PATCH 1566/1692] tls: fix currently broken MSG_PEEK behavior In kTLS MSG_PEEK behavior is currently failing, strace example: [pid 2430] socket(AF_INET, SOCK_STREAM, IPPROTO_IP) = 3 [pid 2430] socket(AF_INET, SOCK_STREAM, IPPROTO_IP) = 4 [pid 2430] bind(4, {sa_family=AF_INET, sin_port=htons(0), sin_addr=inet_addr("0.0.0.0")}, 16) = 0 [pid 2430] listen(4, 10) = 0 [pid 2430] getsockname(4, {sa_family=AF_INET, sin_port=htons(38855), sin_addr=inet_addr("0.0.0.0")}, [16]) = 0 [pid 2430] connect(3, {sa_family=AF_INET, sin_port=htons(38855), sin_addr=inet_addr("0.0.0.0")}, 16) = 0 [pid 2430] setsockopt(3, SOL_TCP, 0x1f /* TCP_??? */, [7564404], 4) = 0 [pid 2430] setsockopt(3, 0x11a /* SOL_?? */, 1, "\3\0033\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"..., 40) = 0 [pid 2430] accept(4, {sa_family=AF_INET, sin_port=htons(49636), sin_addr=inet_addr("127.0.0.1")}, [16]) = 5 [pid 2430] setsockopt(5, SOL_TCP, 0x1f /* TCP_??? */, [7564404], 4) = 0 [pid 2430] setsockopt(5, 0x11a /* SOL_?? 
*/, 2, "\3\0033\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"..., 40) = 0 [pid 2430] close(4) = 0 [pid 2430] sendto(3, "test_read_peek", 14, 0, NULL, 0) = 14 [pid 2430] sendto(3, "_mult_recs\0", 11, 0, NULL, 0) = 11 [pid 2430] recvfrom(5, "test_read_peektest_read_peektest"..., 64, MSG_PEEK, NULL, NULL) = 64 As can be seen from strace, there are two TLS records sent, i) 'test_read_peek' and ii) '_mult_recs\0' where we end up peeking 'test_read_peektest_read_peektest'. This is clearly wrong, and what happens is that given peek cannot call into tls_sw_advance_skb() to unpause strparser and proceed with the next skb, we end up looping over the current one, copying the 'test_read_peek' over and over into the user provided buffer. Here, we can only peek into the currently held skb (current, full TLS record) as otherwise we would end up having to hold all the original skb(s) (depending on the peek depth) in a separate queue when unpausing strparser to process next records, minimally intrusive is to return only up to the current record's size (which likely was what c46234ebb4d1 ("tls: RX path for ktls") originally intended as well). Thus, after patch we properly peek the first record: [pid 2046] wait4(2075, [pid 2075] socket(AF_INET, SOCK_STREAM, IPPROTO_IP) = 3 [pid 2075] socket(AF_INET, SOCK_STREAM, IPPROTO_IP) = 4 [pid 2075] bind(4, {sa_family=AF_INET, sin_port=htons(0), sin_addr=inet_addr("0.0.0.0")}, 16) = 0 [pid 2075] listen(4, 10) = 0 [pid 2075] getsockname(4, {sa_family=AF_INET, sin_port=htons(55115), sin_addr=inet_addr("0.0.0.0")}, [16]) = 0 [pid 2075] connect(3, {sa_family=AF_INET, sin_port=htons(55115), sin_addr=inet_addr("0.0.0.0")}, 16) = 0 [pid 2075] setsockopt(3, SOL_TCP, 0x1f /* TCP_??? */, [7564404], 4) = 0 [pid 2075] setsockopt(3, 0x11a /* SOL_?? */, 1, "\3\0033\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"..., 40) = 0 [pid 2075] accept(4, {sa_family=AF_INET, sin_port=htons(45732), sin_addr=inet_addr("127.0.0.1")}, [16]) = 5 [pid 2075] setsockopt(5, SOL_TCP, 0x1f /* TCP_??? */, [7564404], 4) = 0 [pid 2075] setsockopt(5, 0x11a /* SOL_?? */, 2, "\3\0033\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"..., 40) = 0 [pid 2075] close(4) = 0 [pid 2075] sendto(3, "test_read_peek", 14, 0, NULL, 0) = 14 [pid 2075] sendto(3, "_mult_recs\0", 11, 0, NULL, 0) = 11 [pid 2075] recvfrom(5, "test_read_peek", 64, MSG_PEEK, NULL, NULL) = 14 Fixes: c46234ebb4d1 ("tls: RX path for ktls") Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 8 +++++ tools/testing/selftests/net/tls.c | 49 +++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 9e918489f4fb..b9c6ecfbcfea 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -931,7 +931,15 @@ int tls_sw_recvmsg(struct sock *sk, if (control != TLS_RECORD_TYPE_DATA) goto recv_end; } + } else { + /* MSG_PEEK right now cannot look beyond current skb + * from strparser, meaning we cannot advance skb here + * and thus unpause strparser since we'd loose original + * one. + */ + break; } + /* If we have a new message from strparser, continue now. 
*/ if (copied >= target && !ctx->recv_pkt) break; diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index b3ebf2646e52..8fdfeafaf8c0 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -502,6 +502,55 @@ TEST_F(tls, recv_peek_multiple) EXPECT_EQ(memcmp(test_str, buf, send_len), 0); } +TEST_F(tls, recv_peek_multiple_records) +{ + char const *test_str = "test_read_peek_mult_recs"; + char const *test_str_first = "test_read_peek"; + char const *test_str_second = "_mult_recs"; + int len; + char buf[64]; + + len = strlen(test_str_first); + EXPECT_EQ(send(self->fd, test_str_first, len, 0), len); + + len = strlen(test_str_second) + 1; + EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); + + len = sizeof(buf); + memset(buf, 0, len); + EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1); + + /* MSG_PEEK can only peek into the current record. */ + len = strlen(test_str_first) + 1; + EXPECT_EQ(memcmp(test_str_first, buf, len), 0); + + len = sizeof(buf); + memset(buf, 0, len); + EXPECT_NE(recv(self->cfd, buf, len, 0), -1); + + /* Non-MSG_PEEK will advance strparser (and therefore record) + * however. + */ + len = strlen(test_str) + 1; + EXPECT_EQ(memcmp(test_str, buf, len), 0); + + /* MSG_MORE will hold current record open, so later MSG_PEEK + * will see everything. + */ + len = strlen(test_str_first); + EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len); + + len = strlen(test_str_second) + 1; + EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); + + len = sizeof(buf); + memset(buf, 0, len); + EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1); + + len = strlen(test_str) + 1; + EXPECT_EQ(memcmp(test_str, buf, len), 0); +} + TEST_F(tls, pollin) { char const *test_str = "test_poll"; -- GitLab From ddca24dfcf1bec608668dd44c45d49397b70f520 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Fri, 14 Sep 2018 23:46:12 +0200 Subject: [PATCH 1567/1692] net: dsa: mv88e6xxx: Fix ATU Miss Violation Fix a cut/paste error and a typo which results in ATU miss violations not being reported. Fixes: 0977644c5005 ("net: dsa: mv88e6xxx: Decode ATU problem interrupt") Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/global1.h | 2 +- drivers/net/dsa/mv88e6xxx/global1_atu.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 7c791c1da4b9..bef01331266f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -128,7 +128,7 @@ #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) -#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5) +#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5) #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) /* Offset 0x0C: ATU Data Register */ diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 307410898fc9..5200e4bdce93 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) chip->ports[entry.portvec].atu_member_violation++; } - if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { + if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU miss violation for %pM portvec %x\n", entry.mac, entry.portvec); -- GitLab From c73480910e9686a5c25155cb4d418d594b678196 Mon Sep 17 00:00:00 2001 From: zhong jiang Date: Mon, 17 Sep 2018 18:44:19 +0800 Subject: [PATCH 1568/1692] net: ethernet: Fix a unused function warning. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following compile warning: drivers/net/ethernet/microchip/lan743x_main.c:2964:12: warning: ‘lan743x_pm_suspend’ defined but not used [-Wunused-function] static int lan743x_pm_suspend(struct device *dev) drivers/net/ethernet/microchip/lan743x_main.c:2987:12: warning: ‘lan743x_pm_resume’ defined but not used [-Wunused-function] static int lan743x_pm_resume(struct device *dev) Signed-off-by: zhong jiang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/microchip/lan743x_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e7dce79ff2c9..001b5f714c1b 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev) lan743x_hardware_cleanup(adapter); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) { return bitrev16(crc16(0xFFFF, buf, len)); @@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev) static const struct dev_pm_ops lan743x_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) }; -#endif /*CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ static const struct pci_device_id lan743x_pcidev_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, @@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = { .id_table = lan743x_pcidev_tbl, .probe = lan743x_pcidev_probe, .remove = lan743x_pcidev_remove, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .driver.pm = &lan743x_pm_ops, #endif .shutdown = lan743x_pcidev_shutdown, -- GitLab From 83f365554e47997ec68dc4eca3f5dce525cd15c3 Mon Sep 17 00:00:00 2001 From: Vaibhav Nagarnaik Date: Fri, 7 Sep 2018 15:31:29 -0700 Subject: [PATCH 1569/1692] ring-buffer: Allow for rescheduling when removing pages When reducing ring buffer size, pages are removed by scheduling a work item on each CPU for the corresponding CPU ring buffer. After the pages are removed from ring buffer linked list, the pages are free()d in a tight loop. The loop does not give up CPU until all pages are removed. In a worst case behavior, when a lot of pages are to be freed, it can cause system stall. After the pages are removed from the list, the free() can happen while the work is rescheduled. Call cond_resched() in the loop to prevent the system hangup. Link: http://lkml.kernel.org/r/20180907223129.71994-1-vnagarnaik@google.com Cc: stable@vger.kernel.org Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic") Reported-by: Jason Behmer Signed-off-by: Vaibhav Nagarnaik Signed-off-by: Steven Rostedt (VMware) --- kernel/trace/ring_buffer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1d92d4a982fd..65bd4616220d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) tmp_iter_page = first_page; do { + cond_resched(); + to_remove_page = tmp_iter_page; rb_inc_page(cpu_buffer, &tmp_iter_page); -- GitLab From 3c499ea0c662e2f38aafbd4f516b08aab8cfa0e5 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Mon, 17 Sep 2018 13:37:33 -0400 Subject: [PATCH 1570/1692] drm/atomic: Use drm_drv_uses_atomic_modeset() for debugfs creation As pointed out by Daniel Vetter, we should be using drm_drv_uses_atomic_modeset() for determining whether or not we want to make the debugfs nodes for atomic instead of checking DRIVER_ATOMIC, as the former isn't an accurate representation of whether or not the driver is actually using atomic modesetting internally (even though it might not be exposing atomic capabilities).
Signed-off-by: Lyude Paul Cc: Daniel Vetter Cc: stable@vger.kernel.org Reviewed-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20180917173733.21293-1-lyude@redhat.com --- drivers/gpu/drm/drm_atomic.c | 2 +- drivers/gpu/drm/drm_debugfs.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3eb061e11e2e..018fcdb353d2 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) + if (!drm_drv_uses_atomic_modeset(dev)) return; list_for_each_entry(plane, &config->plane_list, head) { diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6f28fe58f169..373bd4c2b698 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, return ret; } - if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { + if (drm_drv_uses_atomic_modeset(dev)) { ret = drm_atomic_debugfs_init(minor); if (ret) { DRM_ERROR("Failed to create atomic debugfs files\n"); -- GitLab From 072222b488bc55cce92ff246bdc10115fd57d3ab Mon Sep 17 00:00:00 2001 From: Dominique Martinet Date: Tue, 11 Sep 2018 11:21:43 +0200 Subject: [PATCH 1571/1692] kcm: remove any offset before parsing messages The current code assumes kcm users know they need to look for the strparser offset within their bpf program, which is not documented anywhere and examples laying around do not do. The actual recv function does handle the offset well, so we can create a temporary clone of the skb and pull that one up as required for parsing. The pull itself has a cost if we are pulling beyond the head data, measured to 2-3% latency in a noisy VM with a local client stressing that path. The clone's impact seemed too small to measure. This bug can be exhibited easily by implementing a "trivial" kcm parser taking the first bytes as size, and on the client sending at least two such packets in a single write(). Note that bpf sockmap has the same problem, both for parse and for recv, so it would pulling twice or a real pull within the strparser logic if anyone cares about that. Signed-off-by: Dominique Martinet Signed-off-by: David S. 
Miller --- net/kcm/kcmsock.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 571d824e4e24..36c438b95955 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -381,8 +381,32 @@ static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb) { struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp); struct bpf_prog *prog = psock->bpf_prog; + struct sk_buff *clone_skb = NULL; + struct strp_msg *stm; + int rc; + + stm = strp_msg(skb); + if (stm->offset) { + skb = clone_skb = skb_clone(skb, GFP_ATOMIC); + if (!clone_skb) + return -ENOMEM; + + if (!pskb_pull(clone_skb, stm->offset)) { + rc = -ENOMEM; + goto out; + } + + /* reset cloned skb's offset for bpf programs using it */ + stm = strp_msg(clone_skb); + stm->offset = 0; + } + + rc = (*prog->bpf_func)(skb, prog->insnsi); +out: + if (clone_skb) + kfree_skb(clone_skb); - return (*prog->bpf_func)(skb, prog->insnsi); + return rc; } static int kcm_read_sock_done(struct strparser *strp, int err) -- GitLab From 3275b4df3c39f97ee3fe982c9bafd6f3b7ff0dfe Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 17 Sep 2018 18:43:42 -0700 Subject: [PATCH 1572/1692] Revert "kcm: remove any offset before parsing messages" This reverts commit 072222b488bc55cce92ff246bdc10115fd57d3ab. I just read that this causes regressions. Signed-off-by: David S. Miller --- net/kcm/kcmsock.c | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 36c438b95955..571d824e4e24 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -381,32 +381,8 @@ static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb) { struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp); struct bpf_prog *prog = psock->bpf_prog; - struct sk_buff *clone_skb = NULL; - struct strp_msg *stm; - int rc; - - stm = strp_msg(skb); - if (stm->offset) { - skb = clone_skb = skb_clone(skb, GFP_ATOMIC); - if (!clone_skb) - return -ENOMEM; - - if (!pskb_pull(clone_skb, stm->offset)) { - rc = -ENOMEM; - goto out; - } - - /* reset cloned skb's offset for bpf programs using it */ - stm = strp_msg(clone_skb); - stm->offset = 0; - } - - rc = (*prog->bpf_func)(skb, prog->insnsi); -out: - if (clone_skb) - kfree_skb(clone_skb); - return rc; + return (*prog->bpf_func)(skb, prog->insnsi); } static int kcm_read_sock_done(struct strparser *strp, int err) -- GitLab From 94235460f9eaefc4f0a3a4a6014fb4c6db4241cc Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Wed, 12 Sep 2018 14:58:20 +0800 Subject: [PATCH 1573/1692] r8169: Align ASPM/CLKREQ setting function with vendor driver There's a small delay after setting ASPM in vendor drivers, r8101 and r8168. In addition, those drivers enable ASPM before ClkReq, also change that to align with vendor driver. I haven't seen anything bad because of this, but I think it's better to keep in sync with vendor driver. Signed-off-by: Kai-Heng Feng Signed-off-by: David S.
Miller --- drivers/net/ethernet/realtek/r8169.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1d8631303b53..8195b1f5036d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4775,12 +4775,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) { if (enable) { - RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); } else { RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); } + + udelay(10); } static void rtl_hw_start_8168bb(struct rtl8169_private *tp) -- GitLab From 0866cd15029baa3331ba347794053472306e8eb3 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Wed, 12 Sep 2018 14:58:21 +0800 Subject: [PATCH 1574/1692] r8169: enable ASPM on RTL8106E The Intel SoC was prevented from entering lower idle state because of RTL8106E's ASPM was not enabled. So enable ASPM on RTL8106E (chip version 39). Now the Intel SoC can enter lower idle state, power consumption and temperature are much lower. Signed-off-by: Kai-Heng Feng Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8195b1f5036d..b0a803e96634 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5627,6 +5627,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) static void rtl_hw_start_8106(struct rtl8169_private *tp) { + rtl_hw_aspm_clkreq_enable(tp, false); + /* Force LAN exit from ASPM if Rx/Tx are not idle */ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); @@ -5635,6 +5637,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); rtl_pcie_state_l2l3_enable(tp, false); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8101(struct rtl8169_private *tp) -- GitLab From b1e3454d39f992e5409cd19f97782185950df6e7 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 12 Sep 2018 11:34:54 +0200 Subject: [PATCH 1575/1692] clk: x86: add "ether_clk" alias for Bay Trail / Cherry Trail Commit d31fd43c0f9a ("clk: x86: Do not gate clocks enabled by the firmware") causes all unclaimed PMC clocks on Cherry Trail devices to be on all the time, resulting on the device not being able to reach S0i2 or S0i3 when suspended. The reason for this commit is that on some Bay Trail / Cherry Trail devices the ethernet controller uses pmc_plt_clk_4. This commit adds an "ether_clk" alias, so that the relevant ethernet drivers can try to (optionally) use this, without needing X86 specific code / hacks, thus fixing ethernet on these devices without breaking S0i3 support. This commit uses clkdev_hw_create() to create the alias, mirroring the code for the already existing "mclk" alias for pmc_plt_clk_3. Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=193891#c102 Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=196861 Cc: Johannes Stezenbach Cc: Carlo Caione Reported-by: Johannes Stezenbach Acked-by: Stephen Boyd Reviewed-by: Andy Shevchenko Signed-off-by: Hans de Goede Signed-off-by: David S. 
Miller --- drivers/clk/x86/clk-pmc-atom.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index 08ef69945ffb..75151901ff7d 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -55,6 +55,7 @@ struct clk_plt_data { u8 nparents; struct clk_plt *clks[PMC_CLK_NUM]; struct clk_lookup *mclk_lookup; + struct clk_lookup *ether_clk_lookup; }; /* Return an index in parent table */ @@ -351,11 +352,20 @@ static int plt_clk_probe(struct platform_device *pdev) goto err_unreg_clk_plt; } + data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw, + "ether_clk", NULL); + if (!data->ether_clk_lookup) { + err = -ENOMEM; + goto err_drop_mclk; + } + plt_clk_free_parent_names_loop(parent_names, data->nparents); platform_set_drvdata(pdev, data); return 0; +err_drop_mclk: + clkdev_drop(data->mclk_lookup); err_unreg_clk_plt: plt_clk_unregister_loop(data, i); plt_clk_unregister_parents(data); @@ -369,6 +379,7 @@ static int plt_clk_remove(struct platform_device *pdev) data = platform_get_drvdata(pdev); + clkdev_drop(data->ether_clk_lookup); clkdev_drop(data->mclk_lookup); plt_clk_unregister_loop(data, PMC_CLK_NUM); plt_clk_unregister_parents(data); -- GitLab From c2f6f3ee7f22521fabc3295f51149bc3f4dd9202 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 12 Sep 2018 11:34:55 +0200 Subject: [PATCH 1576/1692] r8169: Get and enable optional ether_clk clock On some boards a platform clock is used as clock for the r8169 chip, this commit adds support for getting and enabling this clock (assuming it has an "ether_clk" alias set on it). This is related to commit d31fd43c0f9a ("clk: x86: Do not gate clocks enabled by the firmware") which is a previous attempt to fix this for some x86 boards, but this causes all Cherry Trail SoC using boards to not reach their lowest power states when suspending. This commit (together with an atom-pmc-clk driver commit adding the alias) fixes things properly by making the r8169 get the clock and enable it when it needs it. Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=193891#c102 Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=196861 Cc: Johannes Stezenbach Cc: Carlo Caione Reported-by: Johannes Stezenbach Acked-by: Stephen Boyd Reviewed-by: Andy Shevchenko Signed-off-by: Hans de Goede Signed-off-by: David S.
Miller --- drivers/net/ethernet/realtek/r8169.c | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b0a803e96634..bb529ff2ca81 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -665,6 +666,7 @@ struct rtl8169_private { u16 event_slow; const struct rtl_coalesce_info *coalesce_info; + struct clk *clk; struct mdio_ops { void (*write)(struct rtl8169_private *, int, int); @@ -7262,6 +7264,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) } } +static void rtl_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; @@ -7282,6 +7289,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); tp->supports_gmii = cfg->has_gmii; + /* Get the *optional* external "ether_clk" used on some boards */ + tp->clk = devm_clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(tp->clk)) { + rc = PTR_ERR(tp->clk); + if (rc == -ENOENT) { + /* clk-core allows NULL (for suspend / resume) */ + tp->clk = NULL; + } else if (rc == -EPROBE_DEFER) { + return rc; + } else { + dev_err(&pdev->dev, "failed to get clk: %d\n", rc); + return rc; + } + } else { + rc = clk_prepare_enable(tp->clk); + if (rc) { + dev_err(&pdev->dev, "failed to enable clk: %d\n", rc); + return rc; + } + + rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk, + tp->clk); + if (rc) + return rc; + } + /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { -- GitLab From 648e921888ad96ea3dc922739e96716ad3225d7f Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 12 Sep 2018 11:34:56 +0200 Subject: [PATCH 1577/1692] clk: x86: Stop marking clocks as CLK_IS_CRITICAL Commit d31fd43c0f9a ("clk: x86: Do not gate clocks enabled by the firmware"), which added the code to mark clocks as CLK_IS_CRITICAL, causes all unclaimed PMC clocks on Cherry Trail devices to be on all the time, resulting on the device not being able to reach S0i3 when suspended. The reason for this commit is that on some Bay Trail / Cherry Trail devices the r8169 ethernet controller uses pmc_plt_clk_4. Now that the clk-pmc-atom driver exports an "ether_clk" alias for pmc_plt_clk_4 and the r8169 driver has been modified to get and enable this clock (if present) the marking of the clocks as CLK_IS_CRITICAL is no longer necessary. This commit removes the CLK_IS_CRITICAL marking, fixing Cherry Trail devices not being able to reach S0i3 greatly decreasing their battery drain when suspended. Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=193891#c102 Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=196861 Cc: Johannes Stezenbach Cc: Carlo Caione Reported-by: Johannes Stezenbach Reviewed-by: Andy Shevchenko Acked-by: Stephen Boyd Signed-off-by: Hans de Goede Signed-off-by: David S. 
Miller --- drivers/clk/x86/clk-pmc-atom.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index 75151901ff7d..d977193842df 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -187,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; spin_lock_init(&pclk->lock); - /* - * If the clock was already enabled by the firmware mark it as critical - * to avoid it being gated by the clock framework if no driver owns it. - */ - if (plt_clk_is_enabled(&pclk->hw)) - init.flags |= CLK_IS_CRITICAL; - ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); if (ret) { pclk = ERR_PTR(ret); -- GitLab From 922005c7f50e7f4b2a6dbc182e9c575b4f92396b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Mon, 17 Sep 2018 22:00:24 +0200 Subject: [PATCH 1578/1692] qmi_wwan: set DTR for modems in forced USB2 mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Recent firmware revisions have added the ability to force these modems to USB2 mode, hiding their SuperSpeed capabilities from the host. The driver has been using the SuperSpeed capability, as shown by the bcdUSB field of the device descriptor, to detect the need to enable the DTR quirk. This method fails when the modems are forced to USB2 mode by the modem firmware. Fix by unconditionally enabling the DTR quirk for the affected device IDs. Reported-by: Fred Veldini Reported-by: Deshu Wen Signed-off-by: Bjørn Mork Reported-by: Fred Veldini Reported-by: Deshu Wen Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/qmi_wwan.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e3270deecec2..533b6fb8d923 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1213,13 +1213,13 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ - {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ - {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ - {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ -- GitLab From db7c8f1e5f1c1a5e1aaec04e50be6721c1cb4dff Mon Sep 17 00:00:00 2001 
From: Colin Xu Date: Mon, 17 Sep 2018 12:19:03 +0800 Subject: [PATCH 1579/1692] drm/i915/gvt: Init PHY related registers for BXT Recent patch fixed the call trace "ERROR Port B enabled but PHY powered down? (PHY_CTL 00000000)". but introduced another similar call trace shown as: "ERROR Port C enabled but PHY powered down? (PHY_CTL 00000200)". The call trace will appear when host and guest enabled different ports, i.e. host using PORT C or neither PORT is enabled, while guest is always using PORT B as simulated by gvt. The issue was actually covered up before that commit and reveals itself now that the commit does the right thing. On BXT, some PHY registers are initialized by vbios, before i915 loaded. Later i915 will re-program some, or skip some based on the implementation. The initialized mmio for guest i915 is done by gvt, based on the snapshot taken from host. If host and guest have different PORT enabled, some DPIO PHY mmios that gvt initialized for guest i915 will not match the simulated monitor for guest, which leads to guest i915 printing the calltrace when it's trying to enable PHY and PORT. The solution is to init these DPIO PHY registers to default value, then guest i915 will program them to reasonable value based on the default powerwell table and enabled PORT. Together with the old patch, all similar call trace in guest kernel on BXT can be resolved. v2: Move PHY register init to intel_vgpu_reset_mmio (Min) v3: Do not delete empty line in issue fix patch. (zhenyu) Fixes: c8ab5ac30ccc ("drm/i915/gvt: Make correct handling to vreg BXT_PHY_CTL_FAMILY") Reviewed-by: He, Min Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/mmio.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 994366035364..9bb9a85c992c 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) /* set the bit 0:2(Core C-State ) to C0 */ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; + + if (IS_BROXTON(vgpu->gvt->dev_priv)) { + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= + ~(BIT(0) | BIT(1)); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= + ~BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= + ~BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= + BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= + BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= + BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK; + } } else { #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) /* only reset the engine related, so starting with 0x44200 -- GitLab From d817de3bc186c305b8e72a52547df2971c06499d Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Fri, 14 Sep 2018 15:12:23 +0800 Subject: [PATCH 1580/1692] drm/i915/gvt: Add GEN9_CLKGATE_DIS_4 to default BXT mmio handler Host prints lots of untracked MMIO at 0x4653c when creating linux guest.
"gvt: vgpu 2: untracked MMIO 0004653c len 4" GEN9_CLKGATE_DIS_4 (0x4653c) is accessed by i915 for gmbus clockgating. However vgpu doesn't support any clockgating or powergating operations on the related mmio access trap, so it needs to be added to the default handler. GEN9_CLKGATE_DIS_4 is accessed in bxt_gmbus_clock_gating() which only applies to GEN9_LP so doesn't show the warning on other platforms. The solution is to add it to default handler init_bxt_mmio_info(). Reviewed-by: He, Min Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 72afa518edd9..94c1089ecf59 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3210,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); + MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT); MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); -- GitLab From a1ac5f0943019bfd76345fe05a42cbc400da685c Mon Sep 17 00:00:00 2001 From: Weinan Li Date: Mon, 17 Sep 2018 09:46:14 +0800 Subject: [PATCH 1581/1692] drm/i915/gvt: request srcu_read_lock before checking if one gfn is valid Fix the suspicious RCU usage issue in intel_vgpu_emulate_mmio_write. Here we need to request the srcu read lock of kvm->srcu before doing gfn_to_memslot(). The detailed log is as below: [ 218.710688] ============================= [ 218.710690] WARNING: suspicious RCU usage [ 218.710693] 4.14.15-dd+ #314 Tainted: G U [ 218.710695] ----------------------------- [ 218.710697] ./include/linux/kvm_host.h:575 suspicious rcu_dereference_check() usage! [ 218.710699] other info that might help us debug this: [ 218.710702] rcu_scheduler_active = 2, debug_locks = 1 [ 218.710704] 1 lock held by qemu-system-x86/2144: [ 218.710706] #0: (&gvt->lock){+.+.}, at: [] intel_vgpu_emulate_mmio_write+0x5a/0x2d0 [ 218.710721] stack backtrace: [ 218.710724] CPU: 0 PID: 2144 Comm: qemu-system-x86 Tainted: G U 4.14.15-dd+ #314 [ 218.710727] Hardware name: Dell Inc. OptiPlex 7040/0Y7WYT, BIOS 1.1.1 10/07/2015 [ 218.710729] Call Trace: [ 218.710734] dump_stack+0x7c/0xb3 [ 218.710739] gfn_to_memslot+0x15f/0x170 [ 218.710743] kvm_is_visible_gfn+0xa/0x30 [ 218.710746] intel_vgpu_emulate_gtt_mmio_write+0x267/0x3c0 [ 218.710751] ?
__mutex_unlock_slowpath+0x3b/0x260 [ 218.710754] intel_vgpu_emulate_mmio_write+0x182/0x2d0 [ 218.710759] intel_vgpu_rw+0xba/0x170 [kvmgt] [ 218.710763] intel_vgpu_write+0x14d/0x1a0 [kvmgt] [ 218.710767] __vfs_write+0x23/0x130 [ 218.710770] vfs_write+0xb0/0x1b0 [ 218.710774] SyS_pwrite64+0x73/0x90 [ 218.710777] entry_SYSCALL_64_fastpath+0x25/0x9c [ 218.710780] RIP: 0033:0x7f33e8a91da3 [ 218.710783] RSP: 002b:00007f33dddc8700 EFLAGS: 00000293 v2: add 'Fixes' tag, refine log format.(Zhenyu) Fixes: cc753fbe1ac4 ("drm/i915/gvt: validate gfn before set shadow page") Reviewed-by: Zhenyu Wang Signed-off-by: Weinan Li Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c7afee37b2b8..9ad89e38f6c0 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1833,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) { struct kvmgt_guest_info *info; struct kvm *kvm; + int idx; + bool ret; if (!handle_valid(handle)) return false; @@ -1840,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) info = (struct kvmgt_guest_info *)handle; kvm = info->kvm; - return kvm_is_visible_gfn(kvm, gfn); + idx = srcu_read_lock(&kvm->srcu); + ret = kvm_is_visible_gfn(kvm, gfn); + srcu_read_unlock(&kvm->srcu, idx); + return ret; } struct intel_gvt_mpt kvmgt_mpt = { -- GitLab From 7759ca3aac79648d01c9edcb3b00503c02bec2f5 Mon Sep 17 00:00:00 2001 From: Zhipeng Gong Date: Mon, 17 Sep 2018 15:45:08 +0800 Subject: [PATCH 1582/1692] drm/i915/gvt: clear ggtt entries when destroy vgpu When one vgpu is destroyed, its ggtt entries are not cleared. This patch clears ggtt entries to avoid information leak. v2: add 'Fixes' tag (Zhenyu) Fixes: 2707e4446688 ("drm/i915/gvt: vGPU graphics memory virtualization") Signed-off-by: Zhipeng Gong Reviewed-by: Hang Yuan Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/vgpu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index a4e8e3cf74fd..c628be05fbfe 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_submission(vgpu); intel_vgpu_clean_display(vgpu); intel_vgpu_clean_opregion(vgpu); + intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_clean_gtt(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu); intel_vgpu_free_resource(vgpu); -- GitLab From 30bfd93062814d6767e452a8f5ddcd97f7e38c7e Mon Sep 17 00:00:00 2001 From: Peter Oskolkov Date: Mon, 17 Sep 2018 10:20:53 -0700 Subject: [PATCH 1583/1692] net/ipv6: do not copy dst flags on rt init DST_NOCOUNT in dst_entry::flags tracks whether the entry counts toward route cache size (net->ipv6.sysctl.ip6_rt_max_size). If the flag is NOT set, dst_ops::pcpuc_entries counter is incremented in dist_init() and decremented in dst_destroy(). This flag is tied to allocation/deallocation of dst_entry and should not be copied from another dst/route. Otherwise it can happen that dst_ops::pcpuc_entries counter grows until no new routes can be allocated because the counter reached ip6_rt_max_size due to DST_NOCOUNT not set and thus no counter decrements on gc-ed routes. 
Fixes: 3b6761d18bc1 ("net/ipv6: Move dst flags to booleans in fib entries") Cc: David Ahern Acked-by: Wei Wang Signed-off-by: Peter Oskolkov Reviewed-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/route.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3eed045c65a5..480a79f47c52 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -946,8 +946,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort) static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) { - rt->dst.flags |= fib6_info_dst_flags(ort); - if (ort->fib6_flags & RTF_REJECT) { ip6_rt_init_dst_reject(rt, ort); return; -- GitLab From 4a3e85f2674cbfb81052059107d0165269778e2f Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 17 Sep 2018 16:31:30 +0200 Subject: [PATCH 1584/1692] mtd: devices: m25p80: Make sure the buffer passed in op is DMA-able As documented in spi-mem.h, spi_mem_op->data.buf.{in,out} must be DMA-able, and commit 4120f8d158ef ("mtd: spi-nor: Use the spi_mem_xx() API") failed to follow this rule as buffers passed to ->{read,write}_reg() are usually placed on the stack. Fix that by allocating a scratch buffer and copying the data around. Fixes: 4120f8d158ef ("mtd: spi-nor: Use the spi_mem_xx() API") Reported-by: Jarkko Nikula Cc: Signed-off-by: Boris Brezillon Tested-by: Jarkko Nikula Reviewed-by: Jarkko Nikula --- drivers/mtd/devices/m25p80.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index cbfafc453274..270d3c9580c5 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len) struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(len, val, 1)); + SPI_MEM_OP_DATA_IN(len, NULL, 1)); + void *scratchbuf; int ret; + scratchbuf = kmalloc(len, GFP_KERNEL); + if (!scratchbuf) + return -ENOMEM; + + op.data.buf.in = scratchbuf; ret = spi_mem_exec_op(flash->spimem, &op); if (ret < 0) dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret, code); + else + memcpy(val, scratchbuf, len); + + kfree(scratchbuf); return ret; } @@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(len, buf, 1)); + SPI_MEM_OP_DATA_OUT(len, NULL, 1)); + void *scratchbuf; + int ret; - return spi_mem_exec_op(flash->spimem, &op); + scratchbuf = kmemdup(buf, len, GFP_KERNEL); + if (!scratchbuf) + return -ENOMEM; + + op.data.buf.out = scratchbuf; + ret = spi_mem_exec_op(flash->spimem, &op); + kfree(scratchbuf); + + return ret; } static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, -- GitLab From 57078338b2e4c07fb76bef23369cae0acfb1f7bc Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 18 Sep 2018 16:20:18 +1000 Subject: [PATCH 1585/1692] drm: fix drm_drv_uses_atomic_modeset on non modesetting drivers. vgem seems to oops on the intel CI due to the vgem debugfs init hitting this path now. Check if we have mode_config funcs before checking one. 
Signed-off-by: Dave Airlie Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20180918062018.24942-1-airlied@gmail.com --- include/drm/drm_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 46a8009784df..152b3055e9e1 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature) static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_ATOMIC) || - dev->mode_config.funcs->atomic_commit != NULL; + (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); } -- GitLab From 6d41907c630d3196be89c9ed5a7f8258486b3eaf Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 14 Sep 2018 16:47:14 -0300 Subject: [PATCH 1586/1692] tools lib bpf: Provide wrapper for strerror_r to build in !_GNU_SOURCE systems Same problem that got fixed in a similar fashion in tools/perf/ in c8b5f2c96d1b ("tools: Introduce str_error_r()"), fix it in the same way, licensing needs to be sorted out to libbpf to use libapi, so, for this simple case, just get the same wrapper in tools/lib/bpf. This makes libbpf and its users (bpftool, selftests, perf) to build again in Alpine Linux 3.[45678] and edge. Acked-by: Alexei Starovoitov Cc: Adrian Hunter Cc: Daniel Borkmann Cc: David Ahern Cc: Hendrik Brueckner Cc: Jakub Kicinski Cc: Jiri Olsa Cc: Martin KaFai Lau Cc: Namhyung Kim Cc: Quentin Monnet Cc: Thomas Richter Cc: Wang Nan Cc: Yonghong Song Fixes: 1ce6a9fc1549 ("bpf: fix build error in libbpf with EXTRA_CFLAGS="-Wp, -D_FORTIFY_SOURCE=2 -O2"") Link: https://lkml.kernel.org/r/20180917151636.GA21790@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/bpf/Build | 2 +- tools/lib/bpf/libbpf.c | 20 ++++++++++---------- tools/lib/bpf/str_error.c | 18 ++++++++++++++++++ tools/lib/bpf/str_error.h | 6 ++++++ 4 files changed, 35 insertions(+), 11 deletions(-) create mode 100644 tools/lib/bpf/str_error.c create mode 100644 tools/lib/bpf/str_error.h diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index 13a861135127..6eb9bacd1948 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1 +1 @@ -libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o +libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2abd0f112627..bdb94939fd60 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -50,6 +50,7 @@ #include "libbpf.h" #include "bpf.h" #include "btf.h" +#include "str_error.h" #ifndef EM_BPF #define EM_BPF 247 @@ -469,7 +470,7 @@ static int bpf_object__elf_init(struct bpf_object *obj) obj->efile.fd = open(obj->path, O_RDONLY); if (obj->efile.fd < 0) { char errmsg[STRERR_BUFSIZE]; - char *cp = strerror_r(errno, errmsg, sizeof(errmsg)); + char *cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to open %s: %s\n", obj->path, cp); return -errno; @@ -810,8 +811,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj) data->d_size, name, idx); if (err) { char errmsg[STRERR_BUFSIZE]; - char *cp = strerror_r(-err, errmsg, - sizeof(errmsg)); + char *cp = str_error(-err, errmsg, sizeof(errmsg)); pr_warning("failed to alloc program %s (%s): %s", name, obj->path, cp); @@ -1140,7 +1140,7 @@ bpf_object__create_maps(struct bpf_object *obj) *pfd = bpf_create_map_xattr(&create_attr); if (*pfd < 0 && create_attr.btf_key_type_id) { - cp 
= strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", map->name, cp, errno); create_attr.btf_fd = 0; @@ -1155,7 +1155,7 @@ bpf_object__create_maps(struct bpf_object *obj) size_t j; err = *pfd; - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to create map (name: '%s'): %s\n", map->name, cp); for (j = 0; j < i; j++) @@ -1339,7 +1339,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type, } ret = -LIBBPF_ERRNO__LOAD; - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("load bpf program failed: %s\n", cp); if (log_buf && log_buf[0] != '\0') { @@ -1654,7 +1654,7 @@ static int check_path(const char *path) dir = dirname(dname); if (statfs(dir, &st_fs)) { - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to statfs %s: %s\n", dir, cp); err = -errno; } @@ -1690,7 +1690,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, } if (bpf_obj_pin(prog->instances.fds[instance], path)) { - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to pin program: %s\n", cp); return -errno; } @@ -1708,7 +1708,7 @@ static int make_dir(const char *path) err = -errno; if (err) { - cp = strerror_r(-err, errmsg, sizeof(errmsg)); + cp = str_error(-err, errmsg, sizeof(errmsg)); pr_warning("failed to mkdir %s: %s\n", path, cp); } return err; @@ -1770,7 +1770,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path) } if (bpf_obj_pin(map->fd, path)) { - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to pin map: %s\n", cp); return -errno; } diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c new file mode 100644 index 000000000000..b8798114a357 --- /dev/null +++ b/tools/lib/bpf/str_error.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: LGPL-2.1 +#undef _GNU_SOURCE +#include +#include +#include "str_error.h" + +/* + * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl + * libc, while checking strerror_r() return to avoid having to check this in + * all places calling it. + */ +char *str_error(int err, char *dst, int len) +{ + int ret = strerror_r(err, dst, len); + if (ret) + snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret); + return dst; +} diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h new file mode 100644 index 000000000000..355b1db571d1 --- /dev/null +++ b/tools/lib/bpf/str_error.h @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: LGPL-2.1 +#ifndef BPF_STR_ERROR +#define BPF_STR_ERROR + +char *str_error(int err, char *dst, int len); +#endif // BPF_STR_ERROR -- GitLab From 169e366c08084aeb49a3793c892c9abfaa47eeda Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sun, 16 Sep 2018 16:17:05 +0100 Subject: [PATCH 1587/1692] perf Documentation: Fix out-of-tree asciidoctor man page generation The dependency for the man page rule using asciidoctor incorrectly specifies a source file in $(OUTPUT). When building out-of-tree, the source file is not found, resulting in a fall-back to the following rule which uses xmlto. 
Signed-off-by: Ben Hutchings Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180916151704.GF4765@decadent.org.uk Fixes: ffef80ecf89f ("perf Documentation: Support for asciidoctor") Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index 42261a9b280e..ac841bc5c35b 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -280,7 +280,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt mv $@+ $@ ifdef USE_ASCIIDOCTOR -$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt +$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ $(ASCIIDOC) -b manpage -d manpage \ $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ -- GitLab From 753694a8df318f204a0ac1303de136def16f2e9c Mon Sep 17 00:00:00 2001 From: Xiaochen Shen Date: Sat, 15 Sep 2018 14:58:19 -0700 Subject: [PATCH 1588/1692] x86/intel_rdt: Fix data type in parsing callbacks Each resource is associated with a parsing callback to parse the data provided from user space when writing schemata file. The 'data' parameter in the callbacks is defined as a void pointer which is error prone due to lack of type check. parse_bw() processes the 'data' parameter as a string while its caller actually passes the parameter as a pointer to struct rdt_cbm_parse_data. Thus, parse_bw() takes wrong data and causes failure of parsing MBA throttle value. To fix the issue, the 'data' parameter in all parsing callbacks is defined and handled as a pointer to struct rdt_parse_data (renamed from struct rdt_cbm_parse_data). Fixes: 7604df6e16ae ("x86/intel_rdt: Support flexible data to parsing callbacks") Fixes: 9ab9aa15c309 ("x86/intel_rdt: Ensure requested schemata respects mode") Signed-off-by: Xiaochen Shen Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Chen Yu" Link: https://lkml.kernel.org/r/1537048707-76280-2-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt.h | 16 ++++++++++++---- arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c | 21 ++++++++------------- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h index 4e588f36228f..78266c798280 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/intel_rdt.h @@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e) e <= QOS_L3_MBM_LOCAL_EVENT_ID); } +struct rdt_parse_data { + struct rdtgroup *rdtgrp; + char *buf; +}; + /** * struct rdt_resource - attributes of an RDT resource * @rid: The index of the resource @@ -423,16 +428,19 @@ struct rdt_resource { struct rdt_cache cache; struct rdt_membw membw; const char *format_str; - int (*parse_ctrlval) (void *data, struct rdt_resource *r, - struct rdt_domain *d); + int (*parse_ctrlval)(struct rdt_parse_data *data, + struct rdt_resource *r, + struct rdt_domain *d); struct list_head evt_list; int num_rmid; unsigned int mon_scale; unsigned long fflags; }; -int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d); -int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d); +int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); +int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); extern struct mutex 
rdtgroup_mutex; diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c index af358ca05160..edd5761f7336 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c @@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) return true; } -int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d) +int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) { - unsigned long data; - char *buf = _buf; + unsigned long bw_val; if (d->have_new_ctrl) { rdt_last_cmd_printf("duplicate domain %d\n", d->id); return -EINVAL; } - if (!bw_validate(buf, &data, r)) + if (!bw_validate(data->buf, &bw_val, r)) return -EINVAL; - d->new_ctrl = data; + d->new_ctrl = bw_val; d->have_new_ctrl = true; return 0; @@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) return true; } -struct rdt_cbm_parse_data { - struct rdtgroup *rdtgrp; - char *buf; -}; - /* * Read one cache bit mask (hex). Check that it is valid for the current * resource type. */ -int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d) +int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) { - struct rdt_cbm_parse_data *data = _data; struct rdtgroup *rdtgrp = data->rdtgrp; u32 cbm_val; @@ -195,7 +190,7 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d) static int parse_line(char *line, struct rdt_resource *r, struct rdtgroup *rdtgrp) { - struct rdt_cbm_parse_data data; + struct rdt_parse_data data; char *dom = NULL, *id; struct rdt_domain *d; unsigned long dom_id; -- GitLab From f968dc119a159a95628a20de2a2dcc913d0a82d7 Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Sat, 15 Sep 2018 14:58:20 -0700 Subject: [PATCH 1589/1692] x86/intel_rdt: Fix size reporting of MBA resource Chen Yu reported a divide-by-zero error when accessing the 'size' resctrl file when a MBA resource is enabled. divide error: 0000 [#1] SMP PTI CPU: 93 PID: 1929 Comm: cat Not tainted 4.19.0-rc2-debug-rdt+ #25 RIP: 0010:rdtgroup_cbm_to_size+0x7e/0xa0 Call Trace: rdtgroup_size_show+0x11a/0x1d0 seq_read+0xd8/0x3b0 Quoting Chen Yu's report: This is because for MB resource, the r->cache.cbm_len is zero, thus calculating size in rdtgroup_cbm_to_size() will trigger the exception. Fix this issue in the 'size' file by getting correct memory bandwidth value which is in MBps when MBA software controller is enabled or in percentage when MBA software controller is disabled. 
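For illustration, a minimal user-space sketch of the guarded computation (struct res, RES_MBA and the numbers below are simplified stand-ins, not the kernel's rdt_resource; the point is only that the CBM-to-bytes conversion has to be bypassed for a resource whose cbm_len is zero):

/* gcc -Wall -o size_show size_show.c */
#include <stdio.h>
#include <stdint.h>

enum res_id { RES_L3, RES_MBA };

struct res {
        enum res_id rid;
        unsigned int cbm_len;           /* 0 for MBA, which has no bitmask */
        unsigned int cache_size;        /* bytes, cache resources only */
};

/* rough stand-in for rdtgroup_cbm_to_size(): divides by cbm_len */
static unsigned int cbm_to_size(const struct res *r, uint32_t cbm)
{
        unsigned int bits = 0;

        while (cbm) {                   /* popcount of the allocated mask */
                bits += cbm & 1;
                cbm >>= 1;
        }
        return r->cache_size / r->cbm_len * bits;
}

static unsigned int size_show(const struct res *r, uint32_t ctrl)
{
        /* MBA: cbm_len is 0, so report the bandwidth value directly */
        if (r->rid == RES_MBA)
                return ctrl;
        return cbm_to_size(r, ctrl);
}

int main(void)
{
        struct res l3  = { RES_L3, 11, 11 * 1024 * 1024 };
        struct res mba = { RES_MBA, 0, 0 };

        printf("L3:  %u bytes\n", size_show(&l3, 0x7ff));
        printf("MBA: %u (MBps or %%)\n", size_show(&mba, 70));
        return 0;
}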
Fixes: d9b48c86eb38 ("x86/intel_rdt: Display resource groups' allocations in bytes") Reported-by: Chen Yu Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Tested-by: Chen Yu Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Xiaochen Shen" Link: https://lkml.kernel.org/r/20180904174614.26682-1-yu.c.chen@intel.com Link: https://lkml.kernel.org/r/1537048707-76280-3-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index b799c00bef09..32e8bbdf2400 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1155,8 +1155,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, struct rdt_resource *r; struct rdt_domain *d; unsigned int size; - bool sep = false; - u32 cbm; + bool sep; + u32 ctrl; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { @@ -1174,6 +1174,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, } for_each_alloc_enabled_rdt_resource(r) { + sep = false; seq_printf(s, "%*s:", max_name_width, r->name); list_for_each_entry(d, &r->domains, list) { if (sep) @@ -1181,8 +1182,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { size = 0; } else { - cbm = d->ctrl_val[rdtgrp->closid]; - size = rdtgroup_cbm_to_size(r, d, cbm); + ctrl = (!is_mba_sc(r) ? + d->ctrl_val[rdtgrp->closid] : + d->mbps_val[rdtgrp->closid]); + if (r->rid == RDT_RESOURCE_MBA) + size = ctrl; + else + size = rdtgroup_cbm_to_size(r, d, ctrl); } seq_printf(s, "%d=%u", d->id, size); sep = true; -- GitLab From c793da8e4c62d2c002a79c47f44efead450cbcae Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Sat, 15 Sep 2018 14:58:21 -0700 Subject: [PATCH 1590/1692] x86/intel_rdt: Global closid helper to support future fixes The number of CLOSIDs supported by a system is the minimum number of CLOSIDs supported by any of its resources. Care should be taken when iterating over the CLOSIDs of a resource since it may be that the number of CLOSIDs supported on the system is less than the number of CLOSIDs supported by the resource. Introduce a helper function that can be used to query the number of CLOSIDs that is supported by all resources, irrespective of how many CLOSIDs are supported by a particular resource. 
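For illustration, a minimal user-space sketch of the idea behind the helper (the resource table below is made up; the kernel derives the value from its own list of allocation-enabled resources):

/* gcc -Wall -o closids closids.c */
#include <stdio.h>

struct res {
        const char *name;
        int num_closid;
};

/* system-wide count: the minimum over all allocation-enabled resources */
static int closids_supported(const struct res *r, int nr)
{
        int i, min = r[0].num_closid;

        for (i = 1; i < nr; i++)
                if (r[i].num_closid < min)
                        min = r[i].num_closid;
        return min;
}

int main(void)
{
        struct res res[] = {
                { "L3",  16 },
                { "L2",   8 },
                { "MBA",  8 },
        };

        /*
         * Looping up to a per-resource num_closid (16 for L3 here) would
         * touch CLOSIDs that are never allocated system-wide.
         */
        printf("closids_supported() = %d\n", closids_supported(res, 3));
        return 0;
}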
Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Xiaochen Shen" Cc: "Chen Yu" Link: https://lkml.kernel.org/r/1537048707-76280-4-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt.h | 1 + arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h index 78266c798280..285eb3ec4200 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/intel_rdt.h @@ -544,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int update_domains(struct rdt_resource *r, int closid); +int closids_supported(void); void closid_free(int closid); int alloc_rmid(void); void free_rmid(u32 rmid); diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 32e8bbdf2400..b372923eb209 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...) * limited as the number of resources grows. */ static int closid_free_map; +static int closid_free_map_len; + +int closids_supported(void) +{ + return closid_free_map_len; +} static void closid_init(void) { @@ -111,6 +117,7 @@ static void closid_init(void) /* CLOSID 0 is always reserved for the default group */ closid_free_map &= ~1; + closid_free_map_len = rdt_min_closid; } static int closid_alloc(void) -- GitLab From 47d53b184aee983ab9492503da11b0a81b19145b Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Sat, 15 Sep 2018 14:58:22 -0700 Subject: [PATCH 1591/1692] x86/intel_rdt: Fix invalid mode warning when multiple resources are managed When multiple resources are managed by RDT, the number of CLOSIDs used is the minimum of the CLOSIDs supported by each resource. In the function rdt_bit_usage_show(), the annotated bitmask is created to depict how the CAT supporting caches are being used. During this annotated bitmask creation, each resource group is queried for its mode that is used as a label in the annotated bitmask. The maximum number of resource groups is currently assumed to be the number of CLOSIDs supported by the resource for which the information is being displayed. This is incorrect since the number of active CLOSIDs is the minimum across all resources. If information for a cache instance with more CLOSIDs than another is being generated we thus encounter a warning like: invalid mode for closid 8 WARNING: CPU: 88 PID: 1791 at [SNIP]/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c :827 rdt_bit_usage_show+0x221/0x2b0 Fix this by ensuring that only the number of supported CLOSIDs are considered. 
Fixes: e651901187ab8 ("x86/intel_rdt: Introduce "bit_usage" to display cache allocations details") Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Xiaochen Shen" Cc: "Chen Yu" Link: https://lkml.kernel.org/r/1537048707-76280-5-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index b372923eb209..ea91750ba27f 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -809,7 +809,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, sw_shareable = 0; exclusive = 0; seq_printf(seq, "%d=", dom->id); - for (i = 0; i < r->num_closid; i++, ctrl++) { + for (i = 0; i < closids_supported(); i++, ctrl++) { if (!closid_allocated(i)) continue; mode = rdtgroup_mode_by_closid(i); -- GitLab From 70479c012b67b89e219c40eddc5dc338b7c447a3 Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Sat, 15 Sep 2018 14:58:23 -0700 Subject: [PATCH 1592/1692] x86/intel_rdt: Fix unchecked MSR access When a new resource group is created, it is initialized with sane defaults that currently assume the resource being initialized is a CAT resource. This code path is also followed by a MBA resource that is not allocated the same as a CAT resource and as a result we encounter the following unchecked MSR access error: unchecked MSR access error: WRMSR to 0xd51 (tried to write 0x0000 000000000064) at rIP: 0xffffffffae059994 (native_write_msr+0x4/0x20) Call Trace: mba_wrmsr+0x41/0x80 update_domains+0x125/0x130 rdtgroup_mkdir+0x270/0x500 Fix the above by ensuring the initial allocation is only attempted on a CAT resource. 
Fixes: 95f0b77ef ("x86/intel_rdt: Initialize new resource group with sane defaults") Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Xiaochen Shen" Cc: "Chen Yu" Link: https://lkml.kernel.org/r/1537048707-76280-6-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index ea91750ba27f..74821bc457c0 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -2349,6 +2349,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) u32 *ctrl; for_each_alloc_enabled_rdt_resource(r) { + /* + * Only initialize default allocations for CBM cache + * resources + */ + if (r->rid == RDT_RESOURCE_MBA) + continue; list_for_each_entry(d, &r->domains, list) { d->have_new_ctrl = false; d->new_ctrl = r->cache.shareable_bits; @@ -2386,6 +2392,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) } for_each_alloc_enabled_rdt_resource(r) { + /* + * Only initialize default allocations for CBM cache + * resources + */ + if (r->rid == RDT_RESOURCE_MBA) + continue; ret = update_domains(r, rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("failed to initialize allocations\n"); -- GitLab From 32d736abed4febff4b6bf85d5d240ee24d254322 Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Sat, 15 Sep 2018 14:58:24 -0700 Subject: [PATCH 1593/1692] x86/intel_rdt: Do not allow pseudo-locking of MBA resource A system supporting pseudo-locking may have MBA as well as CAT resources of which only the CAT resources could support cache pseudo-locking. When the schemata to be pseudo-locked is provided it should be checked that that schemata does not attempt to pseudo-lock a MBA resource. Fixes: e0bdfe8e3 ("x86/intel_rdt: Support creation/removal of pseudo-locked region") Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Xiaochen Shen" Cc: "Chen Yu" Link: https://lkml.kernel.org/r/1537048707-76280-7-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c index edd5761f7336..0f53049719cd 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c @@ -195,6 +195,12 @@ static int parse_line(char *line, struct rdt_resource *r, struct rdt_domain *d; unsigned long dom_id; + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + r->rid == RDT_RESOURCE_MBA) { + rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); + return -EINVAL; + } + next: if (!line || line[0] == '\0') return 0; -- GitLab From f0df4e1acf3d721958dcafb2c9c0bdf25189068d Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Sat, 15 Sep 2018 14:58:25 -0700 Subject: [PATCH 1594/1692] x86/intel_rdt: Fix incorrect loop end condition A loop is used to check if a CAT resource's CBM of one CLOSID overlaps with the CBM of another CLOSID of the same resource. The loop is run over all CLOSIDs supported by the resource. The problem with running the loop over all CLOSIDs supported by the resource is that its number of supported CLOSIDs may be more than the number of supported CLOSIDs on the system, which is the minimum number of CLOSIDs supported across all resources. 
Fix the loop to only consider the number of system supported CLOSIDs, not all that are supported by the resource. Fixes: 49f7b4efa ("x86/intel_rdt: Enable setting of exclusive mode") Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Xiaochen Shen" Cc: "Chen Yu" Link: https://lkml.kernel.org/r/1537048707-76280-8-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 74821bc457c0..afd93d45e21b 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -996,7 +996,7 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, /* Check for overlap with other resource groups */ ctrl = d->ctrl_val; - for (i = 0; i < r->num_closid; i++, ctrl++) { + for (i = 0; i < closids_supported(); i++, ctrl++) { ctrl_b = (unsigned long *)ctrl; mode = rdtgroup_mode_by_closid(i); if (closid_allocated(i) && i != closid && -- GitLab From 939b90b20bc87e199b6b53942764b987289b87ce Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Sat, 15 Sep 2018 14:58:26 -0700 Subject: [PATCH 1595/1692] x86/intel_rdt: Fix exclusive mode handling of MBA resource It is possible for a resource group to consist out of MBA as well as CAT/CDP resources. The "exclusive" resource mode only applies to the CAT/CDP resources since MBA allocations cannot be specified to overlap or not. When a user requests a resource group to become "exclusive" then it can only be successful if there are CAT/CDP resources in the group and none of their CBMs associated with the group's CLOSID overlaps with any other resource group. Fix the "exclusive" mode setting by failing if there isn't any CAT/CDP resource in the group and ensuring that the CBM checking is only done on CAT/CDP resources. 
Fixes: 49f7b4efa ("x86/intel_rdt: Enable setting of exclusive mode") Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Xiaochen Shen" Cc: "Chen Yu" Link: https://lkml.kernel.org/r/1537048707-76280-9-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index afd93d45e21b..f3231f78d69b 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1031,16 +1031,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) { int closid = rdtgrp->closid; struct rdt_resource *r; + bool has_cache = false; struct rdt_domain *d; for_each_alloc_enabled_rdt_resource(r) { + if (r->rid == RDT_RESOURCE_MBA) + continue; + has_cache = true; list_for_each_entry(d, &r->domains, list) { if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], - rdtgrp->closid, false)) + rdtgrp->closid, false)) { + rdt_last_cmd_puts("schemata overlaps\n"); return false; + } } } + if (!has_cache) { + rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n"); + return false; + } + return true; } @@ -1092,7 +1103,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, rdtgrp->mode = RDT_MODE_SHAREABLE; } else if (!strcmp(buf, "exclusive")) { if (!rdtgroup_mode_test_exclusive(rdtgrp)) { - rdt_last_cmd_printf("schemata overlaps\n"); ret = -EINVAL; goto out; } -- GitLab From ffb2315fd22c2568747402eecdc581a245a2f5ba Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Sat, 15 Sep 2018 14:58:27 -0700 Subject: [PATCH 1596/1692] x86/intel_rdt: Fix incorrect loop end condition In order to determine a sane default cache allocation for a new CAT/CDP resource group, all resource groups are checked to determine which cache portions are available to share. At this time all possible CLOSIDs that can be supported by the resource is checked. This is problematic if the resource supports more CLOSIDs than another CAT/CDP resource. In this case, the number of CLOSIDs that could be allocated are fewer than the number of CLOSIDs that can be supported by the resource. Limit the check of closids to that what is supported by the system based on the minimum across all resources. 
Fixes: 95f0b77ef ("x86/intel_rdt: Initialize new resource group with sane defaults") Signed-off-by: Reinette Chatre Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Cc: "H Peter Anvin" Cc: "Tony Luck" Cc: "Xiaochen Shen" Cc: "Chen Yu" Link: https://lkml.kernel.org/r/1537048707-76280-10-git-send-email-fenghua.yu@intel.com --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index f3231f78d69b..1b8e86a5d5e1 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -2370,7 +2370,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) d->new_ctrl = r->cache.shareable_bits; used_b = r->cache.shareable_bits; ctrl = d->ctrl_val; - for (i = 0; i < r->num_closid; i++, ctrl++) { + for (i = 0; i < closids_supported(); i++, ctrl++) { if (closid_allocated(i) && i != closid) { mode = rdtgroup_mode_by_closid(i); if (mode == RDT_MODE_PSEUDO_LOCKSETUP) -- GitLab From 8e2aac333785f91ff74e219a1e78e6bdc1ef2c41 Mon Sep 17 00:00:00 2001 From: Simon Detheridge Date: Sat, 15 Sep 2018 22:15:18 +0100 Subject: [PATCH 1597/1692] pinctrl: cannonlake: Fix gpio base for GPP-E The gpio base for GPP-E was set incorrectly to 258 instead of 256, preventing the touchpad working on my Tong Fang GK5CN5Z laptop. Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=200787 Signed-off-by: Simon Detheridge Acked-by: Mika Westerberg Signed-off-by: Linus Walleij --- drivers/pinctrl/intel/pinctrl-cannonlake.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c index fb1afe55bf53..8d48371caaa2 100644 --- a/drivers/pinctrl/intel/pinctrl-cannonlake.c +++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c @@ -379,7 +379,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = { static const struct intel_padgroup cnlh_community3_gpps[] = { CNL_GPP(0, 155, 178, 192), /* GPP_K */ CNL_GPP(1, 179, 202, 224), /* GPP_H */ - CNL_GPP(2, 203, 215, 258), /* GPP_E */ + CNL_GPP(2, 203, 215, 256), /* GPP_E */ CNL_GPP(3, 216, 239, 288), /* GPP_F */ CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */ }; -- GitLab From 558a9ef94a329a1ac75613407ad15d0d0071ff4c Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Sun, 16 Sep 2018 12:34:06 +0800 Subject: [PATCH 1598/1692] drm: sun4i: drop second PLL from A64 HDMI PHY The A64 HDMI PHY seems to be not able to use the second video PLL as clock parent in experiments. Drop the support for the second PLL from A64 HDMI PHY driver. 
Fixes: b46e2c9f5f64 ("drm/sun4i: Add support for A64 HDMI PHY") Signed-off-by: Icenowy Zheng Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20180916043409.62374-2-icenowy@aosc.io --- drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 82502b351aec..a564b5dfe082 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = { static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = { .has_phy_clk = true, - .has_second_pll = true, .phy_init = &sun8i_hdmi_phy_init_h3, .phy_disable = &sun8i_hdmi_phy_disable_h3, .phy_config = &sun8i_hdmi_phy_config_h3, -- GitLab From 571d0563c8881595f4ab027aef9ed1c55e3e7b7c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 19 Sep 2018 13:35:53 +0300 Subject: [PATCH 1599/1692] x86/paravirt: Fix some warning messages The first argument to WARN_ONCE() is a condition. Fixes: 5800dc5c19f3 ("x86/paravirt: Fix spectre-v2 mitigations for paravirt guests") Signed-off-by: Dan Carpenter Signed-off-by: Thomas Gleixner Reviewed-by: Juergen Gross Cc: Peter Zijlstra Cc: Alok Kataria Cc: "H. Peter Anvin" Cc: virtualization@lists.linux-foundation.org Cc: kernel-janitors@vger.kernel.org Link: https://lkml.kernel.org/r/20180919103553.GD9238@mwanda --- arch/x86/kernel/paravirt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index afdb303285f8..8dc69d82567e 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -91,7 +91,7 @@ unsigned paravirt_patch_call(void *insnbuf, if (len < 5) { #ifdef CONFIG_RETPOLINE - WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr); + WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr); #endif return len; /* call too long for patch site */ } @@ -111,7 +111,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, if (len < 5) { #ifdef CONFIG_RETPOLINE - WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr); + WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr); #endif return len; /* call too long for patch site */ } -- GitLab From 336b08088d4da009108d4418e241ca95c9152237 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Sep 2018 13:48:41 +0200 Subject: [PATCH 1600/1692] MAINTAINERS: Add Borislav to the x86 maintainers Borislav is effectivly maintaining parts of X86 already, make it official. Signed-off-by: Thomas Gleixner Acked-by: Ingo Molnar Acked-by: Borislav Petkov --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 4ece30f15777..091e66b60cd2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15913,6 +15913,7 @@ F: net/x25/ X86 ARCHITECTURE (32-BIT AND 64-BIT) M: Thomas Gleixner M: Ingo Molnar +M: Borislav Petkov R: "H. Peter Anvin" M: x86@kernel.org L: linux-kernel@vger.kernel.org -- GitLab From 70513d58751d7c6c1a0133557b13089b9f2e3e66 Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Thu, 12 Jul 2018 13:27:00 -0400 Subject: [PATCH 1601/1692] xen/x86/vpmu: Zero struct pt_regs before calling into sample handling code Otherwise we may leak kernel stack for events that sample user registers. 
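For illustration, a minimal user-space sketch of the pattern (struct regs and the helpers below are simplified stand-ins, not the real pt_regs or the Xen conversion code): the conversion only writes a few fields, so unless the on-stack struct starts out zeroed, the untouched fields carry whatever happened to be on the stack into the sampling path:

/* gcc -Wall -o zeroregs zeroregs.c */
#include <stdio.h>

struct regs {
        unsigned long ip, sp, flags;
        unsigned long scratch[8];       /* never written by convert() */
};

/* partial fill, as the register conversion helper does */
static void convert(struct regs *dst, unsigned long ip, unsigned long sp)
{
        dst->ip = ip;
        dst->sp = sp;
}

/* stands in for the sampling code that copies *r out */
static void sample(const struct regs *r)
{
        printf("ip=%lx sp=%lx scratch[0]=%lx\n", r->ip, r->sp, r->scratch[0]);
}

int main(void)
{
        struct regs regs = { 0 };       /* the fix: no stale stack bytes */

        convert(&regs, 0x400000UL, 0x7fffe000UL);
        sample(&regs);
        return 0;
}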
Reported-by: Mark Rutland Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky Cc: stable@vger.kernel.org --- arch/x86/xen/pmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index 7d00d4ad44d4..95997e6c0696 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs, irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) { int err, ret = IRQ_NONE; - struct pt_regs regs; + struct pt_regs regs = {0}; const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); uint8_t xenpmu_flags = get_xenpmu_flags(); -- GitLab From d59f532480f5231bf62615a9287e05b78225fb05 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Wed, 19 Sep 2018 15:42:33 +0200 Subject: [PATCH 1602/1692] xen: issue warning message when out of grant maptrack entries When a driver domain (e.g. dom0) is running out of maptrack entries it can't map any more foreign domain pages. Instead of silently stalling the affected domUs issue a rate limited warning in this case in order to make it easier to detect that situation. Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/xen/grant-table.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7bafa703a992..84575baceebc 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, return ret; for (i = 0; i < count; i++) { - /* Retry eagain maps */ - if (map_ops[i].status == GNTST_eagain) - gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, - &map_ops[i].status, __func__); - - if (map_ops[i].status == GNTST_okay) { + switch (map_ops[i].status) { + case GNTST_okay: + { struct xen_page_foreign *foreign; SetPageForeign(pages[i]); foreign = xen_page_foreign(pages[i]); foreign->domid = map_ops[i].dom; foreign->gref = map_ops[i].ref; + break; + } + + case GNTST_no_device_space: + pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n"); + break; + + case GNTST_eagain: + /* Retry eagain maps */ + gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, + map_ops + i, + &map_ops[i].status, __func__); + /* Test status in next loop iteration. */ + i--; + break; + + default: + break; } } -- GitLab From cb90b97bb379895302a7b47b97199928fd0bafa9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sat, 15 Sep 2018 10:02:13 +0200 Subject: [PATCH 1603/1692] drm/amdgpu: add amdgpu_vm_entries_mask v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can't get the mask for the root directory from the number of entries. So add a new function to avoid that problem. 
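For illustration, a minimal sketch of shift-and-mask index extraction (the 9-bit level width and the constants are assumptions for the example, not the driver's actual layout): "entries - 1" is only a usable mask when the entry count is a power of two, which is not guaranteed for the root directory, so the root simply takes a full mask instead of deriving one from the number of entries:

/* gcc -Wall -o ptidx ptidx.c */
#include <stdio.h>
#include <stdint.h>

#define LEVEL_BITS 9    /* assumption for the example */

static unsigned int entry_index(uint64_t pfn, unsigned int shift, uint32_t mask)
{
        return (pfn >> shift) & mask;
}

int main(void)
{
        uint64_t pfn = 0x123456789ULL;

        /* page table and intermediate levels: power-of-two entry counts */
        printf("PTB  idx: %u\n", entry_index(pfn, 0, (1u << LEVEL_BITS) - 1));
        printf("PD   idx: %u\n", entry_index(pfn, LEVEL_BITS, 0x1ff));
        /* root level: keep every remaining bit rather than entries - 1 */
        printf("root idx: %u\n", entry_index(pfn, 2 * LEVEL_BITS, 0xffffffff));
        return 0;
}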
v2: fix typo in mask Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 34 ++++++++++++++++++++------ 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a7f9aaa47c49..e16d57efe39f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -190,6 +190,26 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, return AMDGPU_VM_PTE_COUNT(adev); } +/** + * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT + * + * @adev: amdgpu_device pointer + * @level: VMPT level + * + * Returns: + * The mask to extract the entry number of a PD/PT from an address. + */ +static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev, + unsigned int level) +{ + if (level <= adev->vm_manager.root_level) + return 0xffffffff; + else if (level != AMDGPU_VM_PTB) + return 0x1ff; + else + return AMDGPU_VM_PTE_COUNT(adev) - 1; +} + /** * amdgpu_vm_bo_size - returns the size of the BOs in bytes * @@ -399,17 +419,17 @@ static void amdgpu_vm_pt_start(struct amdgpu_device *adev, static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev, struct amdgpu_vm_pt_cursor *cursor) { - unsigned num_entries, shift, idx; + unsigned mask, shift, idx; if (!cursor->entry->entries) return false; BUG_ON(!cursor->entry->base.bo); - num_entries = amdgpu_vm_num_entries(adev, cursor->level); + mask = amdgpu_vm_entries_mask(adev, cursor->level); shift = amdgpu_vm_level_shift(adev, cursor->level); ++cursor->level; - idx = (cursor->pfn >> shift) % num_entries; + idx = (cursor->pfn >> shift) & mask; cursor->parent = cursor->entry; cursor->entry = &cursor->entry->entries[idx]; return true; @@ -1599,7 +1619,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, amdgpu_vm_pt_start(adev, params->vm, start, &cursor); while (cursor.pfn < end) { struct amdgpu_bo *pt = cursor.entry->base.bo; - unsigned shift, parent_shift, num_entries; + unsigned shift, parent_shift, mask; uint64_t incr, entry_end, pe_start; if (!pt) @@ -1654,9 +1674,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, /* Looks good so far, calculate parameters for the update */ incr = AMDGPU_GPU_PAGE_SIZE << shift; - num_entries = amdgpu_vm_num_entries(adev, cursor.level); - pe_start = ((cursor.pfn >> shift) & (num_entries - 1)) * 8; - entry_end = num_entries << shift; + mask = amdgpu_vm_entries_mask(adev, cursor.level); + pe_start = ((cursor.pfn >> shift) & mask) * 8; + entry_end = (mask + 1) << shift; entry_end += cursor.pfn & ~(entry_end - 1); entry_end = min(entry_end, end); -- GitLab From 769f846e1411a28202510b590379a1442eb3e85d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sat, 15 Sep 2018 10:04:54 +0200 Subject: [PATCH 1604/1692] drm/amdgpu: fix parameter documentation for amdgpu_vm_free_pts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function was modified without updating the documentation. 
Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index e16d57efe39f..6904d794d60a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -961,8 +961,7 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, * amdgpu_vm_free_pts - free PD/PT levels * * @adev: amdgpu device structure - * @parent: PD/PT starting level to free - * @level: level of parent structure + * @vm: amdgpu vm structure * * Free the page directory or page table level and all sub levels. */ -- GitLab From 0ee8685392a642ffb381d15d9fe4445d4ec03b56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 15:43:57 +0200 Subject: [PATCH 1605/1692] drm/amdgpu: add GDS, GWS and OA debugfs files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Additional to the existing files for VRAM and GTT. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f12ae6b525b9..1565344cc139 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2208,7 +2208,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, static int amdgpu_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; - unsigned ttm_pl = *(int *)node->info_ent->data; + unsigned ttm_pl = (uintptr_t)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; @@ -2218,12 +2218,12 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) return 0; } -static int ttm_pl_vram = TTM_PL_VRAM; -static int ttm_pl_tt = TTM_PL_TT; - static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { - {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram}, - {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt}, + {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM}, + {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT}, + {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS}, + {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS}, + {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA}, {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, #ifdef CONFIG_SWIOTLB {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} -- GitLab From 3b2de69944cfac8bc8f43c81aeadcbefc31ed48e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 20:44:17 +0200 Subject: [PATCH 1606/1692] drm/amdgpu: stop crashing on GDS/GWS/OA eviction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simply ignore any copying here. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 1565344cc139..c81b35d06df1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -256,6 +256,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, abo = ttm_to_amdgpu_bo(bo); switch (bo->mem.mem_type) { + case AMDGPU_PL_GDS: + case AMDGPU_PL_GWS: + case AMDGPU_PL_OA: + placement->num_placement = 0; + placement->num_busy_placement = 0; + return; + case TTM_PL_VRAM: if (!adev->mman.buffer_funcs_enabled) { /* Move to system memory */ @@ -283,6 +290,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, case TTM_PL_TT: default: amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); + break; } *placement = abo->placement; } @@ -675,6 +683,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, amdgpu_move_null(bo, new_mem); return 0; } + if (old_mem->mem_type == AMDGPU_PL_GDS || + old_mem->mem_type == AMDGPU_PL_GWS || + old_mem->mem_type == AMDGPU_PL_OA || + new_mem->mem_type == AMDGPU_PL_GDS || + new_mem->mem_type == AMDGPU_PL_GWS || + new_mem->mem_type == AMDGPU_PL_OA) { + /* Nothing to save here */ + amdgpu_move_null(bo, new_mem); + return 0; + } if (!adev->mman.buffer_funcs_enabled) goto memcpy; -- GitLab From 21a7e77f77b348e73c9c67064a545e724c4a0c9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 21:03:37 +0200 Subject: [PATCH 1607/1692] drm/amdgpu: don't allocate zero sized kernel BOs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just free the BO if the size should be zero. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e6909252aefa..84d82d5382f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -250,6 +250,11 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev, bool free = false; int r; + if (!size) { + amdgpu_bo_unref(bo_ptr); + return 0; + } + memset(&bp, 0, sizeof(bp)); bp.size = size; bp.byte_align = align; -- GitLab From 5297572806aeeb68c2491169fe7fcc436e09e41d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 21:06:50 +0200 Subject: [PATCH 1608/1692] drm/amdgpu: drop size check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We no don't allocate zero sized kernel BOs any longer. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c81b35d06df1..5da87ec67c64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1809,14 +1809,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS * and driver. 
*/ - if (adev->gmc.stolen_size) { - r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->stolen_vga_memory, - NULL, NULL); - if (r) - return r; - } + r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->stolen_vga_memory, + NULL, NULL); + if (r) + return r; DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); -- GitLab From ddaf501347e21a19d15977029af1d229cd800379 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Mon, 17 Sep 2018 14:07:00 -0400 Subject: [PATCH 1609/1692] drm/amd/amdgpu: Avoid fault when allocating an empty buffer object MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tom St Denis Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 84d82d5382f9..c1387efc0c91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -348,7 +348,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, if (r) return r; - amdgpu_bo_unreserve(*bo_ptr); + if (*bo_ptr) + amdgpu_bo_unreserve(*bo_ptr); return 0; } -- GitLab From 066689161a481db3940f445b34b9dd543cb8799a Mon Sep 17 00:00:00 2001 From: "A. Wilcox" Date: Sun, 1 Jul 2018 22:44:52 -0500 Subject: [PATCH 1610/1692] drm/amdgpu: use processed values for counting adev->gfx.rlc has the values from rlc_hdr already processed by le32_to_cpu. Using the rlc_hdr values on big-endian machines causes a kernel Oops due to writing well outside of the array (0x24000000 instead of 0x24). Signed-off-by: A. 
Wilcox Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 3882689b2d8f..11e6ccdfc3d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1114,14 +1114,14 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) tmp = (unsigned int *)((uintptr_t)rlc_hdr + le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) + for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++) adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; tmp = (unsigned int *)((uintptr_t)rlc_hdr + le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) + for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++) adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 75a91663019f..1a298f17b7dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -692,14 +692,14 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) tmp = (unsigned int *)((uintptr_t)rlc_hdr + le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++) + for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++) adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; tmp = (unsigned int *)((uintptr_t)rlc_hdr + le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); - for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) + for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++) adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); if (adev->gfx.rlc.is_rlc_v2_1) -- GitLab From 1f81fbc4ce8211b38881b12a1100ea7ee2ffc5f0 Mon Sep 17 00:00:00 2001 From: Mathieu Malaterre Date: Thu, 12 Apr 2018 21:33:33 +0200 Subject: [PATCH 1611/1692] drm/radeon: change function signature to pass full range MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In function ‘radeon_process_i2c_ch’ a comparison of a u8 value against 255 is done. Since it is always false, change the signature of this function to use an `int` instead, which match the type used in caller: `radeon_atom_hw_i2c_xfer`. 
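For illustration, a minimal sketch of the always-false comparison (255 stands in for ATOM_MAX_HW_I2C_READ, the check_* helpers are made-up names): with a u8 parameter the range check can never fire, which is what -Wtype-limits points out, while an int parameter keeps the intended check meaningful:

/* gcc -Wall -Wtype-limits -o u8check u8check.c */
#include <stdio.h>

typedef unsigned char u8;

static int check_u8(u8 num)
{
        return num > 255;       /* always 0: a u8 cannot exceed 255 */
}

static int check_int(int num)
{
        return num > 255;       /* meaningful range check */
}

int main(void)
{
        printf("u8  check(300 truncated to 44): %d\n", check_u8((u8)300));
        printf("int check(300):                 %d\n", check_int(300));
        return 0;
}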
Fix the following warning triggered with W=1: CC [M] drivers/gpu/drm/radeon/atombios_i2c.o drivers/gpu/drm/radeon/atombios_i2c.c: In function ‘radeon_process_i2c_ch’: drivers/gpu/drm/radeon/atombios_i2c.c:71:11: warning: comparison is always false due to limited range of data type [-Wtype-limits] if (num > ATOM_MAX_HW_I2C_READ) { ^ Reviewed-by: Huang Rui Signed-off-by: Mathieu Malaterre Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/atombios_i2c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index 4157780585a0..9022e9af11a0 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -35,7 +35,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, u8 slave_addr, u8 flags, - u8 *buf, u8 num) + u8 *buf, int num) { struct drm_device *dev = chan->dev; struct radeon_device *rdev = dev->dev_private; -- GitLab From 801281fe09ffc8720336131669a946276e21fe4e Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 17 Sep 2018 20:25:03 +0800 Subject: [PATCH 1612/1692] drm/amdgpu: update vram_info structure in atomfirmware.h atomfirmware has structure changes in varm_info. Updated it to the latest one. Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 2 +- drivers/gpu/drm/amd/include/atomfirmware.h | 20 ++++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 236915849cfe..5461d0d55111 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -174,7 +174,7 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev, case ATOM_DGPU_VRAM_TYPE_GDDR5: vram_type = AMDGPU_VRAM_TYPE_GDDR5; break; - case ATOM_DGPU_VRAM_TYPE_HBM: + case ATOM_DGPU_VRAM_TYPE_HBM2: vram_type = AMDGPU_VRAM_TYPE_HBM; break; default: diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 6109a45d7a63..8ae7adb7329b 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -179,7 +179,7 @@ enum atom_voltage_type enum atom_dgpu_vram_type{ ATOM_DGPU_VRAM_TYPE_GDDR5 = 0x50, - ATOM_DGPU_VRAM_TYPE_HBM = 0x60, + ATOM_DGPU_VRAM_TYPE_HBM2 = 0x60, }; enum atom_dp_vs_preemph_def{ @@ -1699,10 +1699,10 @@ struct atom_vram_module_v9 { // Design Specific Values uint32_t memory_size; // Total memory size in unit of MB for CONFIG_MEMSIZE zeros - uint32_t channel_enable; // for 32 channel ASIC usage - uint32_t umcch_addrcfg; - uint32_t umcch_addrsel; - uint32_t umcch_colsel; + uint32_t channel_enable; // bit vector, each bit indicate specific channel enable or not + uint32_t max_mem_clk; // max memory clock of this memory in unit of 10kHz, =0 means it is not defined + uint16_t reserved[3]; + uint16_t mem_voltage; // mem_voltage uint16_t vram_module_size; // Size of atom_vram_module_v9 uint8_t ext_memory_id; // Current memory module ID uint8_t memory_type; // enum of atom_dgpu_vram_type @@ -1712,20 +1712,22 @@ struct atom_vram_module_v9 uint8_t tunningset_id; // MC phy registers set per. 
uint8_t vender_rev_id; // [7:4] Revision, [3:0] Vendor code uint8_t refreshrate; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) - uint16_t vram_rsd2; // reserved + uint8_t hbm_ven_rev_id; // hbm_ven_rev_id + uint8_t vram_rsd2; // reserved char dram_pnstring[20]; // part number end with '0'. }; - struct atom_vram_info_header_v2_3 { - struct atom_common_table_header table_header; + struct atom_common_table_header table_header; uint16_t mem_adjust_tbloffset; // offset of atom_umc_init_reg_block structure for memory vendor specific UMC adjust setting uint16_t mem_clk_patch_tbloffset; // offset of atom_umc_init_reg_block structure for memory clock specific UMC setting uint16_t mc_adjust_pertile_tbloffset; // offset of atom_umc_init_reg_block structure for Per Byte Offset Preset Settings uint16_t mc_phyinit_tbloffset; // offset of atom_umc_init_reg_block structure for MC phy init set uint16_t dram_data_remap_tbloffset; // reserved for now - uint16_t vram_rsd2[3]; + uint16_t tmrs_seq_offset; // offset of HBM tmrs + uint16_t post_ucode_init_offset; // offset of atom_umc_init_reg_block structure for MC phy init after MC uCode complete umc init + uint16_t vram_rsd2; uint8_t vram_module_num; // indicate number of VRAM module uint8_t vram_rsd1[2]; uint8_t mc_phy_tile_num; // indicate the MCD tile number which use in DramDataRemapTbl and usMcAdjustPerTileTblOffset -- GitLab From 27e39d3dd365d661b5f6b5b09ad86ee142e11de0 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 17 Sep 2018 20:19:48 +0800 Subject: [PATCH 1613/1692] drm/amdgpu: fix unknown vram mem type for vega20 vega20 should use umc_info v3_3 instead of v3_1. There are serveral versions of umc_info for vega series. Compared to various versions of these structures, vram_info strucure is unified for vega series. The patch switch to query mem_type from vram_info structure for all the vega series dGPU. Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 5461d0d55111..b61e1dc61b4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -117,6 +117,10 @@ union igp_info { union umc_info { struct atom_umc_info_v3_1 v31; }; + +union vram_info { + struct atom_vram_info_header_v2_3 v23; +}; /* * Return vram width from integrated system info table, if available, * or 0 if not. 
@@ -195,7 +199,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) int index; u16 data_offset, size; union igp_info *igp_info; - union umc_info *umc_info; + union vram_info *vram_info; u8 frev, crev; u8 mem_type; @@ -204,7 +208,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) integratedsysteminfo); else index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - umc_info); + vram_info); if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { @@ -219,11 +223,11 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) return 0; } } else { - umc_info = (union umc_info *) + vram_info = (union vram_info *) (mode_info->atom_context->bios + data_offset); switch (crev) { - case 1: - mem_type = umc_info->v31.vram_type; + case 3: + mem_type = vram_info->v23.vram_module[0].memory_type; return convert_atom_mem_type_to_vram_type(adev, mem_type); default: return 0; -- GitLab From 8a1304a5b4310b941d08c988326d15673ed0f689 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 12 Sep 2018 11:45:01 +0800 Subject: [PATCH 1614/1692] drm/amd/powerplay: update OD feature judgement Update the conditions to judge whether an OD feature should be supported on vega20. Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 79 +++++++++++++------ .../drm/amd/powerplay/hwmgr/vega20_pptable.h | 2 + 2 files changed, 55 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 1e65ac01e0f5..dc6144183968 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -832,58 +832,85 @@ static int vega20_od8_set_feature_capabilities( struct phm_ppt_v3_information *pptable_information = (struct phm_ppt_v3_information *)hwmgr->pptable; struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); struct vega20_od8_settings *od_settings = &(data->od8_settings); od_settings->overdrive8_capabilities = 0; if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { - if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 && - pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMAX] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0 && - pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_GFXCLKFMIN] > 0) + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] && + pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 && + pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 && + (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >= + pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN])) od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS; - if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 && - pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 && - pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P1] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P2] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEFREQ_P3] > 0 && - 
pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 && - pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 && - pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P1] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P2] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_VDDGFXCURVEVOLTAGEOFFSET_P3] > 0) + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] && + (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >= + pp_table->MinVoltageGfx / VOLTAGE_SCALE) && + (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <= + pp_table->MaxVoltageGfx / VOLTAGE_SCALE) && + (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >= + pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1])) od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE; } if (data->smu_features[GNLD_DPM_UCLK].enabled) { - if (pptable_information->od_settings_min[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_UCLKFMAX] > 0) + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] && + pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 && + pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 && + (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >= + pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX])) od_settings->overdrive8_capabilities |= OD8_UCLK_MAX; } - if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] > 0 && - pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE] <= 100) + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] && + pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 && + pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 && + pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 && + pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100) od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT; if (data->smu_features[GNLD_FAN_CONTROL].enabled) { - if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMMIN] > 0) - od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN; - - if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANRPMACOUSTICLIMIT] > 0) + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] && + pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 && + pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 && + (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >= + pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT])) od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK; + + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] && + (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >= + (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) && + pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 && + (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >= + pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED])) + 
od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN; } if (data->smu_features[GNLD_THERMAL].enabled) { - if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_FANTARGETTEMPERATURE] > 0) + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] && + pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 && + pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 && + (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >= + pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP])) od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN; - if (pptable_information->od_settings_max[ATOM_VEGA20_ODSETTING_OPERATINGTEMPMAX] > 0) + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] && + pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 && + pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 && + (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >= + pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX])) od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM; } + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE]) + od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE; + + if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] && + pp_table->FanZeroRpmEnable) + od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h index b104f6af81a4..2222e29405c6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h @@ -49,6 +49,8 @@ enum ATOM_VEGA20_ODFEATURE_ID { ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN, //FanMinimumPwm ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN, //FanTargetTemperature ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM, //MaxOpTemp + ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE, + ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL, ATOM_VEGA20_ODFEATURE_COUNT, }; -- GitLab From b1f82cb21231ccfec3c15b628f8deed778cce22b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 13 Sep 2018 16:14:33 +0800 Subject: [PATCH 1615/1692] drm/amd/powerplay: update OD to take voltage value instead of offset With the latest SMC fw, we are able to get the voltage value for specific frequency point. So, we update the OD relates to take absolute voltage instead of offset. Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 12 +- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 112 +++++++++++++----- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 4 + .../drm/amd/powerplay/inc/smu11_driver_if.h | 6 +- .../gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | 3 +- 5 files changed, 96 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 396c826100e6..8c334fc808c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -502,7 +502,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * * - maximum memory clock labeled OD_MCLK * - * - three points labeled OD_VDDC_CURVE. + * - three points labeled OD_VDDC_CURVE. * They can be used to calibrate the sclk voltage curve. 
* * - a list of valid ranges for sclk, mclk, and voltage curve points @@ -519,11 +519,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * "m 1 800" will update maximum mclk to be 800Mhz. * * For sclk voltage curve, enter the new values by writing a - * string that contains "vc point clock voff" to the file. The - * points are indexed by 0, 1 and 2. E.g., "vc 0 300 10" will - * update point1 with clock set as 300Mhz and voltage increased - * by 10mV. "vc 2 1000 -10" will update point3 with clock set - * as 1000Mhz and voltage drop by 10mV. + * string that contains "vc point clock voltage" to the file. The + * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will + * update point1 with clock set as 300Mhz and voltage as + * 600mV. "vc 2 1000 1000" will update point3 with clock set + * as 1000Mhz and voltage 1000mV. * * - When you have edited all of the states as needed, write "c" (commit) * to the file to commit your changes diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index dc6144183968..4f9bf6049d1c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1001,6 +1001,26 @@ static int vega20_od8_set_feature_id( return 0; } +static int vega20_od8_get_gfx_clock_base_voltage( + struct pp_hwmgr *hwmgr, + uint32_t *voltage, + uint32_t freq) +{ + int ret = 0; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetAVFSVoltageByDpm, + ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq)); + PP_ASSERT_WITH_CODE(!ret, + "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!", + return ret); + + vega20_read_arg_from_smc(hwmgr, voltage); + *voltage = *voltage / VOLTAGE_SCALE; + + return 0; +} + static int vega20_od8_initialize_default_settings( struct pp_hwmgr *hwmgr) { @@ -1036,18 +1056,41 @@ static int vega20_od8_initialize_default_settings( } if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) { + od_table->GfxclkFreq1 = od_table->GfxclkFmin; od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value = od_table->GfxclkFreq1; - od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = - od_table->GfxclkOffsetVolt1; - od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value = - od_table->GfxclkFreq2; - od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = - od_table->GfxclkOffsetVolt2; + + od_table->GfxclkFreq3 = od_table->GfxclkFmax; od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value = od_table->GfxclkFreq3; - od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = - od_table->GfxclkOffsetVolt3; + + od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2; + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value = + od_table->GfxclkFreq2; + + PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr, + &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value), + od_table->GfxclkFreq1), + "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!", + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0); + od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value + * VOLTAGE_SCALE; + + PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr, + &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value), + 
od_table->GfxclkFreq2), + "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!", + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0); + od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value + * VOLTAGE_SCALE; + + PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr, + &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value), + od_table->GfxclkFreq3), + "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!", + od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0); + od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value + * VOLTAGE_SCALE; } else { od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value = 0; @@ -1086,7 +1129,7 @@ static int vega20_od8_initialize_default_settings( if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN) od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value = - od_table->FanMinimumPwm; + od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100; else od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value = 0; @@ -1123,6 +1166,11 @@ static int vega20_od8_initialize_default_settings( } } + ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE); + PP_ASSERT_WITH_CODE(!ret, + "Failed to import over drive table!", + return ret); + return 0; } @@ -1150,19 +1198,19 @@ static int vega20_od8_set_settings( od_table.GfxclkFreq1 = (uint16_t)value; break; case OD8_SETTING_GFXCLK_VOLTAGE1: - od_table.GfxclkOffsetVolt1 = (uint16_t)value; + od_table.GfxclkVolt1 = (uint16_t)value; break; case OD8_SETTING_GFXCLK_FREQ2: od_table.GfxclkFreq2 = (uint16_t)value; break; case OD8_SETTING_GFXCLK_VOLTAGE2: - od_table.GfxclkOffsetVolt2 = (uint16_t)value; + od_table.GfxclkVolt2 = (uint16_t)value; break; case OD8_SETTING_GFXCLK_FREQ3: od_table.GfxclkFreq3 = (uint16_t)value; break; case OD8_SETTING_GFXCLK_VOLTAGE3: - od_table.GfxclkOffsetVolt3 = (uint16_t)value; + od_table.GfxclkVolt3 = (uint16_t)value; break; case OD8_SETTING_UCLK_FMAX: od_table.UclkFmax = (uint16_t)value; @@ -2364,6 +2412,7 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, &(data->smc_state_table.overdrive_table); struct pp_clock_levels_with_latency clocks; int32_t input_index, input_clk, input_vol, i; + int od8_id; int ret; PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", @@ -2480,37 +2529,38 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EINVAL; } - if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value || - input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) { + od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index; + if (input_clk < od8_settings[od8_id].min_value || + input_clk > od8_settings[od8_id].max_value) { pr_info("clock freq %d is not within allowed range [%d - %d]\n", input_clk, - od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, - od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); + od8_settings[od8_id].min_value, + od8_settings[od8_id].max_value); return -EINVAL; } - /* TODO: suppose voltage1/2/3 has the same min/max value */ - if (input_vol < od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value || - input_vol > od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value) { - pr_info("clock voltage offset %d is not within allowed range [%d - %d]\n", + od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * 
input_index; + if (input_vol < od8_settings[od8_id].min_value || + input_vol > od8_settings[od8_id].max_value) { + pr_info("clock voltage %d is not within allowed range [%d - %d]\n", input_vol, - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, - od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); + od8_settings[od8_id].min_value, + od8_settings[od8_id].max_value); return -EINVAL; } switch (input_index) { case 0: od_table->GfxclkFreq1 = input_clk; - od_table->GfxclkOffsetVolt1 = input_vol; + od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE; break; case 1: od_table->GfxclkFreq2 = input_clk; - od_table->GfxclkOffsetVolt2 = input_vol; + od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE; break; case 2: od_table->GfxclkFreq3 = input_clk; - od_table->GfxclkOffsetVolt3 = input_vol; + od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE; break; } } @@ -2623,13 +2673,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE"); size += sprintf(buf + size, "0: %10uMhz %10dmV\n", od_table->GfxclkFreq1, - od_table->GfxclkOffsetVolt1); + od_table->GfxclkVolt1 / VOLTAGE_SCALE); size += sprintf(buf + size, "1: %10uMhz %10dmV\n", od_table->GfxclkFreq2, - od_table->GfxclkOffsetVolt2); + od_table->GfxclkVolt2 / VOLTAGE_SCALE); size += sprintf(buf + size, "2: %10uMhz %10dmV\n", od_table->GfxclkFreq3, - od_table->GfxclkOffsetVolt3); + od_table->GfxclkVolt3 / VOLTAGE_SCALE); } break; @@ -2664,19 +2714,19 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); - size += sprintf(buf + size, "VDDC_CURVE_VOFF[0]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); - size += sprintf(buf + size, "VDDC_CURVE_VOFF[1]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); - size += sprintf(buf + size, "VDDC_CURVE_VOFF[2]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 72e4f2a55641..b71a5f25c734 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -38,6 +38,10 @@ #define VG20_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8 #define VG20_PSUEDO_NUM_UCLK_DPM_LEVELS 4 +//OverDriver8 macro defs +#define AVFS_CURVE 0 +#define OD8_HOTCURVE_TEMPERATURE 85 + typedef uint32_t PP_Clock; enum { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 59e621ef33ac..71191deb4e76 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -569,11 
+569,11 @@ typedef struct { uint16_t GfxclkFmin; uint16_t GfxclkFmax; uint16_t GfxclkFreq1; - int16_t GfxclkOffsetVolt1; + uint16_t GfxclkVolt1; uint16_t GfxclkFreq2; - int16_t GfxclkOffsetVolt2; + uint16_t GfxclkVolt2; uint16_t GfxclkFreq3; - int16_t GfxclkOffsetVolt3; + uint16_t GfxclkVolt3; uint16_t UclkFmax; int16_t OverDrivePct; uint16_t FanMaximumRpm; diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h index 165429f717c4..45d64a81e945 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h @@ -117,7 +117,8 @@ #define PPSMC_MSG_PrepareMp1ForReset 0x59 #define PPSMC_MSG_PrepareMp1ForShutdown 0x5A #define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D -#define PPSMC_Message_Count 0x5E +#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F +#define PPSMC_Message_Count 0x60 typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; -- GitLab From 32f2a0d117769bdca7f7ee6224659f2c688ebc85 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 17 Sep 2018 14:59:54 +0800 Subject: [PATCH 1616/1692] drm/amd/powerplay: retrieve the updated clock table after OD With OD settings applied, the clock table will be updated accordingly. We need to retrieve the new clock tables then. Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 114 ++++++++++++++---- .../drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 2 + 2 files changed, 90 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 4f9bf6049d1c..d45cbfe8e184 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -514,6 +514,47 @@ static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr, return ret; } +static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table; + int ret = 0; + + dpm_table = &(data->dpm_table.gfx_table); + if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100; + } + + return ret; +} + +static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table; + int ret = 0; + + dpm_table = &(data->dpm_table.mem_table); + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK); + PP_ASSERT_WITH_CODE(!ret, + "[SetupDefaultDpmTable] failed to get memclk dpm levels!", + return ret); + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100; + } + + return ret; +} /* * This function is to initialize all DPM state tables @@ -547,28 +588,16 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) /* gfxclk */ dpm_table = &(data->dpm_table.gfx_table); - if (data->smu_features[GNLD_DPM_GFXCLK].enabled) { - ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK); - PP_ASSERT_WITH_CODE(!ret, - "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!", - return ret); - 
} else { - dpm_table->count = 1; - dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100; - } + ret = vega20_setup_gfxclk_dpm_table(hwmgr); + if (ret) + return ret; vega20_init_dpm_state(&(dpm_table->dpm_state)); /* memclk */ dpm_table = &(data->dpm_table.mem_table); - if (data->smu_features[GNLD_DPM_UCLK].enabled) { - ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK); - PP_ASSERT_WITH_CODE(!ret, - "[SetupDefaultDpmTable] failed to get memclk dpm levels!", - return ret); - } else { - dpm_table->count = 1; - dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100; - } + ret = vega20_setup_memclk_dpm_table(hwmgr); + if (ret) + return ret; vega20_init_dpm_state(&(dpm_table->dpm_state)); /* eclk */ @@ -1181,6 +1210,9 @@ static int vega20_od8_set_settings( { OverDriveTable_t od_table; int ret = 0; + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_od8_single_setting *od8_settings = + data->od8_settings.od8_settings_array; ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE); PP_ASSERT_WITH_CODE(!ret, @@ -1192,6 +1224,10 @@ static int vega20_od8_set_settings( od_table.GfxclkFmin = (uint16_t)value; break; case OD8_SETTING_GFXCLK_FMAX: + if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value || + value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) + return -EINVAL; + od_table.GfxclkFmax = (uint16_t)value; break; case OD8_SETTING_GFXCLK_FREQ1: @@ -1213,6 +1249,9 @@ static int vega20_od8_set_settings( od_table.GfxclkVolt3 = (uint16_t)value; break; case OD8_SETTING_UCLK_FMAX: + if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || + value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) + return -EINVAL; od_table.UclkFmax = (uint16_t)value; break; case OD8_SETTING_POWER_PERCENTAGE: @@ -1262,8 +1301,6 @@ static int vega20_set_sclk_od( struct pp_hwmgr *hwmgr, uint32_t value) { struct vega20_hwmgr *data = hwmgr->backend; - struct vega20_single_dpm_table *sclk_table = - &(data->dpm_table.gfx_table); struct vega20_single_dpm_table *golden_sclk_table = &(data->golden_dpm_table.gfx_table); uint32_t od_sclk; @@ -1278,8 +1315,8 @@ static int vega20_set_sclk_od( "[SetSclkOD] failed to set od gfxclk!", return ret); - /* refresh gfxclk table */ - ret = vega20_setup_single_dpm_table(hwmgr, sclk_table, PPCLK_GFXCLK); + /* retrieve updated gfxclk table */ + ret = vega20_setup_gfxclk_dpm_table(hwmgr); PP_ASSERT_WITH_CODE(!ret, "[SetSclkOD] failed to refresh gfxclk table!", return ret); @@ -1309,8 +1346,6 @@ static int vega20_set_mclk_od( struct pp_hwmgr *hwmgr, uint32_t value) { struct vega20_hwmgr *data = hwmgr->backend; - struct vega20_single_dpm_table *mclk_table = - &(data->dpm_table.mem_table); struct vega20_single_dpm_table *golden_mclk_table = &(data->golden_dpm_table.mem_table); uint32_t od_mclk; @@ -1325,8 +1360,8 @@ static int vega20_set_mclk_od( "[SetMclkOD] failed to set od memclk!", return ret); - /* refresh memclk table */ - ret = vega20_setup_single_dpm_table(hwmgr, mclk_table, PPCLK_UCLK); + /* retrieve updated memclk table */ + ret = vega20_setup_memclk_dpm_table(hwmgr); PP_ASSERT_WITH_CODE(!ret, "[SetMclkOD] failed to refresh memclk table!", return ret); @@ -2451,6 +2486,10 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EINVAL; } + if ((input_index == 0 && od_table->GfxclkFmin != input_clk) || + (input_index == 1 && od_table->GfxclkFmax != input_clk)) + data->gfxclk_overdrive = true; + if (input_index == 0) od_table->GfxclkFmin = input_clk; else 
@@ -2495,6 +2534,9 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EINVAL; } + if (input_index == 1 && od_table->UclkFmax != input_clk) + data->memclk_overdrive = true; + od_table->UclkFmax = input_clk; } @@ -2567,6 +2609,9 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, break; case PP_OD_RESTORE_DEFAULT_TABLE: + data->gfxclk_overdrive = false; + data->memclk_overdrive = false; + ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE); @@ -2583,6 +2628,23 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, "Failed to import overdrive table!", return ret); + /* retrieve updated gfxclk table */ + if (data->gfxclk_overdrive) { + data->gfxclk_overdrive = false; + + ret = vega20_setup_gfxclk_dpm_table(hwmgr); + if (ret) + return ret; + } + + /* retrieve updated memclk table */ + if (data->memclk_overdrive) { + data->memclk_overdrive = false; + + ret = vega20_setup_memclk_dpm_table(hwmgr); + if (ret) + return ret; + } break; default: diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index b71a5f25c734..56fe6a0d42e8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -502,6 +502,8 @@ struct vega20_hwmgr { /* ---- Overdrive next setting ---- */ struct vega20_odn_data odn_data; + bool gfxclk_overdrive; + bool memclk_overdrive; /* ---- Overdrive8 Setting ---- */ struct vega20_od8_settings od8_settings; -- GitLab From 3e14bedc581c3b7b05cd36352d0d47eca0317497 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 18 Sep 2018 10:38:09 +0200 Subject: [PATCH 1617/1692] drm/amdgpu: remove fence fallback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DC doesn't seem to have a fallback path either. So when interrupts doesn't work any more we are pretty much busted no matter what. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 56 ----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1 - 3 files changed, 58 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c43bc83c2d29..6cb35e3dab30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -146,7 +146,6 @@ extern int amdgpu_cik_support; #define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */ #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ -#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) /* AMDGPU_IB_POOL_SIZE must be a power of 2 */ #define AMDGPU_IB_POOL_SIZE 16 #define AMDGPU_DEBUGFS_MAX_COMPONENTS 32 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index da36731460b5..176f28777f5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -195,19 +195,6 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s) return 0; } -/** - * amdgpu_fence_schedule_fallback - schedule fallback check - * - * @ring: pointer to struct amdgpu_ring - * - * Start a timer as fallback to our interrupts. 
- */ -static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) -{ - mod_timer(&ring->fence_drv.fallback_timer, - jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT); -} - /** * amdgpu_fence_process - check for fence activity * @@ -229,9 +216,6 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq); - if (seq != ring->fence_drv.sync_seq) - amdgpu_fence_schedule_fallback(ring); - if (unlikely(seq == last_seq)) return; @@ -262,21 +246,6 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) } while (last_seq != seq); } -/** - * amdgpu_fence_fallback - fallback for hardware interrupts - * - * @work: delayed work item - * - * Checks for fence activity. - */ -static void amdgpu_fence_fallback(struct timer_list *t) -{ - struct amdgpu_ring *ring = from_timer(ring, t, - fence_drv.fallback_timer); - - amdgpu_fence_process(ring); -} - /** * amdgpu_fence_wait_empty - wait for all fences to signal * @@ -424,8 +393,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, atomic_set(&ring->fence_drv.last_seq, 0); ring->fence_drv.initialized = false; - timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0); - ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1; spin_lock_init(&ring->fence_drv.lock); ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *), @@ -501,7 +468,6 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); drm_sched_fini(&ring->sched); - del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) dma_fence_put(ring->fence_drv.fences[j]); kfree(ring->fence_drv.fences); @@ -594,27 +560,6 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) return (const char *)fence->ring->name; } -/** - * amdgpu_fence_enable_signaling - enable signalling on fence - * @fence: fence - * - * This function is called with fence_queue lock held, and adds a callback - * to fence_queue that checks if this fence is signaled, and if so it - * signals the fence and removes itself. 
- */ -static bool amdgpu_fence_enable_signaling(struct dma_fence *f) -{ - struct amdgpu_fence *fence = to_amdgpu_fence(f); - struct amdgpu_ring *ring = fence->ring; - - if (!timer_pending(&ring->fence_drv.fallback_timer)) - amdgpu_fence_schedule_fallback(ring); - - DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); - - return true; -} - /** * amdgpu_fence_free - free up the fence memory * @@ -645,7 +590,6 @@ static void amdgpu_fence_release(struct dma_fence *f) static const struct dma_fence_ops amdgpu_fence_ops = { .get_driver_name = amdgpu_fence_get_driver_name, .get_timeline_name = amdgpu_fence_get_timeline_name, - .enable_signaling = amdgpu_fence_enable_signaling, .release = amdgpu_fence_release, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 9cc239968e40..44fc665e4577 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -77,7 +77,6 @@ struct amdgpu_fence_driver { bool initialized; struct amdgpu_irq_src *irq_src; unsigned irq_type; - struct timer_list fallback_timer; unsigned num_fences_mask; spinlock_t lock; struct dma_fence **fences; -- GitLab From 4947b2f248f867626c7a3797fc3a314bd93aeac5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Sep 2018 09:30:46 +0200 Subject: [PATCH 1618/1692] drm/amdgpu: stop pipelining VM PDs/PTs moves MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are going to need this for recoverable page fault handling and it makes shadow handling during GPU reset much more easier. Signed-off-by: Christian König Acked-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index c1387efc0c91..9436994bc406 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1366,7 +1366,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) { WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && - !bo->pin_count); + !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel); WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5da87ec67c64..d61910873627 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -532,7 +532,11 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, if (r) goto error; - r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); + /* Always block for VM page tables before committing the new location */ + if (bo->type == ttm_bo_type_kernel) + r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem); + else + r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); dma_fence_put(fence); return r; -- GitLab From 16688677427687d6e3796bfa3aa72c68f71572df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Sep 2018 10:30:31 +0200 Subject: [PATCH 1619/1692] drm/amdgpu: always enable shadow BOs v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Even when GPU recovery is disabled we could run into a manually 
triggered recovery. v2: keep accidental removed comments Signed-off-by: Christian König Acked-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9436994bc406..244c71c2fa06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -51,18 +51,6 @@ * */ -static bool amdgpu_bo_need_backup(struct amdgpu_device *adev) -{ - if (adev->flags & AMD_IS_APU) - return false; - - if (amdgpu_gpu_recovery == 0 || - (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev))) - return false; - - return true; -} - /** * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting * @@ -599,7 +587,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (r) return r; - if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) { + if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { if (!bp->resv) WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, NULL)); -- GitLab From 7fcb0657ffa93544142f6a0bb20b7fecf1cc1dca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Sep 2018 10:31:54 +0200 Subject: [PATCH 1620/1692] drm/amdgpu: shadow BOs don't need any alignment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit They aren't directly used by the hardware. Signed-off-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 244c71c2fa06..524c21d56f75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -532,7 +532,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, } static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, - unsigned long size, int byte_align, + unsigned long size, struct amdgpu_bo *bo) { struct amdgpu_bo_param bp; @@ -543,7 +543,6 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, memset(&bp, 0, sizeof(bp)); bp.size = size; - bp.byte_align = byte_align; bp.domain = AMDGPU_GEM_DOMAIN_GTT; bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_SHADOW; @@ -592,7 +591,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, NULL)); - r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr)); + r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); if (!bp->resv) reservation_object_unlock((*bo_ptr)->tbo.resv); -- GitLab From c33adbc7285f72dbd86aedba858e9570cd9f9c99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Sep 2018 10:36:16 +0200 Subject: [PATCH 1621/1692] drm/amdgpu: always recover VRAM during GPU recovery MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It shouldn't add much overhead and we should make sure that critical VRAM content is always restored. 
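For clarity, the effect on the tail of the reset path is shown side by side below; both snippets are taken from the hunk that follows, nothing here is new driver code:

	/* before: only on a full reset of a dGPU, or when VRAM loss was detected */
	if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
		r = amdgpu_device_handle_vram_lost(adev);

	/* after: run VRAM recovery whenever the reset itself succeeded */
	if (!r)
		r = amdgpu_device_recover_vram(adev);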
Signed-off-by: Christian König Acked-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 354f0557d697..0267fae316d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2999,7 +2999,7 @@ static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev, } /** - * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents + * amdgpu_device_recover_vram - Recover some VRAM contents * * @adev: amdgpu_device pointer * @@ -3008,7 +3008,7 @@ static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev, * the contents of VRAM might be lost. * Returns 0 on success, 1 on failure. */ -static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev) +static int amdgpu_device_recover_vram(struct amdgpu_device *adev) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_bo *bo, *tmp; @@ -3135,8 +3135,8 @@ static int amdgpu_device_reset(struct amdgpu_device *adev) } } - if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost)) - r = amdgpu_device_handle_vram_lost(adev); + if (!r) + r = amdgpu_device_recover_vram(adev); return r; } @@ -3182,7 +3182,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_virt_release_full_gpu(adev, true); if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { atomic_inc(&adev->vram_lost_counter); - r = amdgpu_device_handle_vram_lost(adev); + r = amdgpu_device_recover_vram(adev); } return r; -- GitLab From 403009bfba45163887398652762ed1fc6645181c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Sep 2018 11:50:57 +0200 Subject: [PATCH 1622/1692] drm/amdgpu: fix shadow BO restoring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't grab the reservation lock any more and simplify the handling quite a bit. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 109 ++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 46 +++------ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 8 +- 3 files changed, 43 insertions(+), 120 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0267fae316d7..bd79d0a31942 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2950,54 +2950,6 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) return 0; } -/** - * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers - * - * @adev: amdgpu_device pointer - * @ring: amdgpu_ring for the engine handling the buffer operations - * @bo: amdgpu_bo buffer whose shadow is being restored - * @fence: dma_fence associated with the operation - * - * Restores the VRAM buffer contents from the shadow in GTT. Used to - * restore things like GPUVM page tables after a GPU reset where - * the contents of VRAM might be lost. - * Returns 0 on success, negative error code on failure. 
- */ -static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct dma_fence **fence) -{ - uint32_t domain; - int r; - - if (!bo->shadow) - return 0; - - r = amdgpu_bo_reserve(bo, true); - if (r) - return r; - domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); - /* if bo has been evicted, then no need to recover */ - if (domain == AMDGPU_GEM_DOMAIN_VRAM) { - r = amdgpu_bo_validate(bo->shadow); - if (r) { - DRM_ERROR("bo validate failed!\n"); - goto err; - } - - r = amdgpu_bo_restore_from_shadow(adev, ring, bo, - NULL, fence, true); - if (r) { - DRM_ERROR("recover page table failed!\n"); - goto err; - } - } -err: - amdgpu_bo_unreserve(bo); - return r; -} - /** * amdgpu_device_recover_vram - Recover some VRAM contents * @@ -3006,16 +2958,15 @@ static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev, * Restores the contents of VRAM buffers from the shadows in GTT. Used to * restore things like GPUVM page tables after a GPU reset where * the contents of VRAM might be lost. - * Returns 0 on success, 1 on failure. + * + * Returns: + * 0 on success, negative error code on failure. */ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - struct amdgpu_bo *bo, *tmp; struct dma_fence *fence = NULL, *next = NULL; - long r = 1; - int i = 0; - long tmo; + struct amdgpu_bo *shadow; + long r = 1, tmo; if (amdgpu_sriov_runtime(adev)) tmo = msecs_to_jiffies(8000); @@ -3024,44 +2975,40 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) DRM_INFO("recover vram bo from shadow start\n"); mutex_lock(&adev->shadow_list_lock); - list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { - next = NULL; - amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next); + list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { + + /* No need to recover an evicted BO */ + if (shadow->tbo.mem.mem_type != TTM_PL_TT || + shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) + continue; + + r = amdgpu_bo_restore_shadow(shadow, &next); + if (r) + break; + if (fence) { r = dma_fence_wait_timeout(fence, false, tmo); - if (r == 0) - pr_err("wait fence %p[%d] timeout\n", fence, i); - else if (r < 0) - pr_err("wait fence %p[%d] interrupted\n", fence, i); - if (r < 1) { - dma_fence_put(fence); - fence = next; + dma_fence_put(fence); + fence = next; + if (r <= 0) break; - } - i++; + } else { + fence = next; } - - dma_fence_put(fence); - fence = next; } mutex_unlock(&adev->shadow_list_lock); - if (fence) { - r = dma_fence_wait_timeout(fence, false, tmo); - if (r == 0) - pr_err("wait fence %p[%d] timeout\n", fence, i); - else if (r < 0) - pr_err("wait fence %p[%d] interrupted\n", fence, i); - - } + if (fence) + tmo = dma_fence_wait_timeout(fence, false, tmo); dma_fence_put(fence); - if (r > 0) - DRM_INFO("recover vram bo from shadow done\n"); - else + if (r <= 0 || tmo <= 0) { DRM_ERROR("recover vram bo from shadow failed\n"); + return -EIO; + } - return (r > 0) ? 
0 : 1; + DRM_INFO("recover vram bo from shadow done\n"); + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 524c21d56f75..113738cbb32c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -553,7 +553,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, if (!r) { bo->shadow->parent = amdgpu_bo_ref(bo); mutex_lock(&adev->shadow_list_lock); - list_add_tail(&bo->shadow_list, &adev->shadow_list); + list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list); mutex_unlock(&adev->shadow_list_lock); } @@ -685,13 +685,10 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo) } /** - * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object - * @adev: amdgpu device object - * @ring: amdgpu_ring for the engine handling the buffer operations - * @bo: &amdgpu_bo buffer to be restored - * @resv: reservation object with embedded fence + * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow + * + * @shadow: &amdgpu_bo shadow to be restored * @fence: dma_fence associated with the operation - * @direct: whether to submit the job directly * * Copies a buffer object's shadow content back to the object. * This is used for recovering a buffer from its shadow in case of a gpu @@ -700,36 +697,19 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo) * Returns: * 0 for success or a negative error code on failure. */ -int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct reservation_object *resv, - struct dma_fence **fence, - bool direct) +int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence) { - struct amdgpu_bo *shadow = bo->shadow; - uint64_t bo_addr, shadow_addr; - int r; - - if (!shadow) - return -EINVAL; - - bo_addr = amdgpu_bo_gpu_offset(bo); - shadow_addr = amdgpu_bo_gpu_offset(bo->shadow); - - r = reservation_object_reserve_shared(bo->tbo.resv); - if (r) - goto err; + struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev); + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + uint64_t shadow_addr, parent_addr; - r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr, - amdgpu_bo_size(bo), resv, fence, - direct, false); - if (!r) - amdgpu_bo_fence(bo, *fence, true); + shadow_addr = amdgpu_bo_gpu_offset(shadow); + parent_addr = amdgpu_bo_gpu_offset(shadow->parent); -err: - return r; + return amdgpu_copy_buffer(ring, shadow_addr, parent_addr, + amdgpu_bo_size(shadow), NULL, fence, + true, false); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 64337ff2ad63..7d3312d0da11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -273,12 +273,8 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, struct reservation_object *resv, struct dma_fence **fence, bool direct); int amdgpu_bo_validate(struct amdgpu_bo *bo); -int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct reservation_object *resv, - struct dma_fence **fence, - bool direct); +int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, + struct dma_fence **fence); uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, uint32_t domain); -- GitLab From 77a2faa55c1a497f4e7e89eabd11830f0e3cb3dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 16:06:31 +0200 Subject: 
[PATCH 1623/1692] drm/amdgpu: fix up GDS/GWS/OA shifting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That only worked by pure coincident. Completely remove the shifting and always apply correct PAGE_SHIFT. Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 12 ++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h | 7 ------- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 12 +++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 14 +++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15 +++------------ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 9 --------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 9 --------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 +----------- 9 files changed, 25 insertions(+), 71 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d762d78e5102..8836186eb5ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -721,16 +721,16 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo)); if (gds) { - p->job->gds_base = amdgpu_bo_gpu_offset(gds); - p->job->gds_size = amdgpu_bo_size(gds); + p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; + p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; } if (gws) { - p->job->gws_base = amdgpu_bo_gpu_offset(gws); - p->job->gws_size = amdgpu_bo_size(gws); + p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT; + p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT; } if (oa) { - p->job->oa_base = amdgpu_bo_gpu_offset(oa); - p->job->oa_size = amdgpu_bo_size(oa); + p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT; + p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT; } if (!r && p->uf_entry.tv.bo) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index e73728d90388..ecbcefe49a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -24,13 +24,6 @@ #ifndef __AMDGPU_GDS_H__ #define __AMDGPU_GDS_H__ -/* Because TTM request that alloacted buffer should be PAGE_SIZE aligned, - * we should report GDS/GWS/OA size as PAGE_SIZE aligned - * */ -#define AMDGPU_GDS_SHIFT 2 -#define AMDGPU_GWS_SHIFT PAGE_SHIFT -#define AMDGPU_OA_SHIFT PAGE_SHIFT - struct amdgpu_ring; struct amdgpu_bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d30a0838851b..7b3d1ebda9df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -244,16 +244,10 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, return -EINVAL; } flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS) - size = size << AMDGPU_GDS_SHIFT; - else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS) - size = size << AMDGPU_GWS_SHIFT; - else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA) - size = size << AMDGPU_OA_SHIFT; - else - return -EINVAL; + /* GDS allocations must be DW aligned */ + if (args->in.domains & AMDGPU_GEM_DOMAIN_GDS) + size = ALIGN(size, 4); } - size = roundup(size, PAGE_SIZE); if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { r = amdgpu_bo_reserve(vm->root.base.bo, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 
dc4b2f34e3ea..a64056dadc58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -528,13 +528,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_amdgpu_info_gds gds_info; memset(&gds_info, 0, sizeof(gds_info)); - gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT; - gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT; - gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT; - gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT; - gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT; - gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT; - gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT; + gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size; + gds_info.compute_partition_size = adev->gds.mem.cs_partition_size; + gds_info.gds_total_size = adev->gds.mem.total_size; + gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size; + gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size; + gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size; + gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size; return copy_to_user(out, &gds_info, min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 113738cbb32c..904014dc5915 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -427,7 +427,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, int r; page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT; - size = ALIGN(size, PAGE_SIZE); + if (bp->domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | + AMDGPU_GEM_DOMAIN_OA)) + size <<= PAGE_SHIFT; + else + size = ALIGN(size, PAGE_SIZE); if (!amdgpu_bo_validate_size(adev, size, bp->domain)) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d61910873627..0c4ab72474e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1845,19 +1845,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) (unsigned)(gtt_size / (1024 * 1024))); /* Initialize various on-chip memory pools */ - adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; - adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; - adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT; - adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT; - adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT; - adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT; - adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT; - adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT; - adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT; /* GDS Memory */ if (adev->gds.mem.total_size) { r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, - adev->gds.mem.total_size >> PAGE_SHIFT); + adev->gds.mem.total_size); if (r) { DRM_ERROR("Failed initializing GDS heap.\n"); return r; @@ -1867,7 +1858,7 @@ int amdgpu_ttm_init(struct 
amdgpu_device *adev) /* GWS */ if (adev->gds.gws.total_size) { r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, - adev->gds.gws.total_size >> PAGE_SHIFT); + adev->gds.gws.total_size); if (r) { DRM_ERROR("Failed initializing gws heap.\n"); return r; @@ -1877,7 +1868,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* OA */ if (adev->gds.oa.total_size) { r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, - adev->gds.oa.total_size >> PAGE_SHIFT); + adev->gds.oa.total_size); if (r) { DRM_ERROR("Failed initializing oa heap.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index a15d9c0f233b..c0f9732cbaf7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4170,15 +4170,6 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, uint32_t gws_base, uint32_t gws_size, uint32_t oa_base, uint32_t oa_size) { - gds_base = gds_base >> AMDGPU_GDS_SHIFT; - gds_size = gds_size >> AMDGPU_GDS_SHIFT; - - gws_base = gws_base >> AMDGPU_GWS_SHIFT; - gws_size = gws_size >> AMDGPU_GWS_SHIFT; - - oa_base = oa_base >> AMDGPU_OA_SHIFT; - oa_size = oa_size >> AMDGPU_OA_SHIFT; - /* GDS Base */ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 11e6ccdfc3d1..96df23c99cfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5396,15 +5396,6 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, uint32_t gws_base, uint32_t gws_size, uint32_t oa_base, uint32_t oa_size) { - gds_base = gds_base >> AMDGPU_GDS_SHIFT; - gds_size = gds_size >> AMDGPU_GDS_SHIFT; - - gws_base = gws_base >> AMDGPU_GWS_SHIFT; - gws_size = gws_size >> AMDGPU_GWS_SHIFT; - - oa_base = oa_base >> AMDGPU_OA_SHIFT; - oa_size = oa_size >> AMDGPU_OA_SHIFT; - /* GDS Base */ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1a298f17b7dc..528a8a567633 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1527,8 +1527,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) gfx_v9_0_write_data_to_reg(ring, 0, false, SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), (adev->gds.mem.total_size + - adev->gfx.ngg.gds_reserve_size) >> - AMDGPU_GDS_SHIFT); + adev->gfx.ngg.gds_reserve_size)); amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5)); amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC | @@ -3472,15 +3471,6 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring, { struct amdgpu_device *adev = ring->adev; - gds_base = gds_base >> AMDGPU_GDS_SHIFT; - gds_size = gds_size >> AMDGPU_GDS_SHIFT; - - gws_base = gws_base >> AMDGPU_GWS_SHIFT; - gws_size = gws_size >> AMDGPU_GWS_SHIFT; - - oa_base = oa_base >> AMDGPU_OA_SHIFT; - oa_size = oa_size >> AMDGPU_OA_SHIFT; - /* GDS Base */ gfx_v9_0_write_data_to_reg(ring, 0, false, SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid, -- GitLab From c832c346cdf9022872655be621880e0f66f4135d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 20:59:27 +0200 Subject: [PATCH 1624/1692] drm/amdgpu: initialize GDS/GWS/OA domains even when they are zero sized MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stops 
crashing on SI. Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 48 ++++++++++--------------- 1 file changed, 18 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 0c4ab72474e4..d83f4e265c5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1845,34 +1845,25 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) (unsigned)(gtt_size / (1024 * 1024))); /* Initialize various on-chip memory pools */ - /* GDS Memory */ - if (adev->gds.mem.total_size) { - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, - adev->gds.mem.total_size); - if (r) { - DRM_ERROR("Failed initializing GDS heap.\n"); - return r; - } + r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, + adev->gds.mem.total_size); + if (r) { + DRM_ERROR("Failed initializing GDS heap.\n"); + return r; } - /* GWS */ - if (adev->gds.gws.total_size) { - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, - adev->gds.gws.total_size); - if (r) { - DRM_ERROR("Failed initializing gws heap.\n"); - return r; - } + r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, + adev->gds.gws.total_size); + if (r) { + DRM_ERROR("Failed initializing gws heap.\n"); + return r; } - /* OA */ - if (adev->gds.oa.total_size) { - r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, - adev->gds.oa.total_size); - if (r) { - DRM_ERROR("Failed initializing oa heap.\n"); - return r; - } + r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, + adev->gds.oa.total_size); + if (r) { + DRM_ERROR("Failed initializing oa heap.\n"); + return r; } /* Register debugfs entries for amdgpu_ttm */ @@ -1909,12 +1900,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); - if (adev->gds.mem.total_size) - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); - if (adev->gds.gws.total_size) - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); - if (adev->gds.oa.total_size) - ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); + ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); + ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); + ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); ttm_bo_device_release(&adev->mman.bdev); amdgpu_ttm_global_fini(adev); adev->mman.initialized = false; -- GitLab From fd395547924dad66dbe21c01e1a646f3215742ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 14 Sep 2018 21:08:57 +0200 Subject: [PATCH 1625/1692] drm/amdgpu: move reserving GDS/GWS/OA into common code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need that in the per ASIC code. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 18 ++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 19 ------------------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 19 ------------------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 19 ------------------- 4 files changed, 18 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d83f4e265c5c..a44fc12ae1f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1852,6 +1852,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); + if (r) + return r; + r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, adev->gds.gws.total_size); if (r) { @@ -1859,6 +1865,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); + if (r) + return r; + r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, adev->gds.oa.total_size); if (r) { @@ -1866,6 +1878,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); + if (r) + return r; + /* Register debugfs entries for amdgpu_ttm */ r = amdgpu_ttm_debugfs_init(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index c0f9732cbaf7..fc39ebbc9d9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4582,25 +4582,6 @@ static int gfx_v7_0_sw_init(void *handle) } } - /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, - &adev->gds.gds_gfx_bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, - &adev->gds.gws_gfx_bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, - &adev->gds.oa_gfx_bo, NULL, NULL); - if (r) - return r; - adev->gfx.ce_ram_size = 0x8000; gfx_v7_0_gpu_early_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 96df23c99cfb..470dc80f4fe7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2161,25 +2161,6 @@ static int gfx_v8_0_sw_init(void *handle) if (r) return r; - /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, - &adev->gds.gds_gfx_bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, - &adev->gds.gws_gfx_bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, - &adev->gds.oa_gfx_bo, NULL, NULL); - if (r) - return r; - adev->gfx.ce_ram_size = 0x8000; r = gfx_v8_0_gpu_early_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 
528a8a567633..f369d9603435 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1700,25 +1700,6 @@ static int gfx_v9_0_sw_init(void *handle) if (r) return r; - /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, - &adev->gds.gds_gfx_bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, - &adev->gds.gws_gfx_bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, - &adev->gds.oa_gfx_bo, NULL, NULL); - if (r) - return r; - adev->gfx.ce_ram_size = 0x8000; r = gfx_v9_0_gpu_early_init(adev); -- GitLab From d1766202779e81d0f2a94c4650a6ba31497d369d Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 2 Aug 2018 17:08:16 +0200 Subject: [PATCH 1626/1692] x86/kvm/lapic: always disable MMIO interface in x2APIC mode When VMX is used with flexpriority disabled (because of no support or if disabled with module parameter) MMIO interface to lAPIC is still available in x2APIC mode while it shouldn't be (kvm-unit-tests): PASS: apic_disable: Local apic enabled in x2APIC mode PASS: apic_disable: CPUID.1H:EDX.APIC[bit 9] is set FAIL: apic_disable: *0xfee00030: 50014 The issue appears because we basically do nothing while switching to x2APIC mode when APIC access page is not used. apic_mmio_{read,write} only check if lAPIC is disabled before proceeding to actual write. When APIC access is virtualized we correctly manipulate with VMX controls in vmx_set_virtual_apic_mode() and we don't get vmexits from memory writes in x2APIC mode so there's no issue. Disabling MMIO interface seems to be easy. The question is: what do we do with these reads and writes? If we add apic_x2apic_mode() check to apic_mmio_in_range() and return -EOPNOTSUPP these reads and writes will go to userspace. When lAPIC is in kernel, Qemu uses this interface to inject MSIs only (see kvm_apic_mem_write() in hw/i386/kvm/apic.c). This somehow works with disabled lAPIC but when we're in xAPIC mode we will get a real injected MSI from every write to lAPIC. Not good. The simplest solution seems to be to just ignore writes to the region and return ~0 for all reads when we're in x2APIC mode. This is what this patch does. However, this approach is inconsistent with what currently happens when flexpriority is enabled: we allocate APIC access page and create KVM memory region so in x2APIC modes all reads and writes go to this pre-allocated page which is, btw, the same for all vCPUs. 
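As a practical aside, not part of the patch itself: the new behaviour is gated behind a quirk, so a VMM that prefers to receive these accesses as MMIO exits (the -EOPNOTSUPP path described above) can opt out with KVM_CAP_DISABLE_QUIRKS. A minimal userspace sketch, assuming vm_fd is an already-created KVM VM file descriptor:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: turn off the MMIO-hole behaviour so x2APIC-mode accesses to the
 * legacy APIC MMIO window are forwarded to userspace instead of being
 * dropped / read back as all-ones.  vm_fd is assumed to exist already.
 */
static int disable_lapic_mmio_hole(int vm_fd)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_DISABLE_QUIRKS,
                /* args[0] is a bitmask of quirks to disable */
                .args = { KVM_X86_QUIRK_LAPIC_MMIO_HOLE },
        };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}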
Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- arch/x86/include/uapi/asm/kvm.h | 1 + arch/x86/kvm/lapic.c | 22 +++++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 86299efa804a..fd23d5778ea1 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -377,6 +377,7 @@ struct kvm_sync_regs { #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) +#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 17c0472c5b34..fbb0e6df121b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1344,9 +1344,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read); static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) { - return kvm_apic_hw_enabled(apic) && - addr >= apic->base_address && - addr < apic->base_address + LAPIC_MMIO_LENGTH; + return addr >= apic->base_address && + addr < apic->base_address + LAPIC_MMIO_LENGTH; } static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, @@ -1358,6 +1357,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, if (!apic_mmio_in_range(apic, address)) return -EOPNOTSUPP; + if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) { + if (!kvm_check_has_quirk(vcpu->kvm, + KVM_X86_QUIRK_LAPIC_MMIO_HOLE)) + return -EOPNOTSUPP; + + memset(data, 0xff, len); + return 0; + } + kvm_lapic_reg_read(apic, offset, len, data); return 0; @@ -1917,6 +1925,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, if (!apic_mmio_in_range(apic, address)) return -EOPNOTSUPP; + if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) { + if (!kvm_check_has_quirk(vcpu->kvm, + KVM_X86_QUIRK_LAPIC_MMIO_HOLE)) + return -EOPNOTSUPP; + + return 0; + } + /* * APIC register must be aligned on 128-bits boundary. * 32/64/128 bits registers must be accessed thru 32 bits. -- GitLab From d35b34a9a70edae7ef923f100e51b8b5ae9fe899 Mon Sep 17 00:00:00 2001 From: Junaid Shahid Date: Wed, 8 Aug 2018 17:45:24 -0700 Subject: [PATCH 1627/1692] kvm: mmu: Don't read PDPTEs when paging is not enabled kvm should not attempt to read guest PDPTEs when CR0.PG = 0 and CR4.PAE = 1. 
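In other words, the PDPTEs only matter while PAE paging is actually active. A sketch of the combined condition the two hunks below converge on (the helper name is made up for illustration; the real code open-codes the checks):

/* Sketch only: mirrors the checks added in this patch. */
static bool pae_paging_active(struct kvm_vcpu *vcpu)
{
        /* PAE PDPTEs are in use iff CR0.PG = 1, CR4.PAE = 1 and the vCPU
         * is not in long mode.
         */
        return is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu);
}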
Signed-off-by: Junaid Shahid Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 542f6315444d..5c870203737f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu) gfn_t gfn; int r; - if (is_long_mode(vcpu) || !is_pae(vcpu)) + if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, @@ -8177,7 +8177,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); - if (!is_long_mode(vcpu) && is_pae(vcpu)) { + if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } -- GitLab From 83b20b28c670868bfb717be4fe1557c925a81657 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Fri, 7 Sep 2018 19:59:47 +0800 Subject: [PATCH 1628/1692] KVM: x86: don't reset root in kvm_mmu_setup() Here is the code path which shows kvm_mmu_setup() is invoked after kvm_mmu_create(). Since kvm_mmu_setup() is only invoked in this code path, this means the root_hpa and prev_roots are guaranteed to be invalid. And it is not necessary to reset it again. kvm_vm_ioctl_create_vcpu() kvm_arch_vcpu_create() vmx_create_vcpu() kvm_vcpu_init() kvm_arch_vcpu_init() kvm_mmu_create() kvm_arch_vcpu_setup() kvm_mmu_setup() kvm_init_mmu() This patch set reset_roots to false in kmv_mmu_setup(). Fixes: 50c28f21d045dde8c52548f8482d456b3f0956f5 Signed-off-by: Wei Yang Reviewed-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e24ea7067373..5402c53a079b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5417,7 +5417,12 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu) { MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); - kvm_init_mmu(vcpu, true); + /* + * kvm_mmu_setup() is called only on vCPU initialization. + * Therefore, no need to reset mmu roots as they are not yet + * initialized. 
+ */ + kvm_init_mmu(vcpu, false); } static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, -- GitLab From 6bd317d3c865ebcddcb287a424093fe4758c40ef Mon Sep 17 00:00:00 2001 From: Lei Yang Date: Wed, 29 Aug 2018 15:04:08 +0800 Subject: [PATCH 1629/1692] kvm: selftests: use -pthread instead of -lpthread I run into the following error testing/selftests/kvm/dirty_log_test.c:285: undefined reference to `pthread_create' testing/selftests/kvm/dirty_log_test.c:297: undefined reference to `pthread_join' collect2: error: ld returned 1 exit status my gcc version is gcc version 4.8.4 "-pthread" would work everywhere Signed-off-by: Lei Yang Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 03b0f551bedf..48c970c90353 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -20,7 +20,7 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$( Date: Fri, 7 Sep 2018 05:45:02 +0000 Subject: [PATCH 1630/1692] KVM/MMU: Fix comment in walk_shadow_page_lockless_end() kvm_commit_zap_page() has been renamed to kvm_mmu_commit_zap_page() This patch is to fix the commit. Signed-off-by: Lan Tianyu Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5402c53a079b..d7e9bce6ff61 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -899,7 +899,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) { /* * Make sure the write to vcpu->mode is not reordered in front of - * reads to sptes. If it does, kvm_commit_zap_page() can see us + * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. */ smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); -- GitLab From a101c9d63ebb294144e596bfe9b4ae3156b1be96 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 30 Aug 2018 14:49:59 +0300 Subject: [PATCH 1631/1692] KVM: SVM: Switch to bitmap_zalloc() Switch to bitmap_zalloc() to show clearly what we are allocating. Besides that it returns pointer of bitmap type instead of opaque void *. 
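For reference, the allocation pattern being adopted looks roughly like this (an illustrative sketch, not taken from the patch):

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Allocate a zeroed bitmap with room for nr_ids bits; bitmap_zalloc()
 * hides the BITS_TO_LONGS()/sizeof(unsigned long) arithmetic and keeps
 * the result typed as unsigned long *.
 */
static unsigned long *alloc_id_map(unsigned int nr_ids)
{
        return bitmap_zalloc(nr_ids, GFP_KERNEL);
}

static void free_id_map(unsigned long *map)
{
        bitmap_free(map);
}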
Signed-off-by: Andy Shevchenko Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 89c4c5aa15f1..c7f1c3fd782d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1226,8 +1226,7 @@ static __init int sev_hardware_setup(void) min_sev_asid = cpuid_edx(0x8000001F); /* Initialize SEV ASID bitmap */ - sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid), - sizeof(unsigned long), GFP_KERNEL); + sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); if (!sev_asid_bitmap) return 1; @@ -1405,7 +1404,7 @@ static __exit void svm_hardware_unsetup(void) int cpu; if (svm_sev_enabled()) - kfree(sev_asid_bitmap); + bitmap_free(sev_asid_bitmap); for_each_possible_cpu(cpu) svm_cpu_uninit(cpu); -- GitLab From 4c008127e4716d246b44b403f8a65ae9744d32c4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 27 Aug 2018 15:21:10 -0700 Subject: [PATCH 1632/1692] KVM: VMX: immediately mark preemption timer expired only for zero value A VMX preemption timer value of '0' at the time of VMEnter is architecturally guaranteed to cause a VMExit prior to the CPU executing any instructions in the guest. This architectural definition is in place to ensure that a previously expired timer is correctly recognized by the CPU as it is possible for the timer to reach zero and not trigger a VMexit due to a higher priority VMExit being signalled instead, e.g. a pending #DB that morphs into a VMExit. Whether by design or coincidence, commit f4124500c2c1 ("KVM: nVMX: Fully emulate preemption timer") special cased timer values of '0' and '1' to ensure prompt delivery of the VMExit. Unlike '0', a timer value of '1' has no has no architectural guarantees regarding when it is delivered. Modify the timer emulation to trigger immediate VMExit if and only if the timer value is '0', and document precisely why '0' is special. Do this even if calibration of the virtual TSC failed, i.e. VMExit will occur immediately regardless of the frequency of the timer. Making only '0' a special case gives KVM leeway to be more aggressive in ensuring the VMExit is injected prior to executing instructions in the nested guest, and also eliminates any ambiguity as to why '1' is a special case, e.g. why wasn't the threshold for a "short timeout" set to 10, 100, 1000, etc... Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 533a327372c8..4655d6dd6759 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11427,16 +11427,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vcpu->arch.virtual_tsc_khz == 0) - return; - - /* Make sure short timeouts reliably trigger an immediate vmexit. - * hrtimer_start does not guarantee this. */ - if (preemption_timeout <= 1) { + /* + * A timer value of zero is architecturally guaranteed to cause + * a VMExit prior to executing any instructions in the guest. 
+ */ + if (preemption_timeout == 0) { vmx_preemption_timer_fn(&vmx->nested.preemption_timer); return; } + if (vcpu->arch.virtual_tsc_khz == 0) + return; + preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; preemption_timeout *= 1000000; do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); -- GitLab From f459a707ed313f110e4939d634317edcf9e96774 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 27 Aug 2018 15:21:11 -0700 Subject: [PATCH 1633/1692] KVM: VMX: modify preemption timer bit only when arming timer Provide a singular location where the VMX preemption timer bit is set/cleared so that future usages of the preemption timer can ensure the VMCS bit is up-to-date without having to modify unrelated code paths. For example, the preemption timer can be used to force an immediate VMExit. Cache the status of the timer to avoid redundant VMREAD and VMWRITE, e.g. if the timer stays armed across multiple VMEnters/VMExits. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 61 ++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4655d6dd6759..62670b2f6d48 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -397,6 +397,7 @@ struct loaded_vmcs { int cpu; bool launched; bool nmi_known_unmasked; + bool hv_timer_armed; /* Support for vnmi-less CPUs */ int soft_vnmi_blocked; ktime_t entry_time; @@ -10595,24 +10596,38 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) msrs[i].host, false); } -static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) +static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val) +{ + vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val); + if (!vmx->loaded_vmcs->hv_timer_armed) + vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, + PIN_BASED_VMX_PREEMPTION_TIMER); + vmx->loaded_vmcs->hv_timer_armed = true; +} + +static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 tscl; u32 delta_tsc; - if (vmx->hv_deadline_tsc == -1) - return; + if (vmx->hv_deadline_tsc != -1) { + tscl = rdtsc(); + if (vmx->hv_deadline_tsc > tscl) + /* set_hv_timer ensures the delta fits in 32-bits */ + delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> + cpu_preemption_timer_multi); + else + delta_tsc = 0; - tscl = rdtsc(); - if (vmx->hv_deadline_tsc > tscl) - /* sure to be 32 bit only because checked on set_hv_timer */ - delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> - cpu_preemption_timer_multi); - else - delta_tsc = 0; + vmx_arm_hv_timer(vmx, delta_tsc); + return; + } - vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); + if (vmx->loaded_vmcs->hv_timer_armed) + vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, + PIN_BASED_VMX_PREEMPTION_TIMER); + vmx->loaded_vmcs->hv_timer_armed = false; } static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) @@ -10672,7 +10687,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) atomic_switch_perf_msrs(vmx); - vmx_arm_hv_timer(vcpu); + vmx_update_hv_timer(vcpu); /* * If this vCPU has touched SPEC_CTRL, restore the guest's value if @@ -12078,11 +12093,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, exec_control = vmcs12->pin_based_vm_exec_control; - /* Preemption timer setting is only taken from vmcs01. */ - exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + /* Preemption timer setting is computed directly in vmx_vcpu_run. 
*/ exec_control |= vmcs_config.pin_based_exec_ctrl; - if (vmx->hv_deadline_tsc == -1) - exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + vmx->loaded_vmcs->hv_timer_armed = false; /* Posted interrupts setting is only taken from vmcs12. */ if (nested_cpu_has_posted_intr(vmcs12)) { @@ -13255,12 +13269,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); - if (vmx->hv_deadline_tsc == -1) - vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); - else - vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); + if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); @@ -13464,18 +13473,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) return -ERANGE; vmx->hv_deadline_tsc = tscl + delta_tsc; - vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); - return delta_tsc == 0; } static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - vmx->hv_deadline_tsc = -1; - vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); + to_vmx(vcpu)->hv_deadline_tsc = -1; } #endif -- GitLab From d264ee0c2ed20c6a426663590d4fc7a36cb6abd7 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 27 Aug 2018 15:21:12 -0700 Subject: [PATCH 1634/1692] KVM: VMX: use preemption timer to force immediate VMExit A VMX preemption timer value of '0' is guaranteed to cause a VMExit prior to the CPU executing any instructions in the guest. Use the preemption timer (if it's supported) to trigger immediate VMExit in place of the current method of sending a self-IPI. This ensures that pending VMExit injection to L1 occurs prior to executing any instructions in the guest (regardless of nesting level). When deferring VMExit injection, KVM generates an immediate VMExit from the (possibly nested) guest by sending itself an IPI. Because hardware interrupts are blocked prior to VMEnter and are unblocked (in hardware) after VMEnter, this results in taking a VMExit(INTR) before any guest instruction is executed. But, as this approach relies on the IPI being received before VMEnter executes, it only works as intended when KVM is running as L0. Because there are no architectural guarantees regarding when IPIs are delivered, when running nested the INTR may "arrive" long after L2 is running e.g. L0 KVM doesn't force an immediate switch to L1 to deliver an INTR. For the most part, this unintended delay is not an issue since the events being injected to L1 also do not have architectural guarantees regarding their timing. The notable exception is the VMX preemption timer[1], which is architecturally guaranteed to cause a VMExit prior to executing any instructions in the guest if the timer value is '0' at VMEnter. Specifically, the delay in injecting the VMExit causes the preemption timer KVM unit test to fail when run in a nested guest. Note: this approach is viable even on CPUs with a broken preemption timer, as broken in this context only means the timer counts at the wrong rate. There are no known errata affecting timer value of '0'. [1] I/O SMIs also have guarantees on when they arrive, but I have no idea if/how those are emulated in KVM. 
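To summarize the mechanism in one place, the control flow after this patch is roughly the following (a sketch distilled from the diff below, not literal kernel code):

/*
 * vcpu_enter_guest():
 *     if (req_immediate_exit)
 *         kvm_x86_ops->request_immediate_exit(vcpu);
 *
 * VMX with a usable preemption timer:
 *     vmx_request_immediate_exit() sets vmx->req_immediate_exit, and
 *     vmx_update_hv_timer() arms the preemption timer with 0 just before
 *     VMEnter, guaranteeing a VMExit before any guest instruction runs.
 *
 * SVM, or VMX without a preemption timer:
 *     __kvm_request_immediate_exit() -> smp_send_reschedule(vcpu->cpu),
 *     i.e. the old self-IPI behaviour.
 */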
Signed-off-by: Sean Christopherson [Use a hook for SVM instead of leaving the default in x86.c - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/svm.c | 2 ++ arch/x86/kvm/vmx.c | 21 ++++++++++++++++++++- arch/x86/kvm/x86.c | 8 +++++++- 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8e90488c3d56..bffb25b50425 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1055,6 +1055,7 @@ struct kvm_x86_ops { bool (*umip_emulated)(void); int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); + void (*request_immediate_exit)(struct kvm_vcpu *vcpu); void (*sched_in)(struct kvm_vcpu *kvm, int cpu); @@ -1482,6 +1483,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); +void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu); int kvm_is_in_guest(void); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index c7f1c3fd782d..d96092b35936 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -7148,6 +7148,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .check_intercept = svm_check_intercept, .handle_external_intr = svm_handle_external_intr, + .request_immediate_exit = __kvm_request_immediate_exit, + .sched_in = svm_sched_in, .pmu_ops = &amd_pmu_ops, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 62670b2f6d48..a4a1585f47f1 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1020,6 +1020,8 @@ struct vcpu_vmx { int ple_window; bool ple_window_dirty; + bool req_immediate_exit; + /* Support for PML */ #define PML_ENTITY_NUM 512 struct page *pml_pg; @@ -2865,6 +2867,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) u16 fs_sel, gs_sel; int i; + vmx->req_immediate_exit = false; + if (vmx->loaded_cpu_state) return; @@ -7967,6 +7971,9 @@ static __init int hardware_setup(void) kvm_x86_ops->enable_log_dirty_pt_masked = NULL; } + if (!cpu_has_vmx_preemption_timer()) + kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit; + if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { u64 vmx_msr; @@ -9209,7 +9216,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) static int handle_preemption_timer(struct kvm_vcpu *vcpu) { - kvm_lapic_expired_hv_timer(vcpu); + if (!to_vmx(vcpu)->req_immediate_exit) + kvm_lapic_expired_hv_timer(vcpu); return 1; } @@ -10611,6 +10619,11 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) u64 tscl; u32 delta_tsc; + if (vmx->req_immediate_exit) { + vmx_arm_hv_timer(vmx, 0); + return; + } + if (vmx->hv_deadline_tsc != -1) { tscl = rdtsc(); if (vmx->hv_deadline_tsc > tscl) @@ -12879,6 +12892,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) return 0; } +static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) +{ + to_vmx(vcpu)->req_immediate_exit = true; +} + static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = @@ -14135,6 +14153,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .umip_emulated = vmx_umip_emulated, .check_nested_events = vmx_check_nested_events, + .request_immediate_exit = vmx_request_immediate_exit, .sched_in = vmx_sched_in, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5c870203737f..9d0fda9056de 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7361,6 +7361,12 @@ void 
kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); +void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) +{ + smp_send_reschedule(vcpu->cpu); +} +EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); + /* * Returns 1 to let vcpu_run() continue the guest execution loop without * exiting to the userspace. Otherwise, the value will be returned to the @@ -7565,7 +7571,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (req_immediate_exit) { kvm_make_request(KVM_REQ_EVENT, vcpu); - smp_send_reschedule(vcpu->cpu); + kvm_x86_ops->request_immediate_exit(vcpu); } trace_kvm_entry(vcpu->vcpu_id); -- GitLab From a1efa9b70097a7ebb7c0a10bb72648776771b281 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 27 Aug 2018 18:48:57 +0200 Subject: [PATCH 1635/1692] x86/hyper-v: rename ipi_arg_{ex,non_ex} structures These structures are going to be used from KVM code so let's make their names reflect their Hyper-V origin. Signed-off-by: Vitaly Kuznetsov Reviewed-by: Roman Kagan Acked-by: K. Y. Srinivasan Signed-off-by: Paolo Bonzini --- arch/x86/hyperv/hv_apic.c | 8 ++++---- arch/x86/include/asm/hyperv-tlfs.h | 16 +++++++++------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 5b0f613428c2..2c43e3055948 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -95,8 +95,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val) */ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) { - struct ipi_arg_ex **arg; - struct ipi_arg_ex *ipi_arg; + struct hv_send_ipi_ex **arg; + struct hv_send_ipi_ex *ipi_arg; unsigned long flags; int nr_bank = 0; int ret = 1; @@ -105,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) return false; local_irq_save(flags); - arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); + arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); ipi_arg = *arg; if (unlikely(!ipi_arg)) @@ -135,7 +135,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) static bool __send_ipi_mask(const struct cpumask *mask, int vector) { int cur_cpu, vcpu; - struct ipi_arg_non_ex ipi_arg; + struct hv_send_ipi ipi_arg; int ret = 1; trace_hyperv_send_ipi_mask(mask, vector); diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index e977b6b3a538..00e01d215f74 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -726,19 +726,21 @@ struct hv_enlightened_vmcs { #define HV_STIMER_AUTOENABLE (1ULL << 3) #define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) -struct ipi_arg_non_ex { - u32 vector; - u32 reserved; - u64 cpu_mask; -}; - struct hv_vpset { u64 format; u64 valid_bank_mask; u64 bank_contents[]; }; -struct ipi_arg_ex { +/* HvCallSendSyntheticClusterIpi hypercall */ +struct hv_send_ipi { + u32 vector; + u32 reserved; + u64 cpu_mask; +}; + +/* HvCallSendSyntheticClusterIpiEx hypercall */ +struct hv_send_ipi_ex { u32 vector; u32 reserved; struct hv_vpset vp_set; -- GitLab From 822f312d47f0200dc0999c9f006fe94aa43bd0bd Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 12 Sep 2018 15:33:45 +0200 Subject: [PATCH 1636/1692] kvm: x86: make kvm_{load|put}_guest_fpu() static The functions kvm_load_guest_fpu() kvm_put_guest_fpu() are only used locally, make them static. This requires also that both functions are moved because they are used before their implementation. 
Those functions were exported (via EXPORT_SYMBOL) before commit e5bb40251a920 ("KVM: Drop kvm_{load,put}_guest_fpu() exports"). Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 46 ++++++++++++++++++++-------------------- include/linux/kvm_host.h | 2 -- 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9d0fda9056de..6f4789398876 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7835,6 +7835,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) return 0; } +/* Swap (qemu) user FPU context for the guest FPU context. */ +static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + copy_fpregs_to_fpstate(&vcpu->arch.user_fpu); + /* PKRU is separately restored in kvm_x86_ops->run. */ + __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, + ~XFEATURE_MASK_PKRU); + preempt_enable(); + trace_kvm_fpu(1); +} + +/* When vcpu_run ends, restore user space FPU context. */ +static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); + copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state); + preempt_enable(); + ++vcpu->stat.fpu_reload; + trace_kvm_fpu(0); +} + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; @@ -8412,29 +8435,6 @@ static void fx_init(struct kvm_vcpu *vcpu) vcpu->arch.cr0 |= X86_CR0_ET; } -/* Swap (qemu) user FPU context for the guest FPU context. */ -void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) -{ - preempt_disable(); - copy_fpregs_to_fpstate(&vcpu->arch.user_fpu); - /* PKRU is separately restored in kvm_x86_ops->run. */ - __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, - ~XFEATURE_MASK_PKRU); - preempt_enable(); - trace_kvm_fpu(1); -} - -/* When vcpu_run ends, restore user space FPU context. */ -void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) -{ - preempt_disable(); - copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); - copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state); - preempt_enable(); - ++vcpu->stat.fpu_reload; - trace_kvm_fpu(0); -} - void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0205aee44ded..c926698040e0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); -void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); -void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); -- GitLab From 5bea5123cbf08f990a1aee8f08c643a272e06a0f Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 18 Sep 2018 15:19:17 +0200 Subject: [PATCH 1637/1692] KVM: VMX: check nested state and CR4.VMXE against SMM VMX cannot be enabled under SMM, check it when CR4 is set and when nested virtualization state is restored. This should fix some WARNs reported by syzkaller, mostly around alloc_shadow_vmcs. 
Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a4a1585f47f1..16e63a92992f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5398,9 +5398,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) * To use VMXON (and later other VMX instructions), a guest * must first be able to turn on cr4.VMXE (see handle_vmon()). * So basically the check on whether to allow nested VMX - * is here. + * is here. We operate under the default treatment of SMM, + * so VMX cannot be enabled under SMM. */ - if (!nested_vmx_allowed(vcpu)) + if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) return 1; } @@ -13977,6 +13978,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) return -EINVAL; + /* + * SMM temporarily disables VMX, so we cannot be in guest mode, + * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags + * must be zero. + */ + if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags) + return -EINVAL; + if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) return -EINVAL; -- GitLab From e6c67d8cf1173b229f0c4343d1cc7925eca11c11 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Tue, 4 Sep 2018 10:56:52 +0300 Subject: [PATCH 1638/1692] KVM: nVMX: Wake blocked vCPU in guest-mode if pending interrupt in virtual APICv In case L1 do not intercept L2 HLT or enter L2 in HLT activity-state, it is possible for a vCPU to be blocked while it is in guest-mode. According to Intel SDM 26.6.5 Interrupt-Window Exiting and Virtual-Interrupt Delivery: "These events wake the logical processor if it just entered the HLT state because of a VM entry". Therefore, if L1 enters L2 in HLT activity-state and L2 has a pending deliverable interrupt in vmcs12->guest_intr_status.RVI, then the vCPU should be waken from the HLT state and injected with the interrupt. In addition, if while the vCPU is blocked (while it is in guest-mode), it receives a nested posted-interrupt, then the vCPU should also be waken and injected with the posted interrupt. To handle these cases, this patch enhances kvm_vcpu_has_events() to also check if there is a pending interrupt in L2 virtual APICv provided by L1. That is, it evaluates if there is a pending virtual interrupt for L2 by checking RVI[7:4] > VPPR[7:4] as specified in Intel SDM 29.2.1 Evaluation of Pending Interrupts. Note that this also handles the case of nested posted-interrupt by the fact RVI is updated in vmx_complete_nested_posted_interrupt() which is called from kvm_vcpu_check_block() -> kvm_arch_vcpu_runnable() -> kvm_vcpu_running() -> vmx_check_nested_events() -> vmx_complete_nested_posted_interrupt(). 
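A concrete example of the RVI/VPPR comparison, with values chosen purely for illustration:

/* Example values (not from the patch):
 *   RVI  = 0x51  -> highest pending virtual vector 0x51, priority class 5
 *   VPPR = 0x40  -> virtual processor priority class 4
 *
 * (0x51 & 0xf0) = 0x50 > (0x40 & 0xf0) = 0x40, so the interrupt is
 * deliverable to L2 and a HLT-blocked vCPU in guest mode must be woken.
 * With VPPR = 0x50 or higher the comparison fails and the vCPU stays
 * blocked.
 */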
Reviewed-by: Nikita Leshenko Reviewed-by: Darren Kenny Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/vmx.c | 22 ++++++++++++++++++++++ arch/x86/kvm/x86.c | 10 +++++++++- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bffb25b50425..af63c2ca1616 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1022,6 +1022,7 @@ struct kvm_x86_ops { void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); + bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu); void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 16e63a92992f..98b1203e8823 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6189,6 +6189,27 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) nested_mark_vmcs12_pages_dirty(vcpu); } +static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + void *vapic_page; + u32 vppr; + int rvi; + + if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || + !nested_cpu_has_vid(get_vmcs12(vcpu)) || + WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) + return false; + + rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff; + + vapic_page = kmap(vmx->nested.virtual_apic_page); + vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); + kunmap(vmx->nested.virtual_apic_page); + + return ((rvi & 0xf0) > (vppr & 0xf0)); +} + static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, bool nested) { @@ -14129,6 +14150,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .apicv_post_state_restore = vmx_apicv_post_state_restore, .hwapic_irr_update = vmx_hwapic_irr_update, .hwapic_isr_update = vmx_hwapic_isr_update, + .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6f4789398876..5fea53cdc583 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9206,6 +9206,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, kvm_page_track_flush_slot(kvm, slot); } +static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) +{ + return (is_guest_mode(vcpu) && + kvm_x86_ops->guest_apic_has_interrupt && + kvm_x86_ops->guest_apic_has_interrupt(vcpu)); +} + static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) { if (!list_empty_careful(&vcpu->async_pf.done)) @@ -9230,7 +9237,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) return true; if (kvm_arch_interrupt_allowed(vcpu) && - kvm_cpu_has_interrupt(vcpu)) + (kvm_cpu_has_interrupt(vcpu) || + kvm_guest_apic_has_interrupt(vcpu))) return true; if (kvm_hv_has_stimer_pending(vcpu)) -- GitLab From 6de84e581c083c4357b45c31b7ef71335725d850 Mon Sep 17 00:00:00 2001 From: Krish Sadhukhan Date: Thu, 23 Aug 2018 20:03:03 -0400 Subject: [PATCH 1639/1692] nVMX x86: check posted-interrupt descriptor addresss on vmentry of L2 According to section "Checks on VMX Controls" in Intel SDM vol 3C, the following check needs to be enforced on vmentry of L2 guests: - Bits 5:0 of the posted-interrupt descriptor address are all 0. 
- The posted-interrupt descriptor address does not set any bits beyond the processor's physical-address width. Signed-off-by: Krish Sadhukhan Reviewed-by: Mark Kanda Reviewed-by: Liran Alon Reviewed-by: Darren Kenny Reviewed-by: Karl Heubaum Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 98b1203e8823..581bdbd9844b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11698,11 +11698,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, * bits 15:8 should be zero in posted_intr_nv, * the descriptor address has been already checked * in nested_get_vmcs12_pages. + * + * bits 5:0 of posted_intr_desc_addr should be zero. */ if (nested_cpu_has_posted_intr(vmcs12) && (!nested_cpu_has_vid(vmcs12) || !nested_exit_intr_ack_set(vcpu) || - vmcs12->posted_intr_nv & 0xff00)) + (vmcs12->posted_intr_nv & 0xff00) || + (vmcs12->posted_intr_desc_addr & 0x3f) || + (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr)))) return -EINVAL; /* tpr shadow is needed by all apicv features. */ -- GitLab From ba8e23db59dc07e5de74fd7bd310e297d3e4ba54 Mon Sep 17 00:00:00 2001 From: Krish Sadhukhan Date: Tue, 4 Sep 2018 14:42:58 -0400 Subject: [PATCH 1640/1692] nVMX x86: Check VPID value on vmentry of L2 guests According to section "Checks on VMX Controls" in Intel SDM vol 3C, the following check needs to be enforced on vmentry of L2 guests: If the 'enable VPID' VM-execution control is 1, the value of the of the VPID VM-execution control field must not be 0000H. Signed-off-by: Krish Sadhukhan Reviewed-by: Mark Kanda Reviewed-by: Liran Alon Reviewed-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 581bdbd9844b..06412ba46aa3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12373,6 +12373,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; -- GitLab From d84f1cff9028c00ee870f0293b0c7a3866071dfa Mon Sep 17 00:00:00 2001 From: Drew Schmitt Date: Mon, 20 Aug 2018 10:32:14 -0700 Subject: [PATCH 1641/1692] KVM: x86: Turbo bits in MSR_PLATFORM_INFO Allow userspace to set turbo bits in MSR_PLATFORM_INFO. Previously, only the CPUID faulting bit was settable. But now any bit in MSR_PLATFORM_INFO would be settable. This can be used, for example, to convey frequency information about the platform on which the guest is running. 
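For illustration, a host-initiated write of this MSR from userspace could look roughly like the sketch below; vcpu_fd and the value layout are assumptions of the example, and MSR_PLATFORM_INFO is MSR 0xce:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: KVM_SET_MSRS issued by the VMM is host-initiated, so after this
 * patch any MSR_PLATFORM_INFO bits can be populated, not just the
 * CPUID-faulting bit.
 */
static int set_platform_info(int vcpu_fd, uint64_t value)
{
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } msrs = {
                .hdr   = { .nmsrs = 1 },
                .entry = { .index = 0xce /* MSR_PLATFORM_INFO */, .data = value },
        };

        return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
}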
Signed-off-by: Drew Schmitt Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5fea53cdc583..e127703e277e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_PLATFORM_INFO: if (!msr_info->host_initiated || - data & ~MSR_PLATFORM_INFO_CPUID_FAULT || (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && cpuid_fault_enabled(vcpu))) return 1; -- GitLab From 6fbbde9a1969dfb476467ebf69a475095ef3fd4d Mon Sep 17 00:00:00 2001 From: Drew Schmitt Date: Mon, 20 Aug 2018 10:32:15 -0700 Subject: [PATCH 1642/1692] KVM: x86: Control guest reads of MSR_PLATFORM_INFO Add KVM_CAP_MSR_PLATFORM_INFO so that userspace can disable guest access to reads of MSR_PLATFORM_INFO. Disabling access to reads of this MSR gives userspace the control to "expose" this platform-dependent information to guests in a clear way. As it exists today, guests that read this MSR would get unpopulated information if userspace hadn't already set it (and prior to this patch series, only the CPUID faulting information could have been populated). This existing interface could be confusing if guests don't handle the potential for incorrect/incomplete information gracefully (e.g. zero reported for base frequency). Signed-off-by: Drew Schmitt Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 9 +++++++++ arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/x86.c | 10 ++++++++++ include/uapi/linux/kvm.h | 1 + 4 files changed, 22 insertions(+) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 8d8a372c8340..647f94128a85 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -4522,6 +4522,15 @@ hpage module parameter is not set to 1, -EINVAL is returned. While it is generally possible to create a huge page backed VM without this capability, the VM will not be able to run. +7.14 KVM_CAP_MSR_PLATFORM_INFO + +Architectures: x86 +Parameters: args[0] whether feature should be enabled or not + +With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise, +a #GP would be raised when the guest tries to access. Currently, this +capability does not enable write permissions of this MSR for the guest. + 8. Other capabilities. 
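Correspondingly, userspace toggles guest read access per VM through KVM_ENABLE_CAP. A minimal sketch, assuming vm_fd is an existing KVM VM file descriptor:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: args[0] != 0 allows guest reads of MSR_PLATFORM_INFO,
 * args[0] == 0 makes such reads raise #GP in the guest.
 */
static int set_platform_info_access(int vm_fd, int allow)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_MSR_PLATFORM_INFO,
                .args = { allow },
        };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}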
---------------------- diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index af63c2ca1616..09b2e3e2cf1b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -869,6 +869,8 @@ struct kvm_arch { bool x2apic_format; bool x2apic_broadcast_quirk_disabled; + + bool guest_can_read_msr_platform_info; }; struct kvm_vm_stat { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e127703e277e..4c39ec5fc4fe 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2779,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vcpu->arch.osvw.status; break; case MSR_PLATFORM_INFO: + if (!msr_info->host_initiated && + !vcpu->kvm->arch.guest_can_read_msr_platform_info) + return 1; msr_info->data = vcpu->arch.msr_platform_info; break; case MSR_MISC_FEATURES_ENABLES: @@ -2926,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SPLIT_IRQCHIP: case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_GET_MSR_FEATURES: + case KVM_CAP_MSR_PLATFORM_INFO: r = 1; break; case KVM_CAP_SYNC_REGS: @@ -4349,6 +4353,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, kvm->arch.pause_in_guest = true; r = 0; break; + case KVM_CAP_MSR_PLATFORM_INFO: + kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; + r = 0; + break; default: r = -EINVAL; break; @@ -8857,6 +8865,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.kvmclock_offset = -ktime_get_boot_ns(); pvclock_update_vm_gtod_copy(kvm); + kvm->arch.guest_can_read_msr_platform_info = true; + INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 07548de5c988..251be353f950 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_HPAGE_1M 156 #define KVM_CAP_NESTED_STATE 157 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 +#define KVM_CAP_MSR_PLATFORM_INFO 159 #ifdef KVM_CAP_IRQ_ROUTING -- GitLab From 8b56ee91ffc88ea01400c012e10fe22a9d233265 Mon Sep 17 00:00:00 2001 From: Drew Schmitt Date: Mon, 20 Aug 2018 10:32:16 -0700 Subject: [PATCH 1643/1692] kvm: selftests: Add platform_info_test Test guest access to MSR_PLATFORM_INFO when the capability is enabled or disabled. 
Signed-off-by: Drew Schmitt Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile | 3 +- .../testing/selftests/kvm/include/kvm_util.h | 4 + tools/testing/selftests/kvm/lib/kvm_util.c | 89 ++++++++++++++ .../selftests/kvm/platform_info_test.c | 110 ++++++++++++++++++ 5 files changed, 206 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/kvm/platform_info_test.c diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 4202139d81d9..5c34752e1cff 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,4 +1,5 @@ cr4_cpuid_sync_test +platform_info_test set_sregs_test sync_regs_test vmx_tsc_adjust_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 48c970c90353..37e4bd8619a6 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -6,7 +6,8 @@ UNAME_M := $(shell uname -m) LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c LIBKVM_x86_64 = lib/x86.c lib/vmx.c -TEST_GEN_PROGS_x86_64 = set_sregs_test +TEST_GEN_PROGS_x86_64 = platform_info_test +TEST_GEN_PROGS_x86_64 += set_sregs_test TEST_GEN_PROGS_x86_64 += sync_regs_test TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index bb5a25fb82c6..3acf9a91704c 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -50,6 +50,7 @@ enum vm_mem_backing_src_type { }; int kvm_check_cap(long cap); +int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); void kvm_vm_free(struct kvm_vm *vmp); @@ -108,6 +109,9 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_events *events); void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_events *events); +uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index); +void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value); const char *exit_reason_str(unsigned int exit_reason); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index e9ba389c48db..6fd8c089cafc 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -63,6 +63,29 @@ int kvm_check_cap(long cap) return ret; } +/* VM Enable Capability + * + * Input Args: + * vm - Virtual Machine + * cap - Capability + * + * Output Args: None + * + * Return: On success, 0. On failure a TEST_ASSERT failure is produced. + * + * Enables a capability (KVM_CAP_*) on the VM. + */ +int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) +{ + int ret; + + ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); + TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n" + " rc: %i errno: %i", ret, errno); + + return ret; +} + static void vm_open(struct kvm_vm *vm, int perm) { vm->kvm_fd = open(KVM_DEV_PATH, perm); @@ -1220,6 +1243,72 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, ret, errno); } +/* VCPU Get MSR + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * msr_index - Index of MSR + * + * Output Args: None + * + * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced. 
+ * + * Get value of MSR for VCPU. + */ +uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + int r; + + TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header); + TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n" + " rc: %i errno: %i", r, errno); + + return buffer.entry.data; +} + +/* VCPU Set MSR + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * msr_index - Index of MSR + * msr_value - New value of MSR + * + * Output Args: None + * + * Return: On success, nothing. On failure a TEST_ASSERT is produced. + * + * Set value of MSR for VCPU. + */ +void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + int r; + + TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); + memset(&buffer, 0, sizeof(buffer)); + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + buffer.entry.data = msr_value; + r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header); + TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n" + " rc: %i errno: %i", r, errno); +} + /* VM VCPU Args Set * * Input Args: diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c new file mode 100644 index 000000000000..3764e7121265 --- /dev/null +++ b/tools/testing/selftests/kvm/platform_info_test.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test for x86 KVM_CAP_MSR_PLATFORM_INFO + * + * Copyright (C) 2018, Google LLC. + * + * This work is licensed under the terms of the GNU GPL, version 2. + * + * Verifies expected behavior of controlling guest access to + * MSR_PLATFORM_INFO. 
+ */ + +#define _GNU_SOURCE /* for program_invocation_short_name */ +#include +#include +#include +#include +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "x86.h" + +#define VCPU_ID 0 +#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00 + +static void guest_code(void) +{ + uint64_t msr_platform_info; + + for (;;) { + msr_platform_info = rdmsr(MSR_PLATFORM_INFO); + GUEST_SYNC(msr_platform_info); + asm volatile ("inc %r11"); + } +} + +static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable) +{ + struct kvm_enable_cap cap = {}; + + cap.cap = KVM_CAP_MSR_PLATFORM_INFO; + cap.flags = 0; + cap.args[0] = (int)enable; + vm_enable_cap(vm, &cap); +} + +static void test_msr_platform_info_enabled(struct kvm_vm *vm) +{ + struct kvm_run *run = vcpu_state(vm, VCPU_ID); + struct guest_args args; + + set_msr_platform_info_enabled(vm, true); + vcpu_run(vm, VCPU_ID); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Exit_reason other than KVM_EXIT_IO: %u (%s),\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + guest_args_read(vm, VCPU_ID, &args); + TEST_ASSERT(args.port == GUEST_PORT_SYNC, + "Received IO from port other than PORT_HOST_SYNC: %u\n", + run->io.port); + TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == + MSR_PLATFORM_INFO_MAX_TURBO_RATIO, + "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.", + MSR_PLATFORM_INFO_MAX_TURBO_RATIO); +} + +static void test_msr_platform_info_disabled(struct kvm_vm *vm) +{ + struct kvm_run *run = vcpu_state(vm, VCPU_ID); + + set_msr_platform_info_enabled(vm, false); + vcpu_run(vm, VCPU_ID); + TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, + "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); +} + +int main(int argc, char *argv[]) +{ + struct kvm_vm *vm; + struct kvm_run *state; + int rv; + uint64_t msr_platform_info; + + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO); + if (!rv) { + fprintf(stderr, + "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n"); + exit(KSFT_SKIP); + } + + vm = vm_create_default(VCPU_ID, 0, guest_code); + + msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); + vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, + msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); + test_msr_platform_info_disabled(vm); + test_msr_platform_info_enabled(vm); + vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); + + kvm_vm_free(vm); + + return 0; +} -- GitLab From a2045ee6692be2c50502d966089e5df4d799d236 Mon Sep 17 00:00:00 2001 From: Frank Min Date: Fri, 27 Apr 2018 03:44:11 +0800 Subject: [PATCH 1644/1692] drm/amdgpu: add vega20 sriov capability detection Add sriov capability detection for vega20, then can check if device is virtual device. 
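For context, the capability bits filled in here are consumed elsewhere through helpers that look approximately like the following (paraphrased from amdgpu_virt.h, not part of this patch):

/* Approximate shape of the amdgpu_virt.h helpers that test these caps. */
#define amdgpu_sriov_enabled(adev) \
        ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
#define amdgpu_sriov_vf(adev) \
        ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)
#define amdgpu_passthrough(adev) \
        ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)

So once nbio_v7_4_detect_hw_virt() has read RCC_IOV_FUNC_IDENTIFIER (bit 0: this function is a VF, bit 31: SR-IOV enabled), the rest of the driver can simply test amdgpu_sriov_vf(adev), as the following patch does.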
Signed-off-by: Frank Min Signed-off-by: Xiangliang Yu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 2e65447637c6..f8cee95d61cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -205,8 +205,19 @@ static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) { - if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ - adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; + uint32_t reg; + + reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER); + if (reg & 1) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; + + if (reg & 0x80000000) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + + if (!reg) { + if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; + } } static void nbio_v7_4_init_registers(struct amdgpu_device *adev) -- GitLab From 846311ae68f3c78365ebf3dff505c99e7da861cf Mon Sep 17 00:00:00 2001 From: Frank Min Date: Fri, 27 Apr 2018 03:45:50 +0800 Subject: [PATCH 1645/1692] drm/amdgpu: Exclude MM engines for vega20 virtual device Temporary disable UVD/VCE block if is virtual device Signed-off-by: Frank Min Signed-off-by: Xiangliang Yu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c4daf1f93486..138c4810a3de 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -541,8 +541,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) #endif amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); - amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); + if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) { + amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); + } break; case CHIP_RAVEN: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); -- GitLab From 30f3984ede683b98a4e8096e200df78bf0609b4f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 18 Sep 2018 15:28:24 -0500 Subject: [PATCH 1646/1692] drm/amdgpu: add new polaris pci id Add new pci id. 
Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 14 ++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 693ec5ea4950..8816c697b205 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, break; case CHIP_POLARIS10: if (type == CGS_UCODE_ID_SMU) { - if ((adev->pdev->device == 0x67df) && - ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe3) || - (adev->pdev->revision == 0xe4) || - (adev->pdev->revision == 0xe5) || - (adev->pdev->revision == 0xe7) || + if (((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe3) || + (adev->pdev->revision == 0xe4) || + (adev->pdev->revision == 0xe5) || + (adev->pdev->revision == 0xe7) || + (adev->pdev->revision == 0xef))) || + ((adev->pdev->device == 0x6fdf) && (adev->pdev->revision == 0xef))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8843a06360fa..0f41d8647376 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, /* Polaris12 */ {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, -- GitLab From bfc888261474efb0676f0e1d128f22c9692b97b1 Mon Sep 17 00:00:00 2001 From: Deepak Rawat Date: Thu, 13 Sep 2018 12:33:49 +0200 Subject: [PATCH 1647/1692] drm/vmwgfx: don't check for old_crtc_state enable status During atomic check to prepare the new topology no need to check if old_crtc_state was enabled or not. This will cause atomic_check to fail because due to connector routing a crtc can be in atomic_state even if there was no change to enable status. Detected this issue with igt run. Signed-off-by: Deepak Rawat Reviewed-by: Sinclair Yeh Signed-off-by: Thomas Hellstrom --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 23beff5d8e3c..636b962849c8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1615,7 +1615,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, struct drm_connector_state *conn_state; struct vmw_connector_state *vmw_conn_state; - if (!new_crtc_state->enable && old_crtc_state->enable) { + if (!new_crtc_state->enable) { rects[i].x1 = 0; rects[i].y1 = 0; rects[i].x2 = 0; -- GitLab From 0c1b174b1b9a497230f937345d4db76fea267398 Mon Sep 17 00:00:00 2001 From: Deepak Rawat Date: Thu, 13 Sep 2018 12:34:37 +0200 Subject: [PATCH 1648/1692] drm/vmwgfx: limit screen size to stdu_max during check_modeset For STDU individual screen target size is limited by SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers so add that limit during atomic check_modeset. 
An additional limit is placed in the update_layout ioctl to avoid requesting layouts that current user-space typically can't support. Also modified the comments to reflect current limitation on topology. Signed-off-by: Deepak Rawat Reviewed-by: Sinclair Yeh Reviewed-by: Thomas Hellstrom Signed-off-by: Thomas Hellstrom --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 30 +++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 636b962849c8..12a41b039167 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, struct drm_rect *rects) { struct vmw_private *dev_priv = vmw_priv(dev); - struct drm_mode_config *mode_config = &dev->mode_config; struct drm_rect bounding_box = {0}; u64 total_pixels = 0, pixel_mem, bb_mem; int i; for (i = 0; i < num_rects; i++) { /* - * Currently this check is limiting the topology within max - * texture/screentarget size. This should change in future when - * user-space support multiple fb with topology. + * For STDU only individual screen (screen target) is limited by + * SCREENTARGET_MAX_WIDTH/HEIGHT registers. */ - if (rects[i].x1 < 0 || rects[i].y1 < 0 || - rects[i].x2 > mode_config->max_width || - rects[i].y2 > mode_config->max_height) { - DRM_ERROR("Invalid GUI layout.\n"); + if (dev_priv->active_display_unit == vmw_du_screen_target && + (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width || + drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) { + DRM_ERROR("Screen size not supported.\n"); return -EINVAL; } @@ -2376,6 +2374,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_mode_config *mode_config = &dev->mode_config; struct drm_vmw_update_layout_arg *arg = (struct drm_vmw_update_layout_arg *)data; void __user *user_rects; @@ -2421,6 +2420,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, drm_rects[i].y1 = curr_rect.y; drm_rects[i].x2 = curr_rect.x + curr_rect.w; drm_rects[i].y2 = curr_rect.y + curr_rect.h; + + /* + * Currently this check is limiting the topology within + * mode_config->max (which actually is max texture size + * supported by virtual device). This limit is here to address + * window managers that create a big framebuffer for whole + * topology. + */ + if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 || + drm_rects[i].x2 > mode_config->max_width || + drm_rects[i].y2 > mode_config->max_height) { + DRM_ERROR("Invalid GUI layout.\n"); + ret = -EINVAL; + goto out_free; + } } ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects); -- GitLab From 140b4e67c2e1269a945c5caacdcb0c0346ad4cef Mon Sep 17 00:00:00 2001 From: Deepak Rawat Date: Thu, 13 Sep 2018 12:44:13 +0200 Subject: [PATCH 1649/1692] drm/vmwgfx: limit mode size for all display unit to texture_max For all display units, limit mode size exposed to texture_max_width/ height as this is the maximum framebuffer size that virtual device can create. 
Signed-off-by: Deepak Rawat Reviewed-by: Sinclair Yeh Reviewed-by: Thomas Hellstrom Signed-off-by: Thomas Hellstrom --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 12a41b039167..6a712a8d59e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2214,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, if (dev_priv->assume_16bpp) assumed_bpp = 2; + max_width = min(max_width, dev_priv->texture_max_width); + max_height = min(max_height, dev_priv->texture_max_height); + + /* + * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/ + * HEIGHT registers. + */ if (dev_priv->active_display_unit == vmw_du_screen_target) { max_width = min(max_width, dev_priv->stdu_max_width); - max_width = min(max_width, dev_priv->texture_max_width); - max_height = min(max_height, dev_priv->stdu_max_height); - max_height = min(max_height, dev_priv->texture_max_height); } /* Add preferred mode */ -- GitLab From a4bd815a94b7aae27e2413f2ce7b458f9843b8ae Mon Sep 17 00:00:00 2001 From: Deepak Rawat Date: Thu, 13 Sep 2018 12:46:10 +0200 Subject: [PATCH 1650/1692] drm/vmwgfx: Don't impose STDU limits on framebuffer size If framebuffers are larger, we create bounce surfaces that are within STDU limits. Signed-off-by: Deepak Rawat Reviewed-by: Thomas Hellstrom Signed-off-by: Thomas Hellstrom --- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 25 ------------------------- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 24 ++++++++++++++---------- 2 files changed, 14 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 93f6b96ca7bb..f30e839f7bfd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) dev_priv->active_display_unit = vmw_du_screen_target; - if (dev_priv->capabilities & SVGA_CAP_3D) { - /* - * For 3D VMs, display (scanout) buffer size is the smaller of - * max texture and max STDU - */ - uint32_t max_width, max_height; - - max_width = min(dev_priv->texture_max_width, - dev_priv->stdu_max_width); - max_height = min(dev_priv->texture_max_height, - dev_priv->stdu_max_height); - - dev->mode_config.max_width = max_width; - dev->mode_config.max_height = max_height; - } else { - /* - * Given various display aspect ratios, there's no way to - * estimate these using prim_bb_mem. 
So just set these to - * something arbitrarily large and we will reject any layout - * that doesn't fit prim_bb_mem later - */ - dev->mode_config.max_width = 8192; - dev->mode_config.max_height = 8192; - } - vmw_kms_create_implicit_placement_property(dev_priv, false); for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index e125233e074b..80a01cd4c051 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, *srf_out = NULL; if (for_scanout) { - uint32_t max_width, max_height; - if (!svga3dsurface_is_screen_target_format(format)) { DRM_ERROR("Invalid Screen Target surface format."); return -EINVAL; } - max_width = min(dev_priv->texture_max_width, - dev_priv->stdu_max_width); - max_height = min(dev_priv->texture_max_height, - dev_priv->stdu_max_height); - - if (size.width > max_width || size.height > max_height) { + if (size.width > dev_priv->texture_max_width || + size.height > dev_priv->texture_max_height) { DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u", size.width, size.height, - max_width, max_height); + dev_priv->texture_max_width, + dev_priv->texture_max_height); return -EINVAL; } } else { @@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) srf->res.backup_size += sizeof(SVGA3dDXSOState); + /* + * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with + * size greater than STDU max width/height. This is really a workaround + * to support creation of big framebuffer requested by some user-space + * for whole topology. That big framebuffer won't really be used for + * binding with screen target as during prepare_fb a separate surface is + * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag. + */ if (dev_priv->active_display_unit == vmw_du_screen_target && - for_scanout) + for_scanout && size.width <= dev_priv->stdu_max_width && + size.height <= dev_priv->stdu_max_height) srf->flags |= SVGA3D_SURFACE_SCREENTARGET; /* -- GitLab From e71cf591876536f7dd5a54ef68d631278ca6faa1 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 14 Sep 2018 09:24:19 +0200 Subject: [PATCH 1651/1692] drm/vmwgfx: Fix buffer object eviction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 19be55701071 ("drm/ttm: add operation ctx to ttm_bo_validate v2") introduced a regression where the vmwgfx driver refused to evict a buffer that was still busy instead of waiting for it to become idle. Fix this. 
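The regression comes from positional initialization of struct ttm_operation_ctx: in this kernel's layout the second member is no_wait_gpu, so { interruptible, true } silently told TTM not to wait on busy buffer objects. A minimal sketch of the corrected initializer in designated form, assuming the 4.19-era field order (interruptible first, no_wait_gpu second):

	/* Sketch only: equivalent spelling of the one-line fix, assuming the
	 * struct begins with { interruptible, no_wait_gpu, ... }. */
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu   = false,	/* was true: eviction gave up on busy BOs */
	};

Designated initializers make the intent explicit and avoid repeating the positional mistake if the struct ever grows.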
Cc: Signed-off-by: Thomas Hellstrom Reviewed-by: Christian König --- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 1f134570b759..f0ab6b2313bb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv, { struct vmw_buffer_object *vbo = container_of(bo, struct vmw_buffer_object, base); - struct ttm_operation_ctx ctx = { interruptible, true }; + struct ttm_operation_ctx ctx = { interruptible, false }; int ret; if (vbo->pin_count > 0) -- GitLab From d124b44f09cab67fc6da4a4513417e3e54b01efc Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Tue, 18 Sep 2018 18:55:41 +0200 Subject: [PATCH 1652/1692] Compiler Attributes: naked was fixed in gcc 4.6 Commit 9c695203a7dd ("compiler-gcc.h: gcc-4.5 needs noclone and noinline on __naked functions") added noinline and noclone as a workaround for a gcc 4.5 bug, which was resolved in 4.6.0. Since now the minimum gcc supported version is 4.6, we can clean it up. See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44290 and https://godbolt.org/z/h6NMIL Fixes: 815f0ddb346c ("include/linux/compiler*.h: make compiler-*.h mutually exclusive") Cc: Rasmus Villemoes Cc: Eli Friedman Cc: Christopher Li Cc: Kees Cook Cc: Ingo Molnar Cc: Geert Uytterhoeven Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Cc: Masahiro Yamada Cc: Joe Perches Cc: Dominique Martinet Cc: Linus Torvalds Cc: linux-sparse@vger.kernel.org Tested-by: Stefan Agner Reviewed-by: Stefan Agner Reviewed-by: Luc Van Oostenryck Reviewed-by: Nick Desaulniers Signed-off-by: Miguel Ojeda Signed-off-by: Greg Kroah-Hartman --- include/linux/compiler-gcc.h | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 763bbad1e258..25d3dd6b2702 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -84,14 +84,8 @@ * to trace naked functions because then mcount is called without * stack and frame pointer being set up and there is no chance to * restore the lr register to the value before mcount was called. - * - * The asm() bodies of naked functions often depend on standard calling - * conventions, therefore they must be noinline and noclone. - * - * GCC 4.[56] currently fail to enforce this, so we must do so ourselves. - * See GCC PR44290. */ -#define __naked __attribute__((naked)) noinline __noclone notrace +#define __naked __attribute__((naked)) notrace #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) -- GitLab From ae596de1a0c8c2c924dc99d23c026259372ab234 Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Tue, 18 Sep 2018 18:55:42 +0200 Subject: [PATCH 1653/1692] Compiler Attributes: naked can be shared The naked attribute is supported by at least gcc >= 4.6 (for ARM, which is the only current user), gcc >= 8 (for x86), clang >= 3.1 and icc >= 13. See https://godbolt.org/z/350Dyc Therefore, move it out of compiler-gcc.h so that the definition is shared by all compilers. This also fixes Clang support for ARM32 --- 815f0ddb346c ("include/linux/compiler*.h: make compiler-*.h mutually exclusive"). 
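With __naked now visible to every compiler, it helps to recall what a typical user looks like. A hedged sketch of the usual ARM pattern (the function name and asm body are hypothetical, not taken from the kernel sources):

	/* Hypothetical example: a __naked function gets no compiler-generated
	 * prologue/epilogue, so its body must be pure inline asm; notrace keeps
	 * mcount from being injected where no stack/frame has been set up. */
	static void __naked example_set_stack(void)
	{
		asm volatile(
		"	mov	sp, r0\n"	/* caller passes the new stack in r0 */
		"	bx	lr\n");
	}

This is also why the shared definition keeps the notrace annotation rather than the bare attribute.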
Fixes: 815f0ddb346c ("include/linux/compiler*.h: make compiler-*.h mutually exclusive") Cc: Rasmus Villemoes Cc: Eli Friedman Cc: Christopher Li Cc: Kees Cook Cc: Ingo Molnar Cc: Geert Uytterhoeven Cc: Greg Kroah-Hartman Cc: Masahiro Yamada Cc: Joe Perches Cc: Dominique Martinet Cc: Linus Torvalds Cc: linux-sparse@vger.kernel.org Suggested-by: Arnd Bergmann Tested-by: Stefan Agner Reviewed-by: Stefan Agner Reviewed-by: Luc Van Oostenryck Reviewed-by: Nick Desaulniers Signed-off-by: Miguel Ojeda Signed-off-by: Greg Kroah-Hartman --- include/linux/compiler-gcc.h | 8 -------- include/linux/compiler_types.h | 8 ++++++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 25d3dd6b2702..4d36b27214fd 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -79,14 +79,6 @@ #define __noretpoline __attribute__((indirect_branch("keep"))) #endif -/* - * it doesn't make sense on ARM (currently the only user of __naked) - * to trace naked functions because then mcount is called without - * stack and frame pointer being set up and there is no chance to - * restore the lr register to the value before mcount was called. - */ -#define __naked __attribute__((naked)) notrace - #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) #define __optimize(level) __attribute__((__optimize__(level))) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3525c179698c..db192becfec4 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -226,6 +226,14 @@ struct ftrace_likely_data { #define notrace __attribute__((no_instrument_function)) #endif +/* + * it doesn't make sense on ARM (currently the only user of __naked) + * to trace naked functions because then mcount is called without + * stack and frame pointer being set up and there is no chance to + * restore the lr register to the value before mcount was called. + */ +#define __naked __attribute__((naked)) notrace + #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) /* -- GitLab From 7ce5c8cd753f9afa8e79e9ec40351998e354f239 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 20 Sep 2018 08:30:55 -0600 Subject: [PATCH 1654/1692] libata: mask swap internal and hardware tag hen we're comparing the hardware completion mask passed in from the driver with the internal tag pending mask, we need to account for the fact that the internal tag is different from the hardware tag. If not, then we can end up either prematurely completing the internal tag (since it's not set in the hw mask), or simply flag an error: ata2: illegal qc_active transition (100000000->00000001) If the internal tag is set, then swap that with the hardware tag in this case before comparing with what the hardware reports. 
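The quoted error already shows the mismatch: the driver tracks the internal command at bit 32 (100000000) while the hardware reports it at bit 0 (00000001). Below is a standalone user-space model of the fix-up, assuming ATA_TAG_INTERNAL is 32 as that error message implies; it is illustrative only, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	#define ATA_TAG_INTERNAL 32	/* assumed from the 100000000 mask above */

	int main(void)
	{
		uint64_t ap_qc_active = 1ULL << ATA_TAG_INTERNAL; /* driver's view */
		uint64_t qc_active = 0x1;			  /* hardware's view */

		if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
			/* move the hardware's bit 0 up into the internal tag slot */
			qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
			qc_active ^= qc_active & 0x01;
		}
		/* the two masks now agree, so done_mask is clean */
		printf("done_mask=%#llx\n",
		       (unsigned long long)(ap_qc_active ^ qc_active));
		return 0;
	}

Run as-is it prints done_mask=0, i.e. the internal command is neither completed prematurely nor reported as an illegal qc_active transition.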
Fixes: 28361c403683 ("libata: add extra internal command") Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=201151 Cc: stable@vger.kernel.org Reported-by: Paul Sbarra Tested-by: Paul Sbarra Signed-off-by: Jens Axboe --- drivers/ata/libata-core.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 172e32840256..3893f9bde1e6 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc) */ int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) { + u64 done_mask, ap_qc_active = ap->qc_active; int nr_done = 0; - u64 done_mask; - done_mask = ap->qc_active ^ qc_active; + /* + * If the internal tag is set on ap->qc_active, then we care about + * bit0 on the passed in qc_active mask. Move that bit up to match + * the internal tag. + */ + if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; + qc_active ^= qc_active & 0x01; + } + + done_mask = ap_qc_active ^ qc_active; if (unlikely(done_mask & qc_active)) { ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", -- GitLab From 65eea8edc315589d6c993cf12dbb5d0e9ef1fe4e Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Thu, 20 Sep 2018 09:09:48 -0600 Subject: [PATCH 1655/1692] floppy: Do not copy a kernel pointer to user memory in FDGETPRM ioctl The final field of a floppy_struct is the field "name", which is a pointer to a string in kernel memory. The kernel pointer should not be copied to user memory. The FDGETPRM ioctl copies a floppy_struct to user memory, including this "name" field. This pointer cannot be used by the user and it will leak a kernel address to user-space, which will reveal the location of kernel code and data and undermine KASLR protection. Model this code after the compat ioctl which copies the returned data to a previously cleared temporary structure on the stack (excluding the name pointer) and copy out to userspace from there. As we already have an inparam union with an appropriate member and that memory is already cleared even for read only calls make use of that as a temporary store. Based on an initial patch by Brian Belleville. CVE-2018-7755 Signed-off-by: Andy Whitcroft Broke up long line. Signed-off-by: Jens Axboe --- drivers/block/floppy.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 48f622728ce6..f2b6f4da1034 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3467,6 +3467,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int (struct floppy_struct **)&outparam); if (ret) return ret; + memcpy(&inparam.g, outparam, + offsetof(struct floppy_struct, name)); + outparam = &inparam.g; break; case FDMSGON: UDP->flags |= FTD_MSG; -- GitLab From 96147db1e1dff83679e71ac92193cbcab761a14c Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 18 Sep 2018 18:36:21 +0300 Subject: [PATCH 1656/1692] pinctrl: intel: Do pin translation in other GPIO operations as well For some reason I thought GPIOLIB handles translation from GPIO ranges to pinctrl pins but it turns out not to be the case. This means that when GPIOs operations are performed for a pin controller having a custom GPIO base such as Cannon Lake and Ice Lake incorrect pin number gets used internally. Fix this in the same way we did for lock/unlock IRQ operations and translate the GPIO number to pin before using it. 
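The translation itself is an offset rebase per pad group, as the moved intel_gpio_to_pin() below shows. A toy model with hypothetical numbers (not real Cannon Lake or Ice Lake values) makes the failure mode concrete:

	/* Hypothetical pad group: its pinctrl pins start at 96, but the group was
	 * given a custom gpiolib base of 128. Feeding the raw gpiolib offset to
	 * intel_get_padcfg() would therefore poke the wrong pad. */
	struct padgrp { int base, gpio_base, size; };

	static int gpio_to_pin(const struct padgrp *g, int offset)
	{
		if (offset >= g->gpio_base && offset < g->gpio_base + g->size)
			return g->base + offset - g->gpio_base;
		return -1;
	}

	/* gpio_to_pin(&(struct padgrp){ .base = 96, .gpio_base = 128, .size = 24 }, 130)
	 * returns pin 98, the number the padcfg registers are actually indexed by. */

With the same mapping applied in get, set and get_direction, the GPIO callbacks now touch the same pads the IRQ callbacks already do.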
Fixes: a60eac3239f0 ("pinctrl: intel: Allow custom GPIO base for pad groups") Reported-by: Rajat Jain Signed-off-by: Mika Westerberg Tested-by: Rajat Jain Signed-off-by: Linus Walleij --- drivers/pinctrl/intel/pinctrl-intel.c | 111 +++++++++++++++----------- 1 file changed, 63 insertions(+), 48 deletions(-) diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 62b009b27eda..ec8dafc94694 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = { .owner = THIS_MODULE, }; +/** + * intel_gpio_to_pin() - Translate from GPIO offset to pin number + * @pctrl: Pinctrl structure + * @offset: GPIO offset from gpiolib + * @commmunity: Community is filled here if not %NULL + * @padgrp: Pad group is filled here if not %NULL + * + * When coming through gpiolib irqchip, the GPIO offset is not + * automatically translated to pinctrl pin number. This function can be + * used to find out the corresponding pinctrl pin. + */ +static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset, + const struct intel_community **community, + const struct intel_padgroup **padgrp) +{ + int i; + + for (i = 0; i < pctrl->ncommunities; i++) { + const struct intel_community *comm = &pctrl->communities[i]; + int j; + + for (j = 0; j < comm->ngpps; j++) { + const struct intel_padgroup *pgrp = &comm->gpps[j]; + + if (pgrp->gpio_base < 0) + continue; + + if (offset >= pgrp->gpio_base && + offset < pgrp->gpio_base + pgrp->size) { + int pin; + + pin = pgrp->base + offset - pgrp->gpio_base; + if (community) + *community = comm; + if (padgrp) + *padgrp = pgrp; + + return pin; + } + } + } + + return -EINVAL; +} + static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) { struct intel_pinctrl *pctrl = gpiochip_get_data(chip); void __iomem *reg; u32 padcfg0; + int pin; + + pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); + if (pin < 0) + return -EINVAL; - reg = intel_get_padcfg(pctrl, offset, PADCFG0); + reg = intel_get_padcfg(pctrl, pin, PADCFG0); if (!reg) return -EINVAL; @@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value) unsigned long flags; void __iomem *reg; u32 padcfg0; + int pin; + + pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); + if (pin < 0) + return; - reg = intel_get_padcfg(pctrl, offset, PADCFG0); + reg = intel_get_padcfg(pctrl, pin, PADCFG0); if (!reg) return; @@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) struct intel_pinctrl *pctrl = gpiochip_get_data(chip); void __iomem *reg; u32 padcfg0; + int pin; - reg = intel_get_padcfg(pctrl, offset, PADCFG0); + pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); + if (pin < 0) + return -EINVAL; + + reg = intel_get_padcfg(pctrl, pin, PADCFG0); if (!reg) return -EINVAL; @@ -827,51 +887,6 @@ static const struct gpio_chip intel_gpio_chip = { .set_config = gpiochip_generic_config, }; -/** - * intel_gpio_to_pin() - Translate from GPIO offset to pin number - * @pctrl: Pinctrl structure - * @offset: GPIO offset from gpiolib - * @commmunity: Community is filled here if not %NULL - * @padgrp: Pad group is filled here if not %NULL - * - * When coming through gpiolib irqchip, the GPIO offset is not - * automatically translated to pinctrl pin number. This function can be - * used to find out the corresponding pinctrl pin. 
- */ -static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset, - const struct intel_community **community, - const struct intel_padgroup **padgrp) -{ - int i; - - for (i = 0; i < pctrl->ncommunities; i++) { - const struct intel_community *comm = &pctrl->communities[i]; - int j; - - for (j = 0; j < comm->ngpps; j++) { - const struct intel_padgroup *pgrp = &comm->gpps[j]; - - if (pgrp->gpio_base < 0) - continue; - - if (offset >= pgrp->gpio_base && - offset < pgrp->gpio_base + pgrp->size) { - int pin; - - pin = pgrp->base + offset - pgrp->gpio_base; - if (community) - *community = comm; - if (padgrp) - *padgrp = pgrp; - - return pin; - } - } - } - - return -EINVAL; -} - static int intel_gpio_irq_reqres(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); -- GitLab From caaa4c8a6be2a275bd14f2369ee364978ff74704 Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Wed, 12 Sep 2018 21:42:18 -0400 Subject: [PATCH 1657/1692] drm/amdgpu: Fix SDMA HQD destroy error on gfx_v7 A wrong register bit was examinated for checking SDMA status so it reports false failures. This typo only appears on gfx_v7. gfx_v8 checks the correct bit. Acked-by: Alex Deucher Signed-off-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index ea3f698aef5e..9803b91f3e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, while (true) { temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); - if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT) + if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; if (time_after(jiffies, end_jiffies)) return -ETIME; -- GitLab From 15426dbb65c5b37680d27e84d58a1ed3b8532518 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Wed, 12 Sep 2018 21:42:19 -0400 Subject: [PATCH 1658/1692] drm/amdkfd: Change the control stack MTYPE from UC to NC on GFX9 CWSR fails on Raven if the control stack is MTYPE_UC, which is used for regular GART mappings. As a workaround we map it using MTYPE_NC. The MEC firmware expects the control stack at one page offset from the start of the MQD so it is part of the MQD allocation on GFXv9. AMDGPU added a memory allocation flag just for this purpose. 
Acked-by: Alex Deucher Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2 +- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2 +- 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f8bbbb3a9504..0c791e35acf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr) + void **cpu_ptr, bool mqd_gfx9) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; @@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; bp.type = ttm_bo_type_kernel; bp.resv = NULL; + + if (mqd_gfx9) + bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9; + r = amdgpu_bo_create(adev, &bp, &bo); if (r) { dev_err(adev->dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 2f379c183ed2..cc9aeab5468c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); /* Shared API */ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr); + void **cpu_ptr, bool mqd_gfx9); void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); void get_local_mem_info(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1b048715ab8a..29ac74f40dce 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd->kfd2kgd->init_gtt_mem_allocation( kfd->kgd, size, &kfd->gtt_mem, - &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ + &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, + false)) { dev_err(kfd_device, "Could not allocate %d bytes\n", size); goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index f5fc3675f21e..0cedb37cf513 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), &((*mqd_mem_obj)->gtt_mem), &((*mqd_mem_obj)->gpu_addr), - (void *)&((*mqd_mem_obj)->cpu_ptr)); + (void *)&((*mqd_mem_obj)->cpu_ptr), true); } else retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), mqd_mem_obj); diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 14391b06080c..43b82e14007e 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -292,7 +292,7 @@ struct tile_config { struct kfd2kgd_calls { int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr); + void **cpu_ptr, bool mqd_gfx9); void (*free_gtt_mem)(struct kgd_dev 
*kgd, void *mem_obj); -- GitLab From 44d8cc6f1a905e4bb1d4221a898abb0d7e9d100a Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Wed, 12 Sep 2018 21:42:20 -0400 Subject: [PATCH 1659/1692] drm/amdkfd: Fix ATS capablity was not reported correctly on some APUs Because CRAT_CU_FLAGS_IOMMU_PRESENT was not set in some BIOS crat, we need to workaround this. For future compatibility, we also overwrite the bit in capability according to the value of needs_iommu_device. Acked-by: Alex Deucher Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 13 ++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 21 ++++++++++++++++----- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 7a61f38c09e6..01494752c36a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) struct amd_iommu_device_info iommu_info; unsigned int pasid_limit; int err; + struct kfd_topology_device *top_dev; - if (!kfd->device_info->needs_iommu_device) + top_dev = kfd_topology_device_by_id(kfd->id); + + /* + * Overwrite ATS capability according to needs_iommu_device to fix + * potential missing corresponding bit in CRAT of BIOS. + */ + if (!kfd->device_info->needs_iommu_device) { + top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; return 0; + } + + top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT; iommu_info.flags = 0; err = amd_iommu_device_info(kfd->pdev, &iommu_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f971710f1c91..92b285ca73aa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu); int kfd_topology_remove_device(struct kfd_dev *gpu); struct kfd_topology_device *kfd_topology_device_by_proximity_domain( uint32_t proximity_domain); +struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index bc95d4dfee2e..80f5db4ef75f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain( return device; } -struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) +struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id) { - struct kfd_topology_device *top_dev; - struct kfd_dev *device = NULL; + struct kfd_topology_device *top_dev = NULL; + struct kfd_topology_device *ret = NULL; down_read(&topology_lock); list_for_each_entry(top_dev, &topology_device_list, list) if (top_dev->gpu_id == gpu_id) { - device = top_dev->gpu; + ret = top_dev; break; } up_read(&topology_lock); - return device; + return ret; +} + +struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) +{ + struct kfd_topology_device *top_dev; + + top_dev = kfd_topology_device_by_id(gpu_id); + if (!top_dev) + return NULL; + + return top_dev->gpu; } struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) -- GitLab From 
26b471c7e2f7befd0f59c35b257749ca57e0ed70 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Sun, 16 Sep 2018 14:28:20 +0300 Subject: [PATCH 1660/1692] KVM: nVMX: Fix bad cleanup on error of get/set nested state IOCTLs The handlers of IOCTLs in kvm_arch_vcpu_ioctl() are expected to set their return value in "r" local var and break out of switch block when they encounter some error. This is because vcpu_load() is called before the switch block which have a proper cleanup of vcpu_put() afterwards. However, KVM_{GET,SET}_NESTED_STATE IOCTLs handlers just return immediately on error without performing above mentioned cleanup. Thus, change these handlers to behave as expected. Fixes: 8fcc4b5923af ("kvm: nVMX: Introduce KVM_CAP_NESTED_STATE") Reviewed-by: Mark Kanda Reviewed-by: Patrick Colp Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4c39ec5fc4fe..edbf00ec56b3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4010,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp, break; BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); + r = -EFAULT; if (get_user(user_data_size, &user_kvm_nested_state->size)) - return -EFAULT; + break; r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state, user_data_size); if (r < 0) - return r; + break; if (r > user_data_size) { if (put_user(r, &user_kvm_nested_state->size)) - return -EFAULT; - return -E2BIG; + r = -EFAULT; + else + r = -E2BIG; + break; } + r = 0; break; } @@ -4034,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (!kvm_x86_ops->set_nested_state) break; + r = -EFAULT; if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) - return -EFAULT; + break; + r = -EINVAL; if (kvm_state.size < sizeof(kvm_state)) - return -EINVAL; + break; if (kvm_state.flags & ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE)) - return -EINVAL; + break; /* nested_run_pending implies guest_mode. */ if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING) - return -EINVAL; + break; r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); break; -- GitLab From 37f31b6ca4311b94d985fb398a72e5399ad57925 Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Mon, 3 Sep 2018 23:06:23 +0200 Subject: [PATCH 1661/1692] ubifs: Check for name being NULL while mounting The requested device name can be NULL or an empty string. Check for that and refuse to continue. UBIFS has to do this manually since we cannot use mount_bdev(), which checks for this condition. 
Fixes: 1e51764a3c2ac ("UBIFS: add new flash file system") Reported-by: syzbot+38bd0f7865e5c6379280@syzkaller.appspotmail.com Signed-off-by: Richard Weinberger --- fs/ubifs/super.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 23e7042666a7..87d08f738632 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1954,6 +1954,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode) int dev, vol; char *endptr; + if (!name || !*name) + return ERR_PTR(-EINVAL); + /* First, try to open using the device node path method */ ubi = ubi_open_volume_path(name, mode); if (!IS_ERR(ubi)) -- GitLab From d3bdc016c598e09a4ddf17805d17d43759b0a582 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Wed, 12 Sep 2018 14:51:38 +0200 Subject: [PATCH 1662/1692] ubifs: drop false positive assertion The following sequence triggers ubifs_assert(c, c->lst.taken_empty_lebs > 0); at the end of ubifs_remount_fs(): mount -t ubifs /dev/ubi0_0 /mnt echo 1 > /sys/kernel/debug/ubifs/ubi0_0/ro_error umount /mnt mount -t ubifs -o ro /dev/ubix_y /mnt mount -o remount,ro /mnt The resulting UBIFS assert failed in ubifs_remount_fs at 1878 (pid 161) is a false positive. In the case above c->lst.taken_empty_lebs has never been changed from its initial zero value. This will only happen when the deferred recovery is done. Fix this by doing the assertion only when recovery has been done already. Signed-off-by: Sascha Hauer Signed-off-by: Richard Weinberger --- fs/ubifs/super.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 87d08f738632..bf000c8aeffb 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1912,7 +1912,9 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) mutex_unlock(&c->bu_mutex); } - ubifs_assert(c, c->lst.taken_empty_lebs > 0); + if (!c->need_recovery) + ubifs_assert(c, c->lst.taken_empty_lebs > 0); + return 0; } -- GitLab From f061c1cc404a618858a77aea233fde0aeaad2f2d Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Sun, 16 Sep 2018 23:57:35 +0200 Subject: [PATCH 1663/1692] Revert "ubifs: xattr: Don't operate on deleted inodes" This reverts commit 11a6fc3dc743e22fb50f2196ec55bee5140d3c52. UBIFS wants to assert that xattr operations are only issued on files with positive link count. The said patch made this operations return -ENOENT for unlinked files such that the asserts will no longer trigger. This was wrong since xattr operations are perfectly fine on unlinked files. Instead the assertions need to be fixed/removed. 
Cc: Fixes: 11a6fc3dc743 ("ubifs: xattr: Don't operate on deleted inodes") Reported-by: Koen Vandeputte Tested-by: Joel Stanley Signed-off-by: Richard Weinberger --- fs/ubifs/xattr.c | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 61afdfee4b28..f5ad1ede7990 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, ui->data_len = size; mutex_lock(&host_ui->ui_mutex); - - if (!host->i_nlink) { - err = -ENOENT; - goto out_noent; - } - host->i_ctime = current_time(host); host_ui->xattr_cnt += 1; host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); @@ -190,7 +184,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, host_ui->xattr_size -= CALC_XATTR_BYTES(size); host_ui->xattr_names -= fname_len(nm); host_ui->flags &= ~UBIFS_CRYPT_FL; -out_noent: mutex_unlock(&host_ui->ui_mutex); out_free: make_bad_inode(inode); @@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, mutex_unlock(&ui->ui_mutex); mutex_lock(&host_ui->ui_mutex); - - if (!host->i_nlink) { - err = -ENOENT; - goto out_noent; - } - host->i_ctime = current_time(host); host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); host_ui->xattr_size += CALC_XATTR_BYTES(size); @@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, out_cancel: host_ui->xattr_size -= CALC_XATTR_BYTES(size); host_ui->xattr_size += CALC_XATTR_BYTES(old_size); -out_noent: mutex_unlock(&host_ui->ui_mutex); make_bad_inode(inode); out_free: @@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host, return err; mutex_lock(&host_ui->ui_mutex); - - if (!host->i_nlink) { - err = -ENOENT; - goto out_noent; - } - host->i_ctime = current_time(host); host_ui->xattr_cnt -= 1; host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm)); @@ -521,7 +501,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host, host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); host_ui->xattr_names += fname_len(nm); -out_noent: mutex_unlock(&host_ui->ui_mutex); ubifs_release_budget(c, &req); make_bad_inode(inode); @@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name) ubifs_assert(c, inode_is_locked(host)); - if (!host->i_nlink) - return -ENOENT; - if (fname_len(&nm) > UBIFS_MAX_NLEN) return -ENAMETOOLONG; -- GitLab From a8b3bb338e4ee4cc84a2b9a6fdf27049b84baa59 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Thu, 20 Sep 2018 12:37:08 -0700 Subject: [PATCH 1664/1692] x86/intel_rdt: Add Reinette as co-maintainer for RDT Reinette Chatre is doing great job on enabling pseudo-locking and other features in RDT. Add her as co-maintainer for RDT. 
Suggested-by: Thomas Gleixner Signed-off-by: Fenghua Yu Signed-off-by: Thomas Gleixner Acked-by: Ingo Molnar Acked-by: Reinette Chatre Cc: "H Peter Anvin" Cc: "Tony Luck" Link: https://lkml.kernel.org/r/1537472228-221799-1-git-send-email-fenghua.yu@intel.com --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 091e66b60cd2..140ea6ee3ac8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12260,6 +12260,7 @@ F: Documentation/networking/rds.txt RDT - RESOURCE ALLOCATION M: Fenghua Yu +M: Reinette Chatre L: linux-kernel@vger.kernel.org S: Supported F: arch/x86/kernel/cpu/intel_rdt* -- GitLab From 9068a427ee0beb1365a0925e8c33f338f09f5e97 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 19 Sep 2018 14:33:14 +0200 Subject: [PATCH 1665/1692] MAINTAINERS: Add X86 MM entry Dave, Andy and Peter are de facto overseing the mm parts of X86. Add an explicit maintainers entry. Signed-off-by: Thomas Gleixner Acked-by: Dave Hansen Acked-by: Andy Lutomirski Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 140ea6ee3ac8..80a311252b04 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15943,6 +15943,15 @@ M: Borislav Petkov S: Maintained F: arch/x86/kernel/cpu/microcode/* +X86 MM +M: Dave Hansen +M: Andy Lutomirski +M: Peter Zijlstra +L: linux-kernel@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm +S: Maintained +F: arch/x86/mm/ + X86 PLATFORM DRIVERS M: Darren Hart M: Andy Shevchenko -- GitLab From f83606f5eb007adc33bc8541ede00590f477bdeb Mon Sep 17 00:00:00 2001 From: KJ Tsanaktsidis Date: Thu, 20 Sep 2018 12:22:25 -0700 Subject: [PATCH 1666/1692] fork: report pid exhaustion correctly Make the clone and fork syscalls return EAGAIN when the limit on the number of pids /proc/sys/kernel/pid_max is exceeded. Currently, when the pid_max limit is exceeded, the kernel will return ENOSPC from the fork and clone syscalls. This is contrary to the documented behaviour, which explicitly calls out the pid_max case as one where EAGAIN should be returned. It also leads to really confusing error messages in userspace programs which will complain about a lack of disk space when they fail to create processes/threads for this reason. This error is being returned because alloc_pid() uses the idr api to find a new pid; when there are none available, idr_alloc_cyclic() returns -ENOSPC, and this is being propagated back to userspace. This behaviour has been broken before, and was explicitly fixed in commit 35f71bc0a09a ("fork: report pid reservation failure properly"), so I think -EAGAIN is definitely the right thing to return in this case. The current behaviour change dates from commit 95846ecf9dac ("pid: replace pid bitmap implementation with IDR AIP") and was I believe unintentional. This patch has no impact on the case where allocating a pid fails because the child reaper for the namespace is dead; that case will still return -ENOMEM. 
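The distinction matters to callers: EAGAIN is the documented, retryable condition, while ENOSPC reads as a storage problem. A minimal user-space sketch (not part of the patch) of how the difference is observed:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid < 0) {
			if (errno == EAGAIN)	/* pid_max or RLIMIT_NPROC reached */
				fprintf(stderr, "fork: process limit hit, retry later\n");
			else			/* e.g. ENOMEM from a dead child reaper */
				fprintf(stderr, "fork: %s\n", strerror(errno));
			return 1;
		}
		if (pid == 0)
			_exit(0);
		return 0;
	}

Before this fix the pid_max case fell into the second branch and showed up as "No space left on device", which is exactly the confusing message described above.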
Link: http://lkml.kernel.org/r/20180903111016.46461-1-ktsanaktsidis@zendesk.com Fixes: 95846ecf9dac ("pid: replace pid bitmap implementation with IDR AIP") Signed-off-by: KJ Tsanaktsidis Reviewed-by: Andrew Morton Acked-by: Michal Hocko Cc: Gargi Sharma Cc: Rik van Riel Cc: Oleg Nesterov Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- kernel/pid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/pid.c b/kernel/pid.c index de1cfc4f75a2..cdf63e53a014 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns) idr_preload_end(); if (nr < 0) { - retval = nr; + retval = (nr == -ENOSPC) ? -EAGAIN : nr; goto out_free; } -- GitLab From 889c695d419f19e5db52592dafbaf26143c36d1f Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Thu, 20 Sep 2018 12:22:30 -0700 Subject: [PATCH 1667/1692] mm: disable deferred struct page for 32-bit arches Deferred struct page init is needed only on systems with large amount of physical memory to improve boot performance. 32-bit systems do not benefit from this feature. Jiri reported a problem where deferred struct pages do not work well with x86-32: [ 0.035162] Dentry cache hash table entries: 131072 (order: 7, 524288 bytes) [ 0.035725] Inode-cache hash table entries: 65536 (order: 6, 262144 bytes) [ 0.036269] Initializing CPU#0 [ 0.036513] Initializing HighMem for node 0 (00036ffe:0007ffe0) [ 0.038459] page:f6780000 is uninitialized and poisoned [ 0.038460] raw: ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff [ 0.039509] page dumped because: VM_BUG_ON_PAGE(1 && PageCompound(page)) [ 0.040038] ------------[ cut here ]------------ [ 0.040399] kernel BUG at include/linux/page-flags.h:293! [ 0.040823] invalid opcode: 0000 [#1] SMP PTI [ 0.041166] CPU: 0 PID: 0 Comm: swapper Not tainted 4.19.0-rc1_pt_jiri #9 [ 0.041694] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.11.0-20171110_100015-anatol 04/01/2014 [ 0.042496] EIP: free_highmem_page+0x64/0x80 [ 0.042839] Code: 13 46 d8 c1 e8 18 5d 83 e0 03 8d 04 c0 c1 e0 06 ff 80 ec 5f 44 d8 c3 8d b4 26 00 00 00 00 ba 08 65 28 d8 89 d8 e8 fc 71 02 00 <0f> 0b 8d 76 00 8d bc 27 00 00 00 00 ba d0 b1 26 d8 89 d8 e8 e4 71 [ 0.044338] EAX: 0000003c EBX: f6780000 ECX: 00000000 EDX: d856cbe8 [ 0.044868] ESI: 0007ffe0 EDI: d838df20 EBP: d838df00 ESP: d838defc [ 0.045372] DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00210086 [ 0.045913] CR0: 80050033 CR2: 00000000 CR3: 18556000 CR4: 00040690 [ 0.046413] DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000 [ 0.046913] DR6: fffe0ff0 DR7: 00000400 [ 0.047220] Call Trace: [ 0.047419] add_highpages_with_active_regions+0xbd/0x10d [ 0.047854] set_highmem_pages_init+0x5b/0x71 [ 0.048202] mem_init+0x2b/0x1e8 [ 0.048460] start_kernel+0x1d2/0x425 [ 0.048757] i386_start_kernel+0x93/0x97 [ 0.049073] startup_32_smp+0x164/0x168 [ 0.049379] Modules linked in: [ 0.049626] ---[ end trace 337949378db0abbb ]--- We free highmem pages before their struct pages are initialized: mem_init() set_highmem_pages_init() add_highpages_with_active_regions() free_highmem_page() .. Access uninitialized struct page here.. Because there is no reason to have this feature on 32-bit systems, just disable it. 
Link: http://lkml.kernel.org/r/20180831150506.31246-1-pavel.tatashin@microsoft.com Fixes: 2e3ca40f03bb ("mm: relax deferred struct page requirements") Signed-off-by: Pavel Tatashin Reported-by: Jiri Slaby Acked-by: Michal Hocko Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/Kconfig b/mm/Kconfig index a550635ea5c3..de64ea658716 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT depends on NO_BOOTMEM depends on SPARSEMEM depends on !NEED_PER_CPU_KM + depends on 64BIT help Ordinarily all struct pages are initialised during early boot in a single thread. On very large machines this can take a considerable -- GitLab From a1b3d2f217cf51505858c5c160abef96c3e91721 Mon Sep 17 00:00:00 2001 From: Dominique Martinet Date: Thu, 20 Sep 2018 12:22:35 -0700 Subject: [PATCH 1668/1692] fs/proc/kcore.c: fix invalid memory access in multi-page read optimization The 'm' kcore_list item could point to kclist_head, and it is incorrect to look at m->addr / m->size in this case. There is no choice but to run through the list of entries for every address if we did not find any entry in the previous iteration Reset 'm' to NULL in that case at Omar Sandoval's suggestion. [akpm@linux-foundation.org: add comment] Link: http://lkml.kernel.org/r/1536100702-28706-1-git-send-email-asmadeus@codewreck.org Fixes: bf991c2231117 ("proc/kcore: optimize multiple page reads") Signed-off-by: Dominique Martinet Reviewed-by: Andrew Morton Cc: Omar Sandoval Cc: Alexey Dobriyan Cc: Eric Biederman Cc: James Morse Cc: Bhupesh Sharma Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/proc/kcore.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index ad72261ee3fe..d297fe4472a9 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -464,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) ret = -EFAULT; goto out; } + m = NULL; /* skip the list anchor */ } else if (m->type == KCORE_VMALLOC) { vread(buf, (char *)start, tsz); /* we have to zero-fill user buffer even if no read */ -- GitLab From b45d71fb89ab8adfe727b9d0ee188ed58582a647 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Thu, 20 Sep 2018 12:22:39 -0700 Subject: [PATCH 1669/1692] mm: shmem.c: Correctly annotate new inodes for lockdep Directories and inodes don't necessarily need to be in the same lockdep class. For ex, hugetlbfs splits them out too to prevent false positives in lockdep. Annotate correctly after new inode creation. If its a directory inode, it will be put into a different class. This should fix a lockdep splat reported by syzbot: > ====================================================== > WARNING: possible circular locking dependency detected > 4.18.0-rc8-next-20180810+ #36 Not tainted > ------------------------------------------------------ > syz-executor900/4483 is trying to acquire lock: > 00000000d2bfc8fe (&sb->s_type->i_mutex_key#9){++++}, at: inode_lock > include/linux/fs.h:765 [inline] > 00000000d2bfc8fe (&sb->s_type->i_mutex_key#9){++++}, at: > shmem_fallocate+0x18b/0x12e0 mm/shmem.c:2602 > > but task is already holding lock: > 0000000025208078 (ashmem_mutex){+.+.}, at: ashmem_shrink_scan+0xb4/0x630 > drivers/staging/android/ashmem.c:448 > > which lock already depends on the new lock. 
> > -> #2 (ashmem_mutex){+.+.}: > __mutex_lock_common kernel/locking/mutex.c:925 [inline] > __mutex_lock+0x171/0x1700 kernel/locking/mutex.c:1073 > mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:1088 > ashmem_mmap+0x55/0x520 drivers/staging/android/ashmem.c:361 > call_mmap include/linux/fs.h:1844 [inline] > mmap_region+0xf27/0x1c50 mm/mmap.c:1762 > do_mmap+0xa10/0x1220 mm/mmap.c:1535 > do_mmap_pgoff include/linux/mm.h:2298 [inline] > vm_mmap_pgoff+0x213/0x2c0 mm/util.c:357 > ksys_mmap_pgoff+0x4da/0x660 mm/mmap.c:1585 > __do_sys_mmap arch/x86/kernel/sys_x86_64.c:100 [inline] > __se_sys_mmap arch/x86/kernel/sys_x86_64.c:91 [inline] > __x64_sys_mmap+0xe9/0x1b0 arch/x86/kernel/sys_x86_64.c:91 > do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290 > entry_SYSCALL_64_after_hwframe+0x49/0xbe > > -> #1 (&mm->mmap_sem){++++}: > __might_fault+0x155/0x1e0 mm/memory.c:4568 > _copy_to_user+0x30/0x110 lib/usercopy.c:25 > copy_to_user include/linux/uaccess.h:155 [inline] > filldir+0x1ea/0x3a0 fs/readdir.c:196 > dir_emit_dot include/linux/fs.h:3464 [inline] > dir_emit_dots include/linux/fs.h:3475 [inline] > dcache_readdir+0x13a/0x620 fs/libfs.c:193 > iterate_dir+0x48b/0x5d0 fs/readdir.c:51 > __do_sys_getdents fs/readdir.c:231 [inline] > __se_sys_getdents fs/readdir.c:212 [inline] > __x64_sys_getdents+0x29f/0x510 fs/readdir.c:212 > do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290 > entry_SYSCALL_64_after_hwframe+0x49/0xbe > > -> #0 (&sb->s_type->i_mutex_key#9){++++}: > lock_acquire+0x1e4/0x540 kernel/locking/lockdep.c:3924 > down_write+0x8f/0x130 kernel/locking/rwsem.c:70 > inode_lock include/linux/fs.h:765 [inline] > shmem_fallocate+0x18b/0x12e0 mm/shmem.c:2602 > ashmem_shrink_scan+0x236/0x630 drivers/staging/android/ashmem.c:455 > ashmem_ioctl+0x3ae/0x13a0 drivers/staging/android/ashmem.c:797 > vfs_ioctl fs/ioctl.c:46 [inline] > file_ioctl fs/ioctl.c:501 [inline] > do_vfs_ioctl+0x1de/0x1720 fs/ioctl.c:685 > ksys_ioctl+0xa9/0xd0 fs/ioctl.c:702 > __do_sys_ioctl fs/ioctl.c:709 [inline] > __se_sys_ioctl fs/ioctl.c:707 [inline] > __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:707 > do_syscall_64+0x1b9/0x820 arch/x86/entry/common.c:290 > entry_SYSCALL_64_after_hwframe+0x49/0xbe > > other info that might help us debug this: > > Chain exists of: > &sb->s_type->i_mutex_key#9 --> &mm->mmap_sem --> ashmem_mutex > > Possible unsafe locking scenario: > > CPU0 CPU1 > ---- ---- > lock(ashmem_mutex); > lock(&mm->mmap_sem); > lock(ashmem_mutex); > lock(&sb->s_type->i_mutex_key#9); > > *** DEADLOCK *** > > 1 lock held by syz-executor900/4483: > #0: 0000000025208078 (ashmem_mutex){+.+.}, at: > ashmem_shrink_scan+0xb4/0x630 drivers/staging/android/ashmem.c:448 Link: http://lkml.kernel.org/r/20180821231835.166639-1-joel@joelfernandes.org Signed-off-by: Joel Fernandes (Google) Reported-by: syzbot Reviewed-by: NeilBrown Suggested-by: NeilBrown Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/shmem.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/shmem.c b/mm/shmem.c index 0376c124b043..446942677cd4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode mpol_shared_policy_init(&info->policy, NULL); break; } + + lockdep_annotate_inode_mutex_key(inode); } else shmem_free_inode(sb); return inode; -- GitLab From 3bf181bc5d8bc86f04ffd538d7fda9e69af1f2c2 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 20 Sep 2018 12:22:43 -0700 Subject: [PATCH 1670/1692] 
kernel/sys.c: remove duplicated include Link: http://lkml.kernel.org/r/20180821133424.18716-1-yuehaibing@huawei.com Signed-off-by: YueHaibing Reviewed-by: Andrew Morton Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- kernel/sys.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/sys.c b/kernel/sys.c index cf5c67533ff1..123bd73046ec 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -71,9 +71,6 @@ #include #include -/* Hardening for Spectre-v1 */ -#include - #include "uid16.h" #ifndef SET_UNALIGN_CTL -- GitLab From 172b06c32b949759fe6313abec514bc4f15014f4 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 20 Sep 2018 12:22:46 -0700 Subject: [PATCH 1671/1692] mm: slowly shrink slabs with a relatively small number of objects 9092c71bb724 ("mm: use sc->priority for slab shrink targets") changed the way that the target slab pressure is calculated and made it priority-based: delta = freeable >> priority; delta *= 4; do_div(delta, shrinker->seeks); The problem is that on a default priority (which is 12) no pressure is applied at all, if the number of potentially reclaimable objects is less than 4096 (1<<12). This causes the last objects on slab caches of no longer used cgroups to (almost) never get reclaimed. It's obviously a waste of memory. It can be especially painful, if these stale objects are holding a reference to a dying cgroup. Slab LRU lists are reparented on memcg offlining, but corresponding objects are still holding a reference to the dying cgroup. If we don't scan these objects, the dying cgroup can't go away. Most likely, the parent cgroup hasn't any directly charged objects, only remaining objects from dying children cgroups. So it can easily hold a reference to hundreds of dying cgroups. If there are no big spikes in memory pressure, and new memory cgroups are created and destroyed periodically, this causes the number of dying cgroups grow steadily, causing a slow-ish and hard-to-detect memory "leak". It's not a real leak, as the memory can be eventually reclaimed, but it could not happen in a real life at all. I've seen hosts with a steadily climbing number of dying cgroups, which doesn't show any signs of a decline in months, despite the host is loaded with a production workload. It is an obvious waste of memory, and to prevent it, let's apply a minimal pressure even on small shrinker lists. E.g. if there are freeable objects, let's scan at least min(freeable, scan_batch) objects. This fix significantly improves a chance of a dying cgroup to be reclaimed, and together with some previous patches stops the steady growth of the dying cgroups number on some of our hosts. Link: http://lkml.kernel.org/r/20180905230759.12236-1-guro@fb.com Fixes: 9092c71bb724 ("mm: use sc->priority for slab shrink targets") Signed-off-by: Roman Gushchin Acked-by: Rik van Riel Cc: Josef Bacik Cc: Johannes Weiner Cc: Shakeel Butt Cc: Michal Hocko Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/vmscan.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/mm/vmscan.c b/mm/vmscan.c index 7e7d25504651..c7ce2c161225 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, delta = freeable >> priority; delta *= 4; do_div(delta, shrinker->seeks); + + /* + * Make sure we apply some minimal pressure on default priority + * even on small cgroups. 
Stale objects are not only consuming memory + * by themselves, but can also hold a reference to a dying cgroup, + * preventing it from being reclaimed. A dying cgroup with all + * corresponding structures like per-cpu stats and kmem caches + * can be really big, so it may lead to a significant waste of memory. + */ + delta = max_t(unsigned long long, delta, min(freeable, batch_size)); + total_scan += delta; if (total_scan < 0) { pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", -- GitLab From 234b69e3e089d850a98e7b3145bd00e9b52b1111 Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Thu, 20 Sep 2018 12:22:51 -0700 Subject: [PATCH 1672/1692] ocfs2: fix ocfs2 read block panic While reading block, it is possible that io error return due to underlying storage issue, in this case, BH_NeedsValidate was left in the buffer head. Then when reading the very block next time, if it was already linked into journal, that will trigger the following panic. [203748.702517] kernel BUG at fs/ocfs2/buffer_head_io.c:342! [203748.702533] invalid opcode: 0000 [#1] SMP [203748.702561] Modules linked in: ocfs2 ocfs2_dlmfs ocfs2_stack_o2cb ocfs2_dlm ocfs2_nodemanager ocfs2_stackglue configfs sunrpc dm_switch dm_queue_length dm_multipath bonding be2iscsi iscsi_boot_sysfs bnx2i cnic uio cxgb4i iw_cxgb4 cxgb4 cxgb3i libcxgbi iw_cxgb3 cxgb3 mdio ib_iser rdma_cm ib_cm iw_cm ib_sa ib_mad ib_core ib_addr ipv6 iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi ipmi_devintf iTCO_wdt iTCO_vendor_support dcdbas ipmi_ssif i2c_core ipmi_si ipmi_msghandler acpi_pad pcspkr sb_edac edac_core lpc_ich mfd_core shpchp sg tg3 ptp pps_core ext4 jbd2 mbcache2 sr_mod cdrom sd_mod ahci libahci megaraid_sas wmi dm_mirror dm_region_hash dm_log dm_mod [203748.703024] CPU: 7 PID: 38369 Comm: touch Not tainted 4.1.12-124.18.6.el6uek.x86_64 #2 [203748.703045] Hardware name: Dell Inc. PowerEdge R620/0PXXHP, BIOS 2.5.2 01/28/2015 [203748.703067] task: ffff880768139c00 ti: ffff88006ff48000 task.ti: ffff88006ff48000 [203748.703088] RIP: 0010:[] [] ocfs2_read_blocks+0x669/0x7f0 [ocfs2] [203748.703130] RSP: 0018:ffff88006ff4b818 EFLAGS: 00010206 [203748.703389] RAX: 0000000008620029 RBX: ffff88006ff4b910 RCX: 0000000000000000 [203748.703885] RDX: 0000000000000001 RSI: 0000000000000000 RDI: 00000000023079fe [203748.704382] RBP: ffff88006ff4b8d8 R08: 0000000000000000 R09: ffff8807578c25b0 [203748.704877] R10: 000000000f637376 R11: 000000003030322e R12: 0000000000000000 [203748.705373] R13: ffff88006ff4b910 R14: ffff880732fe38f0 R15: 0000000000000000 [203748.705871] FS: 00007f401992c700(0000) GS:ffff880bfebc0000(0000) knlGS:0000000000000000 [203748.706370] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [203748.706627] CR2: 00007f4019252440 CR3: 00000000a621e000 CR4: 0000000000060670 [203748.707124] Stack: [203748.707371] ffff88006ff4b828 ffffffffa0609f52 ffff88006ff4b838 0000000000000001 [203748.707885] 0000000000000000 0000000000000000 ffff880bf67c3800 ffffffffa05eca00 [203748.708399] 00000000023079ff ffffffff81c58b80 0000000000000000 0000000000000000 [203748.708915] Call Trace: [203748.709175] [] ? ocfs2_inode_cache_io_unlock+0x12/0x20 [ocfs2] [203748.709680] [] ? ocfs2_empty_dir_filldir+0x80/0x80 [ocfs2] [203748.710185] [] ocfs2_read_dir_block_direct+0x3b/0x200 [ocfs2] [203748.710691] [] ocfs2_prepare_dx_dir_for_insert.isra.57+0x19f/0xf60 [ocfs2] [203748.711204] [] ? ocfs2_metadata_cache_io_unlock+0x1f/0x30 [ocfs2] [203748.711716] [] ocfs2_prepare_dir_for_insert+0x13a/0x890 [ocfs2] [203748.712227] [] ? 
ocfs2_check_dir_for_entry+0x8e/0x140 [ocfs2] [203748.712737] [] ocfs2_mknod+0x4b2/0x1370 [ocfs2] [203748.713003] [] ocfs2_create+0x65/0x170 [ocfs2] [203748.713263] [] vfs_create+0xdb/0x150 [203748.713518] [] do_last+0x815/0x1210 [203748.713772] [] ? path_init+0xb9/0x450 [203748.714123] [] path_openat+0x80/0x600 [203748.714378] [] ? handle_pte_fault+0xd15/0x1620 [203748.714634] [] do_filp_open+0x3a/0xb0 [203748.714888] [] ? __alloc_fd+0xa7/0x130 [203748.715143] [] do_sys_open+0x12c/0x220 [203748.715403] [] ? syscall_trace_enter_phase1+0x11b/0x180 [203748.715668] [] ? system_call_after_swapgs+0xe9/0x190 [203748.715928] [] SyS_open+0x1e/0x20 [203748.716184] [] system_call_fastpath+0x18/0xd7 [203748.716440] Code: 00 00 48 8b 7b 08 48 83 c3 10 45 89 f8 44 89 e1 44 89 f2 4c 89 ee e8 07 06 11 e1 48 8b 03 48 85 c0 75 df 8b 5d c8 e9 4d fa ff ff <0f> 0b 48 8b 7d a0 e8 dc c6 06 00 48 b8 00 00 00 00 00 00 00 10 [203748.717505] RIP [] ocfs2_read_blocks+0x669/0x7f0 [ocfs2] [203748.717775] RSP Joesph ever reported a similar panic. Link: https://oss.oracle.com/pipermail/ocfs2-devel/2013-May/008931.html Link: http://lkml.kernel.org/r/20180912063207.29484-1-junxiao.bi@oracle.com Signed-off-by: Junxiao Bi Cc: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Changwei Ge Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/buffer_head_io.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index d9ebe11c8990..1d098c3c00e0 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, * for this bh as it's not marked locally * uptodate. */ status = -EIO; + clear_buffer_needs_validate(bh); put_bh(bh); bhs[i] = NULL; continue; -- GitLab From 05ab1d8a4b36ee912b7087c6da127439ed0a903e Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Thu, 20 Sep 2018 10:58:28 +0800 Subject: [PATCH 1673/1692] x86/mm: Expand static page table for fixmap space We met a kernel panic when enabling earlycon, which is due to the fixmap address of earlycon is not statically setup. Currently the static fixmap setup in head_64.S only covers 2M virtual address space, while it actually could be in 4M space with different kernel configurations, e.g. when VSYSCALL emulation is disabled. So increase the static space to 4M for now by defining FIXMAP_PMD_NUM to 2, and add a build time check to ensure that the fixmap is covered by the initial static page tables. 
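For reference, the arithmetic behind the 2M and 4M figures: with 4 KiB pages one PMD entry maps PTRS_PER_PTE (512) PTEs, i.e. 2 MiB of virtual address space, so FIXMAP_PMD_NUM = 2 statically covers 4 MiB. The sketch below is an editor's illustration only (standalone C with the x86-64 4 KiB-page constants hardcoded); the check actually added by this patch is the BUILD_BUG_ON against __end_of_permanent_fixed_addresses in __native_set_fixmap(), visible in the diff further down.

    /* Editor's sketch: 2 PMDs x 512 PTEs x 4 KiB pages = 4 MiB of fixmap. */
    #define PAGE_SIZE_4K     4096UL
    #define PTRS_PER_PTE      512UL
    #define FIXMAP_PMD_NUM      2UL

    _Static_assert(FIXMAP_PMD_NUM * PTRS_PER_PTE * PAGE_SIZE_4K == (4UL << 20),
                   "two static PMDs cover 4 MiB of fixmap virtual space");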
Fixes: 1ad83c858c7d ("x86_64,vsyscall: Make vsyscall emulation configurable") Suggested-by: Thomas Gleixner Signed-off-by: Feng Tang Signed-off-by: Thomas Gleixner Tested-by: kernel test robot Reviewed-by: Juergen Gross (Xen parts) Cc: H Peter Anvin Cc: Peter Zijlstra Cc: Michal Hocko Cc: Yinghai Lu Cc: Dave Hansen Cc: Andi Kleen Cc: Andy Lutomirsky Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180920025828.23699-1-feng.tang@intel.com --- arch/x86/include/asm/fixmap.h | 10 ++++++++++ arch/x86/include/asm/pgtable_64.h | 3 ++- arch/x86/kernel/head64.c | 4 +++- arch/x86/kernel/head_64.S | 16 ++++++++++++---- arch/x86/mm/pgtable.c | 9 +++++++++ arch/x86/xen/mmu_pv.c | 8 ++++++-- 6 files changed, 42 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index e203169931c7..6390bd8c141b 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -14,6 +14,16 @@ #ifndef _ASM_X86_FIXMAP_H #define _ASM_X86_FIXMAP_H +/* + * Exposed to assembly code for setting up initial page tables. Cannot be + * calculated in assembly code (fixmap entries are an enum), but is sanity + * checked in the actual fixmap C code to make sure that the fixmap is + * covered fully. + */ +#define FIXMAP_PMD_NUM 2 +/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */ +#define FIXMAP_PMD_TOP 507 + #ifndef __ASSEMBLY__ #include #include diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index ce2b59047cb8..9c85b54bf03c 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -14,6 +14,7 @@ #include #include #include +#include extern p4d_t level4_kernel_pgt[512]; extern p4d_t level4_ident_pgt[512]; @@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pmd_t level2_fixmap_pgt[512]; extern pmd_t level2_ident_pgt[512]; -extern pte_t level1_fixmap_pgt[512]; +extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM]; extern pgd_t init_top_pgt[]; #define swapper_pg_dir init_top_pgt diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index c16af27eb23f..ddee1f0870c4 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -35,6 +35,7 @@ #include #include #include +#include /* * Manage page tables very early on. @@ -166,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr, pud[511] += load_delta; pmd = fixup_pointer(level2_fixmap_pgt, physaddr); - pmd[506] += load_delta; + for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) + pmd[i] += load_delta; /* * Set up the identity mapping for the switchover. 
These diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 15ebc2fc166e..a3618cf04cf6 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -24,6 +24,7 @@ #include "../entry/calling.h" #include #include +#include #ifdef CONFIG_PARAVIRT #include @@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt) KERNEL_IMAGE_SIZE/PMD_SIZE) NEXT_PAGE(level2_fixmap_pgt) - .fill 506,8,0 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ - .fill 5,8,0 + .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 + pgtno = 0 + .rept (FIXMAP_PMD_NUM) + .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \ + + _PAGE_TABLE_NOENC; + pgtno = pgtno + 1 + .endr + /* 6 MB reserved space + a 2MB hole */ + .fill 4,8,0 NEXT_PAGE(level1_fixmap_pgt) + .rept (FIXMAP_PMD_NUM) .fill 512,8,0 + .endr #undef PMDS diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index ae394552fb94..089e78c4effd 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -637,6 +637,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) { unsigned long address = __fix_to_virt(idx); +#ifdef CONFIG_X86_64 + /* + * Ensure that the static initial page tables are covering the + * fixmap completely. + */ + BUILD_BUG_ON(__end_of_permanent_fixed_addresses > + (FIXMAP_PMD_NUM * PTRS_PER_PTE)); +#endif + if (idx >= __end_of_fixed_addresses) { BUG(); return; diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 2fe5c9b1816b..dd461c0167ef 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1907,7 +1907,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) /* L3_k[511] -> level2_fixmap_pgt */ convert_pfn_mfn(level3_kernel_pgt); - /* L3_k[511][506] -> level1_fixmap_pgt */ + /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */ convert_pfn_mfn(level2_fixmap_pgt); /* We get [511][511] and have Xen's version of level2_kernel_pgt */ @@ -1952,7 +1952,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); - set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); + + for (i = 0; i < FIXMAP_PMD_NUM; i++) { + set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE, + PAGE_KERNEL_RO); + } /* Pin down new L4 */ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, -- GitLab From b57e99b4b8b0ebdf9707424e7ddc0c392bdc5fe6 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Fri, 21 Sep 2018 16:44:34 -0700 Subject: [PATCH 1674/1692] block: use nanosecond resolution for iostat Klaus Kusche reported that the I/O busy time in /proc/diskstats was not updating properly on 4.18. This is because we started using ktime to track elapsed time, and we convert nanoseconds to jiffies when we update the partition counter. However, this gets rounded down, so any I/Os that take less than a jiffy are not accounted for. Previously in this case, the value of jiffies would sometimes increment while we were doing I/O, so at least some I/Os were accounted for. Let's convert the stats to use nanoseconds internally. We still report milliseconds as before, now more accurately than ever. The value is still truncated to 32 bits for backwards compatibility. 
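To make the rounding loss concrete, here is a small standalone illustration (editor's sketch, not kernel code; HZ = 250 and the simplified truncating conversion are assumptions for the example). Converting each request's duration to jiffies before accumulating drops every sub-jiffy I/O, while accumulating nanoseconds and converting only when the counter is read preserves the time:

    #include <stdio.h>
    #include <stdint.h>

    #define HZ            250ULL            /* assumed: 1 jiffy = 4 ms          */
    #define NSEC_PER_SEC  1000000000ULL
    #define NSEC_PER_MSEC 1000000ULL

    /* Simplified stand-in for the kernel conversion; truncates toward zero. */
    static uint64_t nsecs_to_jiffies(uint64_t ns)
    {
        return ns * HZ / NSEC_PER_SEC;
    }

    int main(void)
    {
        uint64_t io_ns = 3 * NSEC_PER_MSEC; /* each I/O takes 3 ms, < 1 jiffy   */
        uint64_t ticks = 0, nsecs = 0;

        for (int i = 0; i < 1000; i++) {    /* 1000 such I/Os = 3 s of busy time */
            ticks += nsecs_to_jiffies(io_ns);   /* old scheme: adds 0 each time */
            nsecs += io_ns;                     /* new scheme: raw nanoseconds  */
        }

        printf("old: %llu ms, new: %llu ms\n",
               (unsigned long long)(ticks * 1000 / HZ),
               (unsigned long long)(nsecs / NSEC_PER_MSEC));
        return 0;
    }

The old scheme reports 0 ms for three full seconds of sub-jiffy I/O, which illustrates the stalled counters described above; the new scheme reports 3000 ms.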
Fixes: 522a777566f5 ("block: consolidate struct request timestamp fields") Cc: stable@vger.kernel.org Reported-by: Klaus Kusche Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- block/bio.c | 2 +- block/blk-core.c | 4 +--- block/genhd.c | 6 +++--- block/partition-generic.c | 6 +++--- include/linux/genhd.h | 5 ++++- 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/block/bio.c b/block/bio.c index 8c680a776171..0093bed81c0e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1684,7 +1684,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op, const int sgrp = op_stat_group(req_op); int cpu = part_stat_lock(); - part_stat_add(cpu, part, ticks[sgrp], duration); + part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration)); part_round_stats(q, cpu, part); part_dec_in_flight(q, part, op_is_write(req_op)); diff --git a/block/blk-core.c b/block/blk-core.c index 4dbc93f43b38..cff0a60ee200 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2733,17 +2733,15 @@ void blk_account_io_done(struct request *req, u64 now) * containing request is enough. */ if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { - unsigned long duration; const int sgrp = op_stat_group(req_op(req)); struct hd_struct *part; int cpu; - duration = nsecs_to_jiffies(now - req->start_time_ns); cpu = part_stat_lock(); part = req->part; part_stat_inc(cpu, part, ios[sgrp]); - part_stat_add(cpu, part, ticks[sgrp], duration); + part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns); part_round_stats(req->q, cpu, part); part_dec_in_flight(req->q, part, rq_data_dir(req)); diff --git a/block/genhd.c b/block/genhd.c index 8cc719a37b32..be5bab20b2ab 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1343,18 +1343,18 @@ static int diskstats_show(struct seq_file *seqf, void *v) part_stat_read(hd, ios[STAT_READ]), part_stat_read(hd, merges[STAT_READ]), part_stat_read(hd, sectors[STAT_READ]), - jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])), + (unsigned int)part_stat_read_msecs(hd, STAT_READ), part_stat_read(hd, ios[STAT_WRITE]), part_stat_read(hd, merges[STAT_WRITE]), part_stat_read(hd, sectors[STAT_WRITE]), - jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])), + (unsigned int)part_stat_read_msecs(hd, STAT_WRITE), inflight[0], jiffies_to_msecs(part_stat_read(hd, io_ticks)), jiffies_to_msecs(part_stat_read(hd, time_in_queue)), part_stat_read(hd, ios[STAT_DISCARD]), part_stat_read(hd, merges[STAT_DISCARD]), part_stat_read(hd, sectors[STAT_DISCARD]), - jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD])) + (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD) ); } disk_part_iter_exit(&piter); diff --git a/block/partition-generic.c b/block/partition-generic.c index 5a8975a1201c..d3d14e81fb12 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev, part_stat_read(p, ios[STAT_READ]), part_stat_read(p, merges[STAT_READ]), (unsigned long long)part_stat_read(p, sectors[STAT_READ]), - jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])), + (unsigned int)part_stat_read_msecs(p, STAT_READ), part_stat_read(p, ios[STAT_WRITE]), part_stat_read(p, merges[STAT_WRITE]), (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]), - jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])), + (unsigned int)part_stat_read_msecs(p, STAT_WRITE), inflight[0], jiffies_to_msecs(part_stat_read(p, io_ticks)), jiffies_to_msecs(part_stat_read(p, time_in_queue)), part_stat_read(p, ios[STAT_DISCARD]), part_stat_read(p, 
merges[STAT_DISCARD]), (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]), - jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD]))); + (unsigned int)part_stat_read_msecs(p, STAT_DISCARD)); } ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 57864422a2c8..25c08c6c7f99 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -83,10 +83,10 @@ struct partition { } __attribute__((packed)); struct disk_stats { + u64 nsecs[NR_STAT_GROUPS]; unsigned long sectors[NR_STAT_GROUPS]; unsigned long ios[NR_STAT_GROUPS]; unsigned long merges[NR_STAT_GROUPS]; - unsigned long ticks[NR_STAT_GROUPS]; unsigned long io_ticks; unsigned long time_in_queue; }; @@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part) #endif /* CONFIG_SMP */ +#define part_stat_read_msecs(part, which) \ + div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC) + #define part_stat_read_accum(part, field) \ (part_stat_read(part, field[STAT_READ]) + \ part_stat_read(part, field[STAT_WRITE]) + \ -- GitLab From 6bf4ca7fbc85d80446ac01c0d1d77db4d91a6d84 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sun, 23 Sep 2018 19:15:18 +0200 Subject: [PATCH 1675/1692] Linux 4.19-rc5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f03a1e062503..0c90c4354979 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME = Merciless Moray # *DOCUMENTATION* -- GitLab From f48097d294d6f76a38bf1a1cb579aa99ede44297 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 20 Aug 2018 17:07:25 +0300 Subject: [PATCH 1676/1692] dt-bindings: display: renesas: du: Document r8a77990 bindings Document the E3 (r8a77990) SoC in the R-Car DU bindings. Signed-off-by: Laurent Pinchart Reviewed-by: Jacopo Mondi Reviewed-by: Rob Herring Reviewed-by: Ulrich Hecht --- Documentation/devicetree/bindings/display/renesas,du.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt index caae2348a292..9de67be632d1 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.txt +++ b/Documentation/devicetree/bindings/display/renesas,du.txt @@ -16,6 +16,7 @@ Required Properties: - "renesas,du-r8a77965" for R8A77965 (R-Car M3-N) compatible DU - "renesas,du-r8a77970" for R8A77970 (R-Car V3M) compatible DU - "renesas,du-r8a77980" for R8A77980 (R-Car V3H) compatible DU + - "renesas,du-r8a77990" for R8A77990 (R-Car E3) compatible DU - "renesas,du-r8a77995" for R8A77995 (R-Car D3) compatible DU - reg: the memory-mapped I/O registers base address and length @@ -63,6 +64,7 @@ corresponding to each DU output. R8A77965 (R-Car M3-N) DPAD 0 HDMI 0 LVDS 0 - R8A77970 (R-Car V3M) DPAD 0 LVDS 0 - - R8A77980 (R-Car V3H) DPAD 0 LVDS 0 - - + R8A77990 (R-Car E3) DPAD 0 LVDS 0 LVDS 1 - R8A77995 (R-Car D3) DPAD 0 LVDS 0 LVDS 1 - -- GitLab From f9c32db12e2439ec9553a1a4c43d0ee308387790 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 20 Aug 2018 17:12:49 +0300 Subject: [PATCH 1677/1692] dt-bindings: display: renesas: lvds: Document r8a77990 bindings The E3 (r8a77990) supports two LVDS channels. Extend the binding to support them. 
Signed-off-by: Laurent Pinchart Reviewed-by: Jacopo Mondi Reviewed-by: Rob Herring Reviewed-by: Ulrich Hecht Reviewed-by: Kieran Bingham --- .../devicetree/bindings/display/bridge/renesas,lvds.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt index 5a4e379bb414..13af7e2ac7e8 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt +++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt @@ -15,6 +15,7 @@ Required properties: - "renesas,r8a7796-lvds" for R8A7796 (R-Car M3-W) compatible LVDS encoders - "renesas,r8a77970-lvds" for R8A77970 (R-Car V3M) compatible LVDS encoders - "renesas,r8a77980-lvds" for R8A77980 (R-Car V3H) compatible LVDS encoders + - "renesas,r8a77990-lvds" for R8A77990 (R-Car E3) compatible LVDS encoders - "renesas,r8a77995-lvds" for R8A77995 (R-Car D3) compatible LVDS encoders - reg: Base address and length for the memory-mapped registers -- GitLab From 9734a7009de62c05e9b08fa65efb7c5214ab3f30 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 22 Aug 2018 15:27:16 +0300 Subject: [PATCH 1678/1692] dt-bindings: display: renesas: lvds: Add EXTAL and DU_DOTCLKIN clocks On the D3 and E3 SoCs, the LVDS encoder can derive its internal pixel clock from an externally supplied clock, either through the EXTAL pin or through one of the DU_DOTCLKINx pins. Add corresponding clocks to the DT bindings. To retain backward compatibility with DT that don't specify the clock-names property, the functional clock must always be specified first, and the clock-names property is optional when only the functional clock is specified. Signed-off-by: Laurent Pinchart Reviewed-by: Jacopo Mondi Reviewed-by: Ulrich Hecht Reviewed-by: Kieran Bingham --- .../bindings/display/bridge/renesas,lvds.txt | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt index 13af7e2ac7e8..3aeb0ec06fd0 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt +++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt @@ -19,7 +19,17 @@ Required properties: - "renesas,r8a77995-lvds" for R8A77995 (R-Car D3) compatible LVDS encoders - reg: Base address and length for the memory-mapped registers -- clocks: A phandle + clock-specifier pair for the functional clock +- clocks: A list of phandles + clock-specifier pairs, one for each entry in + the clock-names property. +- clock-names: Name of the clocks. This property is model-dependent. + - The functional clock, which mandatory for all models, shall be listed + first, and shall be named "fck". + - On R8A77990 and R8A77995, the LVDS encoder can use the EXTAL or + DU_DOTCLKINx clocks. Those clocks are optional. When supplied they must be + named "extal" and "dclkin.x" respectively, with "x" being the DU_DOTCLKIN + numerical index. + - When the clocks property only contains the functional clock, the + clock-names property may be omitted. 
- resets: A phandle + reset specifier for the module reset Required nodes: -- GitLab From 399d9f2f197a06b8866192a019a97d2af29cc81e Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 22 Aug 2018 17:04:06 +0300 Subject: [PATCH 1679/1692] drm: bridge: thc63: Restrict modes based on hardware operating frequency The THC63LVD1024 is restricted to a pixel clock frequency in the range of 8 to 135 MHz. Implement the bridge .mode_valid() operation accordingly. Signed-off-by: Laurent Pinchart Reviewed-by: Andrzej Hajda Tested-by: Jacopo Mondi --- drivers/gpu/drm/bridge/thc63lvd1024.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c index c8b9edd5a7f4..b083a740565c 100644 --- a/drivers/gpu/drm/bridge/thc63lvd1024.c +++ b/drivers/gpu/drm/bridge/thc63lvd1024.c @@ -45,6 +45,23 @@ static int thc63_attach(struct drm_bridge *bridge) return drm_bridge_attach(bridge->encoder, thc63->next, bridge); } +static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge, + const struct drm_display_mode *mode) +{ + /* + * The THC63LVD1024 clock frequency range is 8 to 135 MHz in single-in + * mode. Note that the limits are different in dual-in, single-out mode, + * and will need to be adjusted accordingly. + */ + if (mode->clock < 8000) + return MODE_CLOCK_LOW; + + if (mode->clock > 135000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + static void thc63_enable(struct drm_bridge *bridge) { struct thc63_dev *thc63 = to_thc63(bridge); @@ -77,6 +94,7 @@ static void thc63_disable(struct drm_bridge *bridge) static const struct drm_bridge_funcs thc63_bridge_func = { .attach = thc63_attach, + .mode_valid = thc63_mode_valid, .enable = thc63_enable, .disable = thc63_disable, }; -- GitLab From c25c0136119990c62c160d95592714833bc214a5 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 21 Aug 2018 18:06:50 +0300 Subject: [PATCH 1680/1692] drm: rcar-du: lvds: D3/E3 support The LVDS encoders in the D3 and E3 SoCs differ significantly from those in the other R-Car Gen3 family members: - The LVDS PLL architecture is more complex and requires computing PLL parameters manually. - The PLL uses external clocks as inputs, which need to be retrieved from DT. - In addition to the different PLL setup, the startup sequence has changed *again* (seems someone had trouble making his/her mind). Supporting all this requires DT bindings extensions for external clocks, brand new PLL setup code, and a few quirks to handle the differences in the startup sequence. The implementation doesn't support all hardware features yet, namely - Using the LV[01] clocks generated by the CPG as PLL input. - Providing the LVDS PLL clock to the DU for use with the RGB output. Those features can be added later when the need will arise. Signed-off-by: Laurent Pinchart Tested-by: Jacopo Mondi Reviewed-by: Ulrich Hecht Reviewed-by: Jacopo Mondi --- drivers/gpu/drm/rcar-du/rcar_lvds.c | 359 ++++++++++++++++++++--- drivers/gpu/drm/rcar-du/rcar_lvds_regs.h | 43 ++- 2 files changed, 355 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index ce0eb68c3416..173d7ad0b991 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -24,6 +24,8 @@ #include "rcar_lvds_regs.h" +struct rcar_lvds; + /* Keep in sync with the LVDCR0.LVMD hardware register values. 
*/ enum rcar_lvds_mode { RCAR_LVDS_MODE_JEIDA = 0, @@ -31,14 +33,16 @@ enum rcar_lvds_mode { RCAR_LVDS_MODE_VESA = 4, }; -#define RCAR_LVDS_QUIRK_LANES (1 << 0) /* LVDS lanes 1 and 3 inverted */ -#define RCAR_LVDS_QUIRK_GEN2_PLLCR (1 << 1) /* LVDPLLCR has gen2 layout */ -#define RCAR_LVDS_QUIRK_GEN3_LVEN (1 << 2) /* LVEN bit needs to be set */ - /* on R8A77970/R8A7799x */ +#define RCAR_LVDS_QUIRK_LANES BIT(0) /* LVDS lanes 1 and 3 inverted */ +#define RCAR_LVDS_QUIRK_GEN3_LVEN BIT(1) /* LVEN bit needs to be set on R8A77970/R8A7799x */ +#define RCAR_LVDS_QUIRK_PWD BIT(2) /* PWD bit available (all of Gen3 but E3) */ +#define RCAR_LVDS_QUIRK_EXT_PLL BIT(3) /* Has extended PLL */ +#define RCAR_LVDS_QUIRK_DUAL_LINK BIT(4) /* Supports dual-link operation */ struct rcar_lvds_device_info { unsigned int gen; unsigned int quirks; + void (*pll_setup)(struct rcar_lvds *lvds, unsigned int freq); }; struct rcar_lvds { @@ -52,7 +56,11 @@ struct rcar_lvds { struct drm_panel *panel; void __iomem *mmio; - struct clk *clock; + struct { + struct clk *mod; /* CPG module clock */ + struct clk *extal; /* External clock */ + struct clk *dotclkin[2]; /* External DU clocks */ + } clocks; bool enabled; struct drm_display_mode display_mode; @@ -128,33 +136,216 @@ static const struct drm_connector_funcs rcar_lvds_conn_funcs = { }; /* ----------------------------------------------------------------------------- - * Bridge + * PLL Setup */ -static u32 rcar_lvds_lvdpllcr_gen2(unsigned int freq) +static void rcar_lvds_pll_setup_gen2(struct rcar_lvds *lvds, unsigned int freq) { - if (freq < 39000) - return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M; - else if (freq < 61000) - return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M; - else if (freq < 121000) - return LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M; + u32 val; + + if (freq < 39000000) + val = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M; + else if (freq < 61000000) + val = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M; + else if (freq < 121000000) + val = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M; else - return LVDPLLCR_PLLDLYCNT_150M; + val = LVDPLLCR_PLLDLYCNT_150M; + + rcar_lvds_write(lvds, LVDPLLCR, val); } -static u32 rcar_lvds_lvdpllcr_gen3(unsigned int freq) +static void rcar_lvds_pll_setup_gen3(struct rcar_lvds *lvds, unsigned int freq) { - if (freq < 42000) - return LVDPLLCR_PLLDIVCNT_42M; - else if (freq < 85000) - return LVDPLLCR_PLLDIVCNT_85M; - else if (freq < 128000) - return LVDPLLCR_PLLDIVCNT_128M; + u32 val; + + if (freq < 42000000) + val = LVDPLLCR_PLLDIVCNT_42M; + else if (freq < 85000000) + val = LVDPLLCR_PLLDIVCNT_85M; + else if (freq < 128000000) + val = LVDPLLCR_PLLDIVCNT_128M; else - return LVDPLLCR_PLLDIVCNT_148M; + val = LVDPLLCR_PLLDIVCNT_148M; + + rcar_lvds_write(lvds, LVDPLLCR, val); } +struct pll_info { + unsigned long diff; + unsigned int pll_m; + unsigned int pll_n; + unsigned int pll_e; + unsigned int div; + u32 clksel; +}; + +static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk, + unsigned long target, struct pll_info *pll, + u32 clksel) +{ + unsigned long output; + unsigned long fin; + unsigned int m_min; + unsigned int m_max; + unsigned int m; + int error; + + if (!clk) + return; + + /* + * The LVDS PLL is made of a pre-divider and a multiplier (strangely + * enough called M and N respectively), followed by a post-divider E. + * + * ,-----. ,-----. ,-----. ,-----. 
+ * Fin --> | 1/M | -Fpdf-> | PFD | --> | VCO | -Fvco-> | 1/E | --> Fout + * `-----' ,-> | | `-----' | `-----' + * | `-----' | + * | ,-----. | + * `-------- | 1/N | <-------' + * `-----' + * + * The clock output by the PLL is then further divided by a programmable + * divider DIV to achieve the desired target frequency. Finally, an + * optional fixed /7 divider is used to convert the bit clock to a pixel + * clock (as LVDS transmits 7 bits per lane per clock sample). + * + * ,-------. ,-----. |\ + * Fout --> | 1/DIV | --> | 1/7 | --> | | + * `-------' | `-----' | | --> dot clock + * `------------> | | + * |/ + * + * The /7 divider is optional when the LVDS PLL is used to generate a + * dot clock for the DU RGB output, without using the LVDS encoder. We + * don't support this configuration yet. + * + * The PLL allowed input frequency range is 12 MHz to 192 MHz. + */ + + fin = clk_get_rate(clk); + if (fin < 12000000 || fin > 192000000) + return; + + /* + * The comparison frequency range is 12 MHz to 24 MHz, which limits the + * allowed values for the pre-divider M (normal range 1-8). + * + * Fpfd = Fin / M + */ + m_min = max_t(unsigned int, 1, DIV_ROUND_UP(fin, 24000000)); + m_max = min_t(unsigned int, 8, fin / 12000000); + + for (m = m_min; m <= m_max; ++m) { + unsigned long fpfd; + unsigned int n_min; + unsigned int n_max; + unsigned int n; + + /* + * The VCO operating range is 900 Mhz to 1800 MHz, which limits + * the allowed values for the multiplier N (normal range + * 60-120). + * + * Fvco = Fin * N / M + */ + fpfd = fin / m; + n_min = max_t(unsigned int, 60, DIV_ROUND_UP(900000000, fpfd)); + n_max = min_t(unsigned int, 120, 1800000000 / fpfd); + + for (n = n_min; n < n_max; ++n) { + unsigned long fvco; + unsigned int e_min; + unsigned int e; + + /* + * The output frequency is limited to 1039.5 MHz, + * limiting again the allowed values for the + * post-divider E (normal value 1, 2 or 4). + * + * Fout = Fvco / E + */ + fvco = fpfd * n; + e_min = fvco > 1039500000 ? 1 : 0; + + for (e = e_min; e < 3; ++e) { + unsigned long fout; + unsigned long diff; + unsigned int div; + + /* + * Finally we have a programable divider after + * the PLL, followed by a an optional fixed /7 + * divider. + */ + fout = fvco / (1 << e) / 7; + div = DIV_ROUND_CLOSEST(fout, target); + diff = abs(fout / div - target); + + if (diff < pll->diff) { + pll->diff = diff; + pll->pll_m = m; + pll->pll_n = n; + pll->pll_e = e; + pll->div = div; + pll->clksel = clksel; + + if (diff == 0) + goto done; + } + } + } + } + +done: + output = fin * pll->pll_n / pll->pll_m / (1 << pll->pll_e) + / 7 / pll->div; + error = (long)(output - target) * 10000 / (long)target; + + dev_dbg(lvds->dev, + "%pC %lu Hz -> Fout %lu Hz (target %lu Hz, error %d.%02u%%), PLL M/N/E/DIV %u/%u/%u/%u\n", + clk, fin, output, target, error / 100, + error < 0 ? 
-error % 100 : error % 100, + pll->pll_m, pll->pll_n, pll->pll_e, pll->div); +} + +static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq) +{ + struct pll_info pll = { .diff = (unsigned long)-1 }; + u32 lvdpllcr; + + rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.dotclkin[0], freq, &pll, + LVDPLLCR_CKSEL_DU_DOTCLKIN(0)); + rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.dotclkin[1], freq, &pll, + LVDPLLCR_CKSEL_DU_DOTCLKIN(1)); + rcar_lvds_d3_e3_pll_calc(lvds, lvds->clocks.extal, freq, &pll, + LVDPLLCR_CKSEL_EXTAL); + + lvdpllcr = LVDPLLCR_PLLON | pll.clksel | LVDPLLCR_CLKOUT + | LVDPLLCR_PLLN(pll.pll_n - 1) | LVDPLLCR_PLLM(pll.pll_m - 1); + + if (pll.pll_e > 0) + lvdpllcr |= LVDPLLCR_STP_CLKOUTE | LVDPLLCR_OUTCLKSEL + | LVDPLLCR_PLLE(pll.pll_e - 1); + + rcar_lvds_write(lvds, LVDPLLCR, lvdpllcr); + + if (pll.div > 1) + /* + * The DIVRESET bit is a misnomer, setting it to 1 deasserts the + * divisor reset. + */ + rcar_lvds_write(lvds, LVDDIV, LVDDIV_DIVSEL | + LVDDIV_DIVRESET | LVDDIV_DIV(pll.div - 1)); + else + rcar_lvds_write(lvds, LVDDIV, 0); +} + +/* ----------------------------------------------------------------------------- + * Bridge + */ + static void rcar_lvds_enable(struct drm_bridge *bridge) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); @@ -164,14 +355,13 @@ static void rcar_lvds_enable(struct drm_bridge *bridge) * do we get a state pointer? */ struct drm_crtc *crtc = lvds->bridge.encoder->crtc; - u32 lvdpllcr; u32 lvdhcr; u32 lvdcr0; int ret; WARN_ON(lvds->enabled); - ret = clk_prepare_enable(lvds->clock); + ret = clk_prepare_enable(lvds->clocks.mod); if (ret < 0) return; @@ -196,12 +386,13 @@ static void rcar_lvds_enable(struct drm_bridge *bridge) rcar_lvds_write(lvds, LVDCHCR, lvdhcr); + if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) { + /* Disable dual-link mode. */ + rcar_lvds_write(lvds, LVDSTRIPE, 0); + } + /* PLL clock configuration. */ - if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN2_PLLCR) - lvdpllcr = rcar_lvds_lvdpllcr_gen2(mode->clock); - else - lvdpllcr = rcar_lvds_lvdpllcr_gen3(mode->clock); - rcar_lvds_write(lvds, LVDPLLCR, lvdpllcr); + lvds->info->pll_setup(lvds, mode->clock * 1000); /* Set the LVDS mode and select the input. */ lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT; @@ -220,11 +411,16 @@ static void rcar_lvds_enable(struct drm_bridge *bridge) rcar_lvds_write(lvds, LVDCR0, lvdcr0); } - /* Turn the PLL on. */ - lvdcr0 |= LVDCR0_PLLON; - rcar_lvds_write(lvds, LVDCR0, lvdcr0); + if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) { + /* + * Turn the PLL on (simple PLL only, extended PLL is fully + * controlled through LVDPLLCR). + */ + lvdcr0 |= LVDCR0_PLLON; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + } - if (lvds->info->gen > 2) { + if (lvds->info->quirks & RCAR_LVDS_QUIRK_PWD) { /* Set LVDS normal mode. */ lvdcr0 |= LVDCR0_PWD; rcar_lvds_write(lvds, LVDCR0, lvdcr0); @@ -236,8 +432,10 @@ static void rcar_lvds_enable(struct drm_bridge *bridge) rcar_lvds_write(lvds, LVDCR0, lvdcr0); } - /* Wait for the startup delay. */ - usleep_range(100, 150); + if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) { + /* Wait for the PLL startup delay (simple PLL only). */ + usleep_range(100, 150); + } /* Turn the output on. 
*/ lvdcr0 |= LVDCR0_LVRES; @@ -264,8 +462,9 @@ static void rcar_lvds_disable(struct drm_bridge *bridge) rcar_lvds_write(lvds, LVDCR0, 0); rcar_lvds_write(lvds, LVDCR1, 0); + rcar_lvds_write(lvds, LVDPLLCR, 0); - clk_disable_unprepare(lvds->clock); + clk_disable_unprepare(lvds->clocks.mod); lvds->enabled = false; } @@ -446,6 +645,60 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds) return ret; } +static struct clk *rcar_lvds_get_clock(struct rcar_lvds *lvds, const char *name, + bool optional) +{ + struct clk *clk; + + clk = devm_clk_get(lvds->dev, name); + if (!IS_ERR(clk)) + return clk; + + if (PTR_ERR(clk) == -ENOENT && optional) + return NULL; + + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(lvds->dev, "failed to get %s clock\n", + name ? name : "module"); + + return clk; +} + +static int rcar_lvds_get_clocks(struct rcar_lvds *lvds) +{ + lvds->clocks.mod = rcar_lvds_get_clock(lvds, NULL, false); + if (IS_ERR(lvds->clocks.mod)) + return PTR_ERR(lvds->clocks.mod); + + /* + * LVDS encoders without an extended PLL have no external clock inputs. + */ + if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) + return 0; + + lvds->clocks.extal = rcar_lvds_get_clock(lvds, "extal", true); + if (IS_ERR(lvds->clocks.extal)) + return PTR_ERR(lvds->clocks.extal); + + lvds->clocks.dotclkin[0] = rcar_lvds_get_clock(lvds, "dclkin.0", true); + if (IS_ERR(lvds->clocks.dotclkin[0])) + return PTR_ERR(lvds->clocks.dotclkin[0]); + + lvds->clocks.dotclkin[1] = rcar_lvds_get_clock(lvds, "dclkin.1", true); + if (IS_ERR(lvds->clocks.dotclkin[1])) + return PTR_ERR(lvds->clocks.dotclkin[1]); + + /* At least one input to the PLL must be available. */ + if (!lvds->clocks.extal && !lvds->clocks.dotclkin[0] && + !lvds->clocks.dotclkin[1]) { + dev_err(lvds->dev, + "no input clock (extal, dclkin.0 or dclkin.1)\n"); + return -EINVAL; + } + + return 0; +} + static int rcar_lvds_probe(struct platform_device *pdev) { struct rcar_lvds *lvds; @@ -475,11 +728,9 @@ static int rcar_lvds_probe(struct platform_device *pdev) if (IS_ERR(lvds->mmio)) return PTR_ERR(lvds->mmio); - lvds->clock = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(lvds->clock)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return PTR_ERR(lvds->clock); - } + ret = rcar_lvds_get_clocks(lvds); + if (ret < 0) + return ret; drm_bridge_add(&lvds->bridge); @@ -497,21 +748,39 @@ static int rcar_lvds_remove(struct platform_device *pdev) static const struct rcar_lvds_device_info rcar_lvds_gen2_info = { .gen = 2, - .quirks = RCAR_LVDS_QUIRK_GEN2_PLLCR, + .pll_setup = rcar_lvds_pll_setup_gen2, }; static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = { .gen = 2, - .quirks = RCAR_LVDS_QUIRK_GEN2_PLLCR | RCAR_LVDS_QUIRK_LANES, + .quirks = RCAR_LVDS_QUIRK_LANES, + .pll_setup = rcar_lvds_pll_setup_gen2, }; static const struct rcar_lvds_device_info rcar_lvds_gen3_info = { .gen = 3, + .quirks = RCAR_LVDS_QUIRK_PWD, + .pll_setup = rcar_lvds_pll_setup_gen3, }; static const struct rcar_lvds_device_info rcar_lvds_r8a77970_info = { .gen = 3, - .quirks = RCAR_LVDS_QUIRK_GEN2_PLLCR | RCAR_LVDS_QUIRK_GEN3_LVEN, + .quirks = RCAR_LVDS_QUIRK_PWD | RCAR_LVDS_QUIRK_GEN3_LVEN, + .pll_setup = rcar_lvds_pll_setup_gen2, +}; + +static const struct rcar_lvds_device_info rcar_lvds_r8a77990_info = { + .gen = 3, + .quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_EXT_PLL + | RCAR_LVDS_QUIRK_DUAL_LINK, + .pll_setup = rcar_lvds_pll_setup_d3_e3, +}; + +static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = { + .gen = 3, + .quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | 
RCAR_LVDS_QUIRK_PWD + | RCAR_LVDS_QUIRK_EXT_PLL | RCAR_LVDS_QUIRK_DUAL_LINK, + .pll_setup = rcar_lvds_pll_setup_d3_e3, }; static const struct of_device_id rcar_lvds_of_table[] = { @@ -523,6 +792,8 @@ static const struct of_device_id rcar_lvds_of_table[] = { { .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info }, { .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info }, + { .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info }, + { .compatible = "renesas,r8a77995-lvds", .data = &rcar_lvds_r8a77995_info }, { } }; diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h index 4870f50d9bec..87149f2f8056 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h @@ -18,7 +18,7 @@ #define LVDCR0_PLLON (1 << 4) #define LVDCR0_PWD (1 << 2) /* Gen3 only */ #define LVDCR0_BEN (1 << 2) /* Gen2 only */ -#define LVDCR0_LVEN (1 << 1) /* Gen2 only */ +#define LVDCR0_LVEN (1 << 1) #define LVDCR0_LVRES (1 << 0) #define LVDCR1 0x0004 @@ -27,21 +27,36 @@ #define LVDCR1_CLKSTBY (3 << 0) #define LVDPLLCR 0x0008 +/* Gen2 & V3M */ #define LVDPLLCR_CEEN (1 << 14) #define LVDPLLCR_FBEN (1 << 13) #define LVDPLLCR_COSEL (1 << 12) -/* Gen2 */ #define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0) #define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0) #define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0) #define LVDPLLCR_PLLDLYCNT_38M (0x69a << 0) #define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0) -/* Gen3 */ +/* Gen3 but V3M,D3 and E3 */ #define LVDPLLCR_PLLDIVCNT_42M (0x014cb << 0) #define LVDPLLCR_PLLDIVCNT_85M (0x00a45 << 0) #define LVDPLLCR_PLLDIVCNT_128M (0x006c3 << 0) #define LVDPLLCR_PLLDIVCNT_148M (0x046c1 << 0) #define LVDPLLCR_PLLDIVCNT_MASK (0x7ffff << 0) +/* D3 and E3 */ +#define LVDPLLCR_PLLON (1 << 22) +#define LVDPLLCR_PLLSEL_PLL0 (0 << 20) +#define LVDPLLCR_PLLSEL_LVX (1 << 20) +#define LVDPLLCR_PLLSEL_PLL1 (2 << 20) +#define LVDPLLCR_CKSEL_LVX (1 << 17) +#define LVDPLLCR_CKSEL_EXTAL (3 << 17) +#define LVDPLLCR_CKSEL_DU_DOTCLKIN(n) ((5 + (n) * 2) << 17) +#define LVDPLLCR_OCKSEL (1 << 16) +#define LVDPLLCR_STP_CLKOUTE (1 << 14) +#define LVDPLLCR_OUTCLKSEL (1 << 12) +#define LVDPLLCR_CLKOUT (1 << 11) +#define LVDPLLCR_PLLE(n) ((n) << 10) +#define LVDPLLCR_PLLN(n) ((n) << 3) +#define LVDPLLCR_PLLM(n) ((n) << 0) #define LVDCTRCR 0x000c #define LVDCTRCR_CTR3SEL_ZERO (0 << 12) @@ -71,4 +86,26 @@ #define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4)) #define LVDCHCR_CHSEL_MASK(n) (3 << ((n) * 4)) +/* All registers below are specific to D3 and E3 */ +#define LVDSTRIPE 0x0014 +#define LVDSTRIPE_ST_TRGSEL_DISP (0 << 2) +#define LVDSTRIPE_ST_TRGSEL_HSYNC_R (1 << 2) +#define LVDSTRIPE_ST_TRGSEL_HSYNC_F (2 << 2) +#define LVDSTRIPE_ST_SWAP (1 << 1) +#define LVDSTRIPE_ST_ON (1 << 0) + +#define LVDSCR 0x0018 +#define LVDSCR_DEPTH(n) (((n) - 1) << 29) +#define LVDSCR_BANDSET (1 << 28) +#define LVDSCR_TWGCNT(n) ((((n) - 256) / 16) << 24) +#define LVDSCR_SDIV(n) ((n) << 22) +#define LVDSCR_MODE (1 << 21) +#define LVDSCR_RSTN (1 << 20) + +#define LVDDIV 0x001c +#define LVDDIV_DIVSEL (1 << 8) +#define LVDDIV_DIVRESET (1 << 7) +#define LVDDIV_DIVSTP (1 << 6) +#define LVDDIV_DIV(n) ((n) << 0) + #endif /* __RCAR_LVDS_REGS_H__ */ -- GitLab From 0bb63534fdf3bc9a82bcfe9f5c6a9653b8b2a3f1 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Fri, 14 Jul 2017 03:26:17 +0300 Subject: [PATCH 1681/1692] drm: rcar-du: Perform the initial CRTC setup from 
rcar_du_crtc_get() The rcar_du_crtc_get() function is always immediately followed by a call to rcar_du_crtc_setup(). Call the later from the former to simplify the code, and add a comment to explain how the get and put calls are balanced. Signed-off-by: Laurent Pinchart Tested-by: Jacopo Mondi Reviewed-by: Jacopo Mondi --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 107 +++++++++++++------------ 1 file changed, 56 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 6288b9ad9e24..c89751c26f9c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -66,39 +66,6 @@ static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg, rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set); } -static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc) -{ - int ret; - - ret = clk_prepare_enable(rcrtc->clock); - if (ret < 0) - return ret; - - ret = clk_prepare_enable(rcrtc->extclock); - if (ret < 0) - goto error_clock; - - ret = rcar_du_group_get(rcrtc->group); - if (ret < 0) - goto error_group; - - return 0; - -error_group: - clk_disable_unprepare(rcrtc->extclock); -error_clock: - clk_disable_unprepare(rcrtc->clock); - return ret; -} - -static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc) -{ - rcar_du_group_put(rcrtc->group); - - clk_disable_unprepare(rcrtc->extclock); - clk_disable_unprepare(rcrtc->clock); -} - /* ----------------------------------------------------------------------------- * Hardware Setup */ @@ -546,6 +513,51 @@ static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc) drm_crtc_vblank_on(&rcrtc->crtc); } +static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc) +{ + int ret; + + /* + * Guard against double-get, as the function is called from both the + * .atomic_enable() and .atomic_begin() handlers. + */ + if (rcrtc->initialized) + return 0; + + ret = clk_prepare_enable(rcrtc->clock); + if (ret < 0) + return ret; + + ret = clk_prepare_enable(rcrtc->extclock); + if (ret < 0) + goto error_clock; + + ret = rcar_du_group_get(rcrtc->group); + if (ret < 0) + goto error_group; + + rcar_du_crtc_setup(rcrtc); + rcrtc->initialized = true; + + return 0; + +error_group: + clk_disable_unprepare(rcrtc->extclock); +error_clock: + clk_disable_unprepare(rcrtc->clock); + return ret; +} + +static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc) +{ + rcar_du_group_put(rcrtc->group); + + clk_disable_unprepare(rcrtc->extclock); + clk_disable_unprepare(rcrtc->clock); + + rcrtc->initialized = false; +} + static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) { bool interlaced; @@ -639,16 +651,7 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - /* - * If the CRTC has already been setup by the .atomic_begin() handler we - * can skip the setup stage. - */ - if (!rcrtc->initialized) { - rcar_du_crtc_get(rcrtc); - rcar_du_crtc_setup(rcrtc); - rcrtc->initialized = true; - } - + rcar_du_crtc_get(rcrtc); rcar_du_crtc_start(rcrtc); } @@ -667,7 +670,6 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, } spin_unlock_irq(&crtc->dev->event_lock); - rcrtc->initialized = false; rcrtc->outputs = 0; } @@ -680,14 +682,17 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, /* * If a mode set is in progress we can be called with the CRTC disabled. - * We then need to first setup the CRTC in order to configure planes. - * The .atomic_enable() handler will notice and skip the CRTC setup. 
+ * We thus need to first get and setup the CRTC in order to configure + * planes. We must *not* put the CRTC in .atomic_flush(), as it must be + * kept awake until the .atomic_enable() call that will follow. The get + * operation in .atomic_enable() will in that case be a no-op, and the + * CRTC will be put later in .atomic_disable(). + * + * If a mode set is not in progress the CRTC is enabled, and the + * following get call will be a no-op. There is thus no need to belance + * it in .atomic_flush() either. */ - if (!rcrtc->initialized) { - rcar_du_crtc_get(rcrtc); - rcar_du_crtc_setup(rcrtc); - rcrtc->initialized = true; - } + rcar_du_crtc_get(rcrtc); if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_atomic_begin(rcrtc); -- GitLab From b4734f43f3cadfaa423ce6aceb1e9faea07b8eb8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 21 Aug 2018 21:31:04 +0300 Subject: [PATCH 1682/1692] drm: rcar-du: Use LVDS PLL clock as dot clock when possible On selected SoCs, the DU can use the clock output by the LVDS encoder PLL as its input dot clock. This feature is optional, but on the D3 and E3 SoC it is often the only way to obtain a precise dot clock frequency, as the other available clocks (CPG-generated clock and external clock) usually have fixed rates. Add a DU model information field to describe which DU channels can use the LVDS PLL output clock as their input clock, and configure clock routing accordingly. This feature is available on H2, M2-W, M2-N, D3 and E3 SoCs, with D3 and E3 being the primary targets. It is left disabled in this commit, and will be enabled per-SoC after careful testing. At the hardware level, clock routing is configured at runtime in two steps, first selecting an internal dot clock between the LVDS PLL clock and the external DOTCLKIN clock, and then selecting between the internal dot clock and the CPG-generated clock. The first part requires stopping the whole DU group in order for the change to take effect, thus causing flickering on the screen. For this reason we currently hardcode the clock source to the LVDS PLL clock if available, and allow flicker-free selection of the external DOTCLKIN clock or CPG-generated clock otherwise. A more dynamic clock selection process can be implemented later if the need arises. Signed-off-by: Laurent Pinchart Tested-by: Jacopo Mondi Reviewed-by: Jacopo Mondi --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 8 ++++ drivers/gpu/drm/rcar-du/rcar_du_drv.h | 2 + drivers/gpu/drm/rcar-du/rcar_du_group.c | 64 +++++++++++++++++++------ 3 files changed, 59 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index c89751c26f9c..2f8776c1ec8f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -261,6 +261,14 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr); escr = ESCR_DCLKSEL_DCLKIN | div; + } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) { + /* + * Use the LVDS PLL output as the dot clock when outputting to + * the LVDS encoder on an SoC that supports this clock routing + * option. We use the clock directly in that case, without any + * additional divider. 
+ */ + escr = ESCR_DCLKSEL_DCLKIN; } else { struct du_clk_params params = { .diff = (unsigned long)-1 }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index fef9ea5c22f3..ebba9aefba6a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -53,6 +53,7 @@ struct rcar_du_output_routing { * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*) * @num_lvds: number of internal LVDS encoders * @dpll_mask: bit mask of DU channels equipped with a DPLL + * @lvds_clk_mask: bitmask of channels that can use the LVDS clock as dot clock */ struct rcar_du_device_info { unsigned int gen; @@ -62,6 +63,7 @@ struct rcar_du_device_info { struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX]; unsigned int num_lvds; unsigned int dpll_mask; + unsigned int lvds_clk_mask; }; #define RCAR_DU_MAX_CRTCS 4 diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index ef2c177afb6d..4c62841eff2f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -89,6 +89,54 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp) rcar_du_group_write(rgrp, DEFR8, defr8); } +static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp) +{ + struct rcar_du_device *rcdu = rgrp->dev; + struct rcar_du_crtc *rcrtc; + unsigned int num_crtcs = 0; + unsigned int i; + u32 didsr; + + /* + * Configure input dot clock routing with a hardcoded configuration. If + * the DU channel can use the LVDS encoder output clock as the dot + * clock, do so. Otherwise route DU_DOTCLKINn signal to DUn. + * + * Each channel can then select between the dot clock configured here + * and the clock provided by the CPG through the ESCR register. + */ + if (rcdu->info->gen < 3 && rgrp->index == 0) { + /* + * On Gen2 a single register in the first group controls dot + * clock selection for all channels. + */ + rcrtc = rcdu->crtcs; + num_crtcs = rcdu->num_crtcs; + } else if (rcdu->info->gen == 3 && rgrp->num_crtcs > 1) { + /* + * On Gen3 dot clocks are setup through per-group registers, + * only available when the group has two channels. + */ + rcrtc = &rcdu->crtcs[rgrp->index * 2]; + num_crtcs = rgrp->num_crtcs; + } + + if (!num_crtcs) + return; + + didsr = DIDSR_CODE; + for (i = 0; i < num_crtcs; ++i, ++rcrtc) { + if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) + didsr |= DIDSR_LCDS_LVDS0(i) + | DIDSR_PDCS_CLK(i, 0); + else + didsr |= DIDSR_LCDS_DCLKIN(i) + | DIDSR_PDCS_CLK(i, 0); + } + + rcar_du_group_write(rgrp, DIDSR, didsr); +} + static void rcar_du_group_setup(struct rcar_du_group *rgrp) { struct rcar_du_device *rcdu = rgrp->dev; @@ -106,21 +154,7 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp) if (rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_EXT_CTRL_REGS)) { rcar_du_group_setup_defr8(rgrp); - - /* - * Configure input dot clock routing. We currently hardcode the - * configuration to routing DOTCLKINn to DUn. Register fields - * depend on the DU generation, but the resulting value is 0 in - * all cases. - * - * On Gen2 a single register in the first group controls dot - * clock selection for all channels, while on Gen3 dot clocks - * are setup through per-group registers, only available when - * the group has two channels. 
- */ - if ((rcdu->info->gen < 3 && rgrp->index == 0) || - (rcdu->info->gen == 3 && rgrp->num_crtcs > 1)) - rcar_du_group_write(rgrp, DIDSR, DIDSR_CODE); + rcar_du_group_setup_didsr(rgrp); } if (rcdu->info->gen >= 3) -- GitLab From 1f98b2a4fd4632db3b585a624032b7ec785a5255 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 22 Aug 2018 00:01:07 +0300 Subject: [PATCH 1683/1692] drm: rcar-du: Enable configurable DPAD0 routing on Gen3 All Gen3 SoCs supported so far have a fixed association between DPAD0 and DU channels, which led to hardcoding that association when writing the corresponding hardware register. The D3 and E3 will break that mechanism as DPAD0 can be dynamically connected to either DU0 or DU1. Make DPAD0 routing dynamic on Gen3. To ensure a valid hardware configuration when the DU starts without the RGB output enabled, DPAD0 is associated at initialization time to the first DU channel that it can be connected to. This makes no change on Gen2 as all Gen2 SoCs can connected DPAD0 to DU0, which is the current implicit default value. As the DPAD0 source is always 0 when a single source is possible on Gen2, we can also simplify the Gen2 code in the same function to remove a conditional check. Signed-off-by: Laurent Pinchart Tested-by: Jacopo Mondi Reviewed-by: Jacopo Mondi --- drivers/gpu/drm/rcar-du/rcar_du_group.c | 17 ++++++----------- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 12 ++++++++++++ 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 4c62841eff2f..f38703e7a10d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -56,8 +56,6 @@ static void rcar_du_group_setup_pins(struct rcar_du_group *rgrp) static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp) { struct rcar_du_device *rcdu = rgrp->dev; - unsigned int possible_crtcs = - rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs; u32 defr8 = DEFR8_CODE; if (rcdu->info->gen < 3) { @@ -69,21 +67,18 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp) * DU instances that support it. */ if (rgrp->index == 0) { - if (possible_crtcs > 1) - defr8 |= DEFR8_DRGBS_DU(rcdu->dpad0_source); + defr8 |= DEFR8_DRGBS_DU(rcdu->dpad0_source); if (rgrp->dev->vspd1_sink == 2) defr8 |= DEFR8_VSCS; } } else { /* - * On Gen3 VSPD routing can't be configured, but DPAD routing - * needs to be set despite having a single option available. + * On Gen3 VSPD routing can't be configured, and DPAD routing + * is set in the group corresponding to the DPAD output (no Gen3 + * SoC has multiple DPAD sources belonging to separate groups). 
*/ - unsigned int rgb_crtc = ffs(possible_crtcs) - 1; - struct rcar_du_crtc *crtc = &rcdu->crtcs[rgb_crtc]; - - if (crtc->index / 2 == rgrp->index) - defr8 |= DEFR8_DRGBS_DU(crtc->index); + if (rgrp->index == rcdu->dpad0_source / 2) + defr8 |= DEFR8_DRGBS_DU(rcdu->dpad0_source); } rcar_du_group_write(rgrp, DEFR8, defr8); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index b5d79ecd25ea..4ebd61ecbee1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -544,6 +544,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) struct drm_device *dev = rcdu->ddev; struct drm_encoder *encoder; struct drm_fbdev_cma *fbdev; + unsigned int dpad0_sources; unsigned int num_encoders; unsigned int num_groups; unsigned int swindex; @@ -666,6 +667,17 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) encoder->possible_clones = (1 << num_encoders) - 1; } + /* + * Initialize the default DPAD0 source to the index of the first DU + * channel that can be connected to DPAD0. The exact value doesn't + * matter as it should be overwritten by mode setting for the RGB + * output, but it is nonetheless required to ensure a valid initial + * hardware configuration on Gen3 where DU0 can't always be connected to + * DPAD0. + */ + dpad0_sources = rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs; + rcdu->dpad0_source = ffs(dpad0_sources) - 1; + drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); -- GitLab From 9144adc5e5a99577bce0d4ee2ca3615f53b9d296 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 22 Aug 2018 16:05:02 +0300 Subject: [PATCH 1684/1692] drm: rcar-du: Cache DSYSR value to ensure known initial value DSYSR is a DU channel register that also contains group fields. It is thus written to by both the group and CRTC code, using read-update-write sequences. As the register isn't initialized explicitly at startup time, this can lead to invalid or otherwise unexpected values being written to some of the fields if they have been modified by the firmware or just not reset properly. To fix this we can write a fully known value to the DSYSR register when turning a channel's functional clock on. However, the mix of group and channel fields complicate this. A simpler solution is to cache the register and initialize the cached value to the desired hardware defaults. 
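For readers unfamiliar with the shadow-register approach this patch adopts, the sketch below illustrates the general pattern in isolation. It is a standalone example with made-up names (shadowed_reg, shadowed_reg_clr_set), not the driver's code; the actual helper is in the diff that follows.

#include <stdint.h>

/*
 * Illustration only: keep a cached ("shadow") copy of a register shared by
 * several owners. Updates start from the cached value, which was seeded with
 * known defaults, instead of reading back whatever the firmware left in the
 * hardware.
 */
struct shadowed_reg {
        uint32_t cache;                 /* last value written */
        volatile uint32_t *mmio;        /* memory-mapped register */
};

static void shadowed_reg_clr_set(struct shadowed_reg *r, uint32_t clr,
                                 uint32_t set)
{
        r->cache = (r->cache & ~clr) | set;     /* update the cached copy */
        *r->mmio = r->cache;                    /* mirror it to the hardware */
}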
Signed-off-by: Laurent Pinchart Tested-by: Jacopo Mondi Reviewed-by: Kieran Bingham --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 16 ++++++++-------- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 5 +++++ drivers/gpu/drm/rcar-du/rcar_du_group.c | 7 ++++--- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 2f8776c1ec8f..f827fccf6416 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -57,13 +57,12 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set) rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set); } -static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg, - u32 clr, u32 set) +void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set) { struct rcar_du_device *rcdu = rcrtc->group->dev; - u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg); - rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set); + rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set; + rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr); } /* ----------------------------------------------------------------------------- @@ -576,9 +575,9 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) * actively driven). */ interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE; - rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK | DSYSR_SCM_MASK, - (interlaced ? DSYSR_SCM_INT_VIDEO : 0) | - DSYSR_TVM_MASTER); + rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK | DSYSR_SCM_MASK, + (interlaced ? DSYSR_SCM_INT_VIDEO : 0) | + DSYSR_TVM_MASTER); rcar_du_group_start_stop(rcrtc->group, true); } @@ -645,7 +644,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) * Select switch sync mode. This stops display operation and configures * the HSYNC and VSYNC signals as inputs. */ - rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); + rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); rcar_du_group_start_stop(rcrtc->group, false); } @@ -1121,6 +1120,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, rcrtc->group = rgrp; rcrtc->mmio_offset = mmio_offsets[hwindex]; rcrtc->index = hwindex; + rcrtc->dsysr = (rcrtc->index % 2 ? 
0 : DSYSR_DRES) | DSYSR_TVM_TVSYNC; if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 4990bbe9ba26..59ac6e7d22c9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -30,6 +30,7 @@ struct rcar_du_vsp; * @mmio_offset: offset of the CRTC registers in the DU MMIO block * @index: CRTC software and hardware index * @initialized: whether the CRTC has been initialized and clocks enabled + * @dsysr: cached value of the DSYSR register * @vblank_enable: whether vblank events are enabled on this CRTC * @event: event to post when the pending page flip completes * @flip_wait: wait queue used to signal page flip completion @@ -50,6 +51,8 @@ struct rcar_du_crtc { unsigned int index; bool initialized; + u32 dsysr; + bool vblank_enable; struct drm_pending_vblank_event *event; wait_queue_head_t flip_wait; @@ -103,4 +106,6 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc, enum rcar_du_output output); void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc); +void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set); + #endif /* __RCAR_DU_CRTC_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index f38703e7a10d..d85f0a1c1581 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -202,9 +202,10 @@ void rcar_du_group_put(struct rcar_du_group *rgrp) static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) { - rcar_du_group_write(rgrp, DSYSR, - (rcar_du_group_read(rgrp, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) | - (start ? DSYSR_DEN : DSYSR_DRES)); + struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2]; + + rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN, + start ? DSYSR_DEN : DSYSR_DRES); } void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start) -- GitLab From ffd15c3e7898cfb6d2a986b2aa8014ad7dc9e333 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 22 Aug 2018 16:21:33 +0300 Subject: [PATCH 1685/1692] drm: rcar-du: Don't use TV sync mode when not supported by the hardware The official way to stop the display is to clear the display enable (DEN) bit in the DSYSR register, but that operates at a group level and affects the two channels in the group. To disable channels selectively, the driver uses TV sync mode that stops display operation on the channel and turns output signals into inputs. While TV sync mode is available in all DU models currently supported, the D3 and E3 DUs don't support it. We will thus need to find an alternative way to turn channels off. In the meantime, condition the switch to TV sync mode to the availability of the feature, to avoid writing an invalid value to the DSYSR register. When the feature is unavailable the display output will turn blank as all planes are disabled when stopping the CRTC. 
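The rcar_du_has() guard used in the hunk below presumably reduces to a simple test of the per-SoC feature mask against the RCAR_DU_FEATURE_* bits defined in rcar_du_drv.h. The sketch here is an assumption about that helper's shape, not a verbatim copy of the in-tree code:

/* Assumed shape of the per-SoC feature test used by the driver. */
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
                               unsigned int feature)
{
        return rcdu->info->features & feature;
}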
Signed-off-by: Laurent Pinchart Tested-by: Jacopo Mondi Reviewed-by: Kieran Bingham --- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 7 +++++- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 33 +++++++++++++++++--------- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 1 + 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index f827fccf6416..17741843cf51 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -643,8 +643,13 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) /* * Select switch sync mode. This stops display operation and configures * the HSYNC and VSYNC signals as inputs. + * + * TODO: Find another way to stop the display for DUs that don't support + * TVM sync. */ - rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); + if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_TVM_SYNC)) + rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK, + DSYSR_TVM_SWITCH); rcar_du_group_start_stop(rcrtc->group, false); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 0954ecd2f943..fa0d381c2d0f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -36,7 +36,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -58,7 +59,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -77,7 +79,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = { static const struct rcar_du_device_info rcar_du_r8a7779_info = { .gen = 2, - .features = RCAR_DU_FEATURE_INTERLACED, + .features = RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -99,7 +102,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .quirks = RCAR_DU_QUIRK_ALIGN_128B, .channels_mask = BIT(2) | BIT(1) | BIT(0), .routes = { @@ -128,7 +132,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -151,7 +156,8 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), .routes = { /* R8A7792 has two RGB outputs. 
*/ @@ -170,7 +176,8 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = { .gen = 2, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), .routes = { /* @@ -193,7 +200,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = { .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS | RCAR_DU_FEATURE_VSP1_SOURCE - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0), .routes = { /* @@ -226,7 +234,8 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = { .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS | RCAR_DU_FEATURE_VSP1_SOURCE - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(2) | BIT(1) | BIT(0), .routes = { /* @@ -255,7 +264,8 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = { .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS | RCAR_DU_FEATURE_VSP1_SOURCE - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(3) | BIT(1) | BIT(0), .routes = { /* @@ -284,7 +294,8 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = { .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_EXT_CTRL_REGS | RCAR_DU_FEATURE_VSP1_SOURCE - | RCAR_DU_FEATURE_INTERLACED, + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(0), .routes = { /* R8A77970 has one RGB output and one LVDS output. */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index ebba9aefba6a..143c037e2c0f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -27,6 +27,7 @@ struct rcar_du_device; #define RCAR_DU_FEATURE_EXT_CTRL_REGS BIT(1) /* Has extended control registers */ #define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */ #define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */ +#define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */ #define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ -- GitLab From 122702077e4492e02de8a6257e6cb2227c617cf0 Mon Sep 17 00:00:00 2001 From: Ulrich Hecht Date: Tue, 14 Aug 2018 15:49:56 +0200 Subject: [PATCH 1686/1692] drm: rcar-du: Add r8a77990 and r8a77995 device support Add support for the R-Car D3 (R8A77995) and E3 (R8A77990) SoCs to the R-Car DU driver. The two SoCs instantiate compatible DUs, so a single information structure is enough. 
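As a reminder of how the new compatible entries are consumed, the hypothetical probe excerpt below shows the usual OF match-data lookup that resolves "renesas,du-r8a77990" and "renesas,du-r8a77995" to the shared rcar_du_r8a7799x_info structure. The function name and dev_info() message are made up; only of_device_get_match_data() and the info fields come from the driver.

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "rcar_du_drv.h"        /* struct rcar_du_device_info */

static int rcar_du_example_probe(struct platform_device *pdev)
{
        const struct rcar_du_device_info *info;

        /* Resolves to the entry matched from rcar_du_of_table[]. */
        info = of_device_get_match_data(&pdev->dev);
        if (!info)
                return -ENODEV;

        dev_info(&pdev->dev, "DU gen%u with %u internal LVDS encoder(s)\n",
                 info->gen, info->num_lvds);
        return 0;
}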
Signed-off-by: Ulrich Hecht [Add support for R8A77990] Signed-off-by: Laurent Pinchart Tested-by: Jacopo Mondi Reviewed-by: Kieran Bingham --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 30 +++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index fa0d381c2d0f..084f58df4a8c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -311,6 +311,34 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = { .num_lvds = 1, }; +static const struct rcar_du_device_info rcar_du_r8a7799x_info = { + .gen = 3, + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + | RCAR_DU_FEATURE_EXT_CTRL_REGS + | RCAR_DU_FEATURE_VSP1_SOURCE, + .channels_mask = BIT(1) | BIT(0), + .routes = { + /* + * R8A77990 and R8A77995 have one RGB output and two LVDS + * outputs. + */ + [RCAR_DU_OUTPUT_DPAD0] = { + .possible_crtcs = BIT(0) | BIT(1), + .port = 0, + }, + [RCAR_DU_OUTPUT_LVDS0] = { + .possible_crtcs = BIT(0), + .port = 1, + }, + [RCAR_DU_OUTPUT_LVDS1] = { + .possible_crtcs = BIT(1), + .port = 2, + }, + }, + .num_lvds = 2, + .lvds_clk_mask = BIT(1) | BIT(0), +}; + static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info }, { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, @@ -324,6 +352,8 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info }, { .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info }, { .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info }, + { .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info }, + { .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info }, { } }; -- GitLab From 331d880b35a76b5de0eec8cbcecbf615d758a5f9 Mon Sep 17 00:00:00 2001 From: John Garry Date: Sat, 22 Sep 2018 01:25:25 +0800 Subject: [PATCH 1687/1692] drm/hisilicon: hibmc: Do not carry error code in HiBMC framebuffer pointer In hibmc_drm_fb_create(), when the call to hibmc_framebuffer_init() fails with error, do not store the error code in the HiBMC device frame-buffer pointer, as this will be later checked for non-zero value in hibmc_fbdev_destroy() when our intention is to check for a valid function pointer. 
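The pattern at issue is the classic ERR_PTR pitfall sketched below. The example uses made-up names (example_fbdev, example_framebuffer_init) and only illustrates why the field must be reset to NULL on failure, as the one-line hunk at the end of this patch does for hi_fbdev->fb.

#include <linux/err.h>

struct example_fb;                                      /* hypothetical type */
struct example_fb *example_framebuffer_init(void);      /* returns ERR_PTR() on failure */

struct example_fbdev {
        struct example_fb *fb;
};

static int example_fb_create(struct example_fbdev *fbdev)
{
        fbdev->fb = example_framebuffer_init();
        if (IS_ERR(fbdev->fb)) {
                int ret = PTR_ERR(fbdev->fb);

                /*
                 * Reset the field so a later "if (fbdev->fb)" test in the
                 * teardown path doesn't treat the error code as a valid
                 * framebuffer pointer.
                 */
                fbdev->fb = NULL;
                return ret;
        }
        return 0;
}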
This fixes the following crash: [ 9.699791] Unable to handle kernel NULL pointer dereference at virtual address 000000000000001a [ 9.708672] Mem abort info: [ 9.711489] ESR = 0x96000004 [ 9.714570] Exception class = DABT (current EL), IL = 32 bits [ 9.720551] SET = 0, FnV = 0 [ 9.723631] EA = 0, S1PTW = 0 [ 9.726799] Data abort info: [ 9.729702] ISV = 0, ISS = 0x00000004 [ 9.733573] CM = 0, WnR = 0 [ 9.736566] [000000000000001a] user address but active_mm is swapper [ 9.742987] Internal error: Oops: 96000004 [#1] PREEMPT SMP [ 9.748614] Modules linked in: [ 9.751694] CPU: 16 PID: 293 Comm: kworker/16:1 Tainted: G W 4.19.0-rc4-next-20180920-00001-g9b0012c #322 [ 9.762681] Hardware name: Huawei Taishan 2280 /D05, BIOS Hisilicon D05 IT21 Nemo 2.0 RC0 04/18/2018 [ 9.771915] Workqueue: events work_for_cpu_fn [ 9.776312] pstate: 60000005 (nZCv daif -PAN -UAO) [ 9.781150] pc : drm_mode_object_put+0x0/0x20 [ 9.785547] lr : hibmc_fbdev_fini+0x40/0x58 [ 9.789767] sp : ffff00000af1bcf0 [ 9.793108] x29: ffff00000af1bcf0 x28: 0000000000000000 [ 9.798473] x27: 0000000000000000 x26: ffff000008f66630 [ 9.803838] x25: 0000000000000000 x24: ffff0000095abb98 [ 9.809203] x23: ffff8017db92fe00 x22: ffff8017d2b13000 [ 9.814568] x21: ffffffffffffffea x20: ffff8017d2f80018 [ 9.819933] x19: ffff8017d28a0018 x18: ffffffffffffffff [ 9.825297] x17: 0000000000000000 x16: 0000000000000000 [ 9.830662] x15: ffff0000092296c8 x14: ffff00008939970f [ 9.836026] x13: ffff00000939971d x12: ffff000009229940 [ 9.841391] x11: ffff0000085f8fc0 x10: ffff00000af1b9a0 [ 9.846756] x9 : 000000000000000d x8 : 6620657a696c6169 [ 9.852121] x7 : ffff8017d3340580 x6 : ffff8017d4168000 [ 9.857486] x5 : 0000000000000000 x4 : ffff8017db92fb20 [ 9.862850] x3 : 0000000000002690 x2 : ffff8017d3340480 [ 9.868214] x1 : 0000000000000028 x0 : 0000000000000002 [ 9.873580] Process kworker/16:1 (pid: 293, stack limit = 0x(____ptrval____)) [ 9.880788] Call trace: [ 9.883252] drm_mode_object_put+0x0/0x20 [ 9.887297] hibmc_unload+0x1c/0x80 [ 9.890815] hibmc_pci_probe+0x170/0x3c8 [ 9.894773] local_pci_probe+0x3c/0xb0 [ 9.898555] work_for_cpu_fn+0x18/0x28 [ 9.902337] process_one_work+0x1e0/0x318 [ 9.906382] worker_thread+0x228/0x450 [ 9.910164] kthread+0x128/0x130 [ 9.913418] ret_from_fork+0x10/0x18 [ 9.917024] Code: a94153f3 a8c27bfd d65f03c0 d503201f (f9400c01) [ 9.923180] ---[ end trace 2695ffa0af5be375 ]--- Fixes: d1667b86795a ("drm/hisilicon/hibmc: Add support for frame buffer") Signed-off-by: John Garry Reviewed-by: Xinliang Liu Signed-off-by: Xinliang Liu --- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index b92595c477ef..8bd29075ae4e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -122,6 +122,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj); if (IS_ERR(hi_fbdev->fb)) { ret = PTR_ERR(hi_fbdev->fb); + hi_fbdev->fb = NULL; DRM_ERROR("failed to initialize framebuffer: %d\n", ret); goto out_release_fbi; } -- GitLab From 0ff9f49646353ce31312411e7e7bd2281492a40e Mon Sep 17 00:00:00 2001 From: John Garry Date: Sat, 22 Sep 2018 01:25:26 +0800 Subject: [PATCH 1688/1692] drm/hisilicon: hibmc: Don't overwrite fb helper surface depth Currently the driver overwrites the surface depth provided by the fb helper to give an invalid bpp/surface depth 
combination. This has been exposed by commit 70109354fed2 ("drm: Reject unknown legacy bpp and depth for drm_mode_addfb ioctl"), which now causes the driver to fail to probe. Fix by not overwriting the surface depth. Fixes: d1667b86795a ("drm/hisilicon/hibmc: Add support for frame buffer") Signed-off-by: John Garry Reviewed-by: Xinliang Liu Signed-off-by: Xinliang Liu --- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index 8bd29075ae4e..edcca1761500 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -71,7 +71,6 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, sizes->surface_bpp); - sizes->surface_depth = 32; bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); -- GitLab From a66dae3a2b179eaba8f2f960271fe9d4c72939cb Mon Sep 17 00:00:00 2001 From: John Garry Date: Sat, 22 Sep 2018 01:25:27 +0800 Subject: [PATCH 1689/1692] drm/hisilicon: hibmc: Use HUAWEI PCI vendor ID macro Switch to use Huawei PCI vendor ID macro from pci_ids.h file. In addition, switch to use PCI_VDEVICE() instead of open coding. Signed-off-by: John Garry Reviewed-by: Xinliang Liu Signed-off-by: Xinliang Liu --- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index d4f6f1f9df5b..79b6bdafdc82 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -402,7 +402,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev) } static struct pci_device_id hibmc_pci_table[] = { - {0x19e5, 0x1711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VDEVICE(HUAWEI, 0x1711) }, {0,} }; -- GitLab From 081d0571700be0c9f212f18b5c66f40e9c2e70d2 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Mon, 6 Aug 2018 20:19:01 +0530 Subject: [PATCH 1690/1692] gpu/drm/hisilicon: Convert drm_atomic_helper_suspend/resume() convert drm_atomic_helper_suspend/resume() to use drm_mode_config_helper_suspend/resume(). Fixed one sparse warning by making hibmc_drm_interrupt static. 
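For context, the generic helper adopted below bundles roughly the steps the driver used to open-code and stashes the atomic state in dev->mode_config.suspend_state for the matching resume helper. The sketch approximates that behaviour under the assumption that fbdev suspend handling can be omitted; it is not a verbatim copy of the DRM core implementation.

#include <linux/err.h>
#include <drm/drm_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

static int example_mode_config_suspend(struct drm_device *dev)
{
        struct drm_atomic_state *state;

        drm_kms_helper_poll_disable(dev);

        state = drm_atomic_helper_suspend(dev);
        if (IS_ERR(state)) {
                drm_kms_helper_poll_enable(dev);
                return PTR_ERR(state);
        }

        /*
         * Remembered by the core so the matching resume helper can restore
         * it without driver-private bookkeeping such as priv->suspend_state.
         */
        dev->mode_config.suspend_state = state;
        return 0;
}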
Signed-off-by: Ajit Negi Signed-off-by: Souptick Joarder Reviewed-by: Xinliang Liu Signed-off-by: Xinliang Liu --- .../gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 20 +++---------------- .../gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 1 - 2 files changed, 3 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 79b6bdafdc82..ccecda273112 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -37,7 +37,7 @@ static const struct file_operations hibmc_fops = { .llseek = no_llseek, }; -irqreturn_t hibmc_drm_interrupt(int irq, void *arg) +static irqreturn_t hibmc_drm_interrupt(int irq, void *arg) { struct drm_device *dev = (struct drm_device *)arg; struct hibmc_drm_private *priv = @@ -74,30 +74,16 @@ static int __maybe_unused hibmc_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct hibmc_drm_private *priv = drm_dev->dev_private; - - drm_kms_helper_poll_disable(drm_dev); - priv->suspend_state = drm_atomic_helper_suspend(drm_dev); - if (IS_ERR(priv->suspend_state)) { - DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", - PTR_ERR(priv->suspend_state)); - drm_kms_helper_poll_enable(drm_dev); - return PTR_ERR(priv->suspend_state); - } - return 0; + return drm_mode_config_helper_suspend(drm_dev); } static int __maybe_unused hibmc_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct hibmc_drm_private *priv = drm_dev->dev_private; - drm_atomic_helper_resume(drm_dev, priv->suspend_state); - drm_kms_helper_poll_enable(drm_dev); - - return 0; + return drm_mode_config_helper_resume(drm_dev); } static const struct dev_pm_ops hibmc_pm_ops = { diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index e195521eb41e..45c25a488f42 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -47,7 +47,6 @@ struct hibmc_drm_private { /* drm */ struct drm_device *dev; bool mode_config_initialized; - struct drm_atomic_state *suspend_state; /* ttm */ struct drm_global_reference mem_global_ref; -- GitLab From 45fcedae849396598aee6686318e7488e852dedf Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 13 Jul 2018 10:48:24 +0200 Subject: [PATCH 1691/1692] drm/hisilicon: Replace drm_dev_unref with drm_dev_put This patch unifies the naming of DRM functions for reference counting of struct drm_device. The resulting code is more aligned with the rest of the Linux kernel interfaces. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Xinliang Liu Signed-off-by: Xinliang Liu --- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 4 ++-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index ccecda273112..68c0c297b3a5 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -373,7 +373,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev, err_disable: pci_disable_device(pdev); err_free: - drm_dev_unref(dev); + drm_dev_put(dev); return ret; } @@ -384,7 +384,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev) drm_dev_unregister(dev); hibmc_unload(dev); - drm_dev_unref(dev); + drm_dev_put(dev); } static struct pci_device_id hibmc_pci_table[] = { diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index ddb0403f1975..e6a62d5a00a3 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -193,7 +193,7 @@ static int kirin_drm_bind(struct device *dev) ret = kirin_drm_kms_init(drm_dev); if (ret) - goto err_drm_dev_unref; + goto err_drm_dev_put; ret = drm_dev_register(drm_dev, 0); if (ret) @@ -203,8 +203,8 @@ static int kirin_drm_bind(struct device *dev) err_kms_cleanup: kirin_drm_kms_cleanup(drm_dev); -err_drm_dev_unref: - drm_dev_unref(drm_dev); +err_drm_dev_put: + drm_dev_put(drm_dev); return ret; } @@ -215,7 +215,7 @@ static void kirin_drm_unbind(struct device *dev) drm_dev_unregister(drm_dev); kirin_drm_kms_cleanup(drm_dev); - drm_dev_unref(drm_dev); + drm_dev_put(drm_dev); } static const struct component_master_ops kirin_drm_ops = { -- GitLab From c932c4f831e66fb5bb15229324825a4932ba3992 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 31 Jul 2018 08:33:05 +0200 Subject: [PATCH 1692/1692] drm/hisilicon: Replace ttm_bo_unref with ttm_bo_put The function ttm_bo_put releases a reference to a TTM buffer object. The function's name is more aligned to the Linux kernel convention of naming ref-counting function _get and _put. A call to ttm_bo_unref takes the address of the TTM BO object's pointer and clears the pointer's value to NULL. This is not necessary in most cases and sometimes even worked around by the calling code. A call to ttm_bo_put only releases the reference without clearing the pointer. The current behaviour of cleaning the pointer is kept in the calling code, but should be removed if not required in a later patch. Signed-off-by: Thomas Zimmermann Reviewed-by: Xinliang Liu Signed-off-by: Xinliang Liu --- drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index 4871025f7573..2e3e0bdb8932 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -283,7 +283,7 @@ static void hibmc_bo_unref(struct hibmc_bo **bo) return; tbo = &((*bo)->bo); - ttm_bo_unref(&tbo); + ttm_bo_put(tbo); *bo = NULL; } -- GitLab
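As a closing illustration of the calling-convention difference described in this last patch, the hypothetical helper below (example_bo_release is a made-up name) keeps the pointer-clearing behaviour of the old ttm_bo_unref() while using the new ttm_bo_put():

#include <drm/ttm/ttm_bo_api.h>

static void example_bo_release(struct ttm_buffer_object **tbo)
{
        if (!*tbo)
                return;

        ttm_bo_put(*tbo);       /* drops the reference only... */
        *tbo = NULL;            /* ...so clear the caller's pointer explicitly,
                                 * as ttm_bo_unref() used to do */
}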